Skip to content

Commit

Permalink
Merge pull request #36 from Aleph-Alpha/add-model
Browse files Browse the repository at this point in the history
Add model
  • Loading branch information
volkerstampa authored Jun 27, 2022
2 parents 451b7ff + ebf6f2e commit f756975
Show file tree
Hide file tree
Showing 15 changed files with 415 additions and 436 deletions.
125 changes: 55 additions & 70 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,16 +21,15 @@ pip install aleph-alpha-client


```python
from aleph_alpha_client import ImagePrompt, AlephAlphaModel, AlephAlphaClient, CompletionRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with multimodal capabilities for this example.
    model_name = "luminous-extended"
)

url = "https://cdn-images-1.medium.com/max/1200/1*HunNdlTmoPj8EKpl-jqvBA.png"

image = ImagePrompt.from_url(url)
prompt = [
    image,
    "Q: What does the picture show? A:",
]
request = CompletionRequest(prompt=prompt, maximum_tokens=20)
result = model.complete(request)

print(result.completions[0]["completion"])
```
Expand All @@ -49,17 +48,16 @@ print(result.completions[0]["completion"])


```python
from aleph_alpha_client import AlephAlphaClient, AlephAlphaModel, EvaluationRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    model_name = "luminous-extended"
)

request = EvaluationRequest(prompt="The api works", completion_expected=" well")
result = model.evaluate(request)

print(result)

Expand All @@ -71,25 +69,23 @@ print(result)


```python
from aleph_alpha_client import ImagePrompt, AlephAlphaClient, AlephAlphaModel, EvaluationRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with multimodal capabilities for this example.
    model_name = "luminous-extended"
)

url = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/74/2008-09-24_Blockbuster_in_Durham.jpg/330px-2008-09-24_Blockbuster_in_Durham.jpg"
image = ImagePrompt.from_url(url)
prompt = [
    image,
    "Q: What is the name of the store?\nA:",
]
request = EvaluationRequest(prompt=prompt, completion_expected=" Blockbuster Video")
result = model.evaluate(request)

print(result)
```
Expand All @@ -100,17 +96,16 @@ print(result)


```python
from aleph_alpha_client import AlephAlphaModel, AlephAlphaClient, EmbeddingRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    model_name = "luminous-extended"
)

request = EmbeddingRequest(prompt=["This is an example."], layers=[-1], pooling=["mean"])
result = model.embed(request)

print(result)
```
Expand All @@ -121,25 +116,23 @@ print(result)


```python
from aleph_alpha_client import ImagePrompt, AlephAlphaClient, AlephAlphaModel, EmbeddingRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with multimodal capabilities for this example.
    model_name = "luminous-extended"
)

url = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/74/2008-09-24_Blockbuster_in_Durham.jpg/330px-2008-09-24_Blockbuster_in_Durham.jpg"
image = ImagePrompt.from_url(url)
prompt = [
    image,
    "Q: What is the name of the store?\nA:",
]
request = EmbeddingRequest(prompt=prompt, layers=[-1], pooling=["mean"])
result = model.embed(request)

print(result)
```
Expand All @@ -150,17 +143,15 @@ print(result)


```python
from aleph_alpha_client import Document, AlephAlphaClient, AlephAlphaModel, QaRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with qa support for this example.
    model_name = "luminous-extended"
)

docx_file = "./tests/sample.docx"
document = Document.from_docx_file(docx_file)

request = QaRequest(
    documents = [document]
)

result = model.qa(request)

print(result)
```
Expand All @@ -179,17 +170,15 @@ print(result)


```python
from aleph_alpha_client import Document, AlephAlphaClient, AlephAlphaModel, QaRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with qa support for this example.
    model_name = "luminous-extended"
)

prompt = "In imperative programming, a computer program is a sequence of instructions in a programming language that a computer can execute or interpret."
document = Document.from_text(prompt)

request = QaRequest(
    documents = [document],
)

result = model.qa(request)

print(result)
```
Expand All @@ -209,17 +198,15 @@ print(result)


```python
from aleph_alpha_client import Document, ImagePrompt, AlephAlphaClient, AlephAlphaModel, QaRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    # You need to choose a model with qa support and multimodal capabilities for this example.
    model_name = "luminous-extended"
)

url = "https://upload.wikimedia.org/wikipedia/commons/thumb/7/74/2008-09-24_Blockbuster_in_Durham.jpg/330px-2008-09-24_Blockbuster_in_Durham.jpg"
image = ImagePrompt.from_url(url)
prompt = [image]
document = Document.from_prompt(prompt)

request = QaRequest (
    documents = [document]
)

result = model.qa(request)

print(result)
```
Expand All @@ -240,18 +227,17 @@ print(result)


```python
from aleph_alpha_client import AlephAlphaClient, AlephAlphaModel, TokenizationRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    model_name = "luminous-extended"
)

request = TokenizationRequest(prompt="This is an example.", tokens=True, token_ids=True)
response = model.tokenize(request)

print(response)
```
Expand All @@ -261,18 +247,17 @@ print(response)


```python
from aleph_alpha_client import AlephAlphaClient, AlephAlphaModel, DetokenizationRequest
import os

model = AlephAlphaModel(
    AlephAlphaClient(host="https://api.aleph-alpha.com", token=os.getenv("AA_TOKEN")),
    model_name = "luminous-extended"
)

request = DetokenizationRequest(token_ids=[1730, 387, 300, 4377, 17])
response = model.detokenize(request)

print(response)
```
Expand Down
1 change: 1 addition & 0 deletions aleph_alpha_client/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from .aleph_alpha_client import AlephAlphaClient, QuotaError, POOLING_OPTIONS
from .aleph_alpha_model import AlephAlphaModel
from .image import ImagePrompt
from .explanation import ExplanationRequest
from .embedding import EmbeddingRequest
Expand Down
Loading

0 comments on commit f756975

Please sign in to comment.