Skip to content

Commit

Permalink
Merge pull request #108 from Aleph-Alpha/v3
Browse files Browse the repository at this point in the history
v3 Release
  • Loading branch information
ahartel authored Apr 4, 2023
2 parents e25bdd5 + c273d92 commit 3e5c3de
Show file tree
Hide file tree
Showing 22 changed files with 152 additions and 1,755 deletions.
12 changes: 12 additions & 0 deletions Changelog.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,17 @@
# Changelog

## 3.0.0

### Breaking Changes

- Removed deprecated `AlephAlphaClient` and `AlephAlphaModel`. Use `Client` or `AsyncClient` instead.
- Removed deprecated `ImagePrompt`. Import `Image` instead for image prompt items.
- New Q&A interface. We've improved the Q&A implementation, and most parameters are no longer needed.
- You only need to specify your documents, a query, and (optional) the max number of answers you want to receive.
- You no longer specify a model.
- Removed "model" parameter from summarize method
- Removed "model_version" from `SummarizationResponse`

## 2.17.0

### Features
Expand Down
6 changes: 0 additions & 6 deletions aleph_alpha_client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
ControlTokenOverlap,
Image,
ImageControl,
ImagePrompt,
Prompt,
Text,
TextControl,
Expand All @@ -11,12 +10,10 @@
)
from .aleph_alpha_client import (
POOLING_OPTIONS,
AlephAlphaClient,
AsyncClient,
Client,
QuotaError,
)
from .aleph_alpha_model import AlephAlphaModel
from .completion import CompletionRequest, CompletionResponse
from .detokenization import DetokenizationRequest, DetokenizationResponse
from .document import Document
Expand Down Expand Up @@ -52,8 +49,6 @@
from .version import __version__

__all__ = [
"AlephAlphaClient",
"AlephAlphaModel",
"AsyncClient",
"Client",
"CompletionRequest",
Expand All @@ -73,7 +68,6 @@
"ExplanationResponse",
"Image",
"ImageControl",
"ImagePrompt",
"ImagePromptItemExplanation",
"ImageScore",
"POOLING_OPTIONS",
Expand Down
837 changes: 8 additions & 829 deletions aleph_alpha_client/aleph_alpha_client.py

Large diffs are not rendered by default.

172 changes: 0 additions & 172 deletions aleph_alpha_client/aleph_alpha_model.py

This file was deleted.

82 changes: 80 additions & 2 deletions aleph_alpha_client/explanation.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
# Import Literal with Python 3.7 fallback
from typing_extensions import Literal

from aleph_alpha_client import Text

from aleph_alpha_client.prompt import ControlTokenOverlap, Image, Prompt, PromptItem


Expand Down Expand Up @@ -204,6 +206,20 @@ def from_json(score: Any) -> "TextScore":
score=score["score"],
)

class TextScoreWithRaw(NamedTuple):
    """Explanation score for a span of a text prompt item, enriched with the
    raw substring that the span covers.

    Fields mirror ``TextScore`` (character offset, span length, attribution
    score) plus ``text``, the exact slice of the prompt that was scored.
    """

    start: int
    length: int
    score: float
    text: str

    @staticmethod
    def from_text_score(score: TextScore, prompt: Text) -> "TextScoreWithRaw":
        """Copy *score* and attach the substring of *prompt* it refers to."""
        span_end = score.start + score.length
        return TextScoreWithRaw(
            score.start,
            score.length,
            score.score,
            prompt.text[score.start:span_end],
        )

class ImageScore(NamedTuple):
left: float
Expand Down Expand Up @@ -236,6 +252,20 @@ def from_json(score: Any) -> "TargetScore":
score=score["score"],
)

class TargetScoreWithRaw(NamedTuple):
    """Explanation score for a span of the completion target, enriched with
    the raw substring that the span covers.

    Fields mirror ``TargetScore`` (character offset, span length, attribution
    score) plus ``text``, the exact slice of the target that was scored.
    """

    start: int
    length: int
    score: float
    text: str

    @staticmethod
    def from_target_score(score: TargetScore, target: str) -> "TargetScoreWithRaw":
        """Copy *score* and attach the substring of *target* it refers to."""
        begin = score.start
        end = begin + score.length
        return TargetScoreWithRaw(begin, score.length, score.score, target[begin:end])

class TokenScore(NamedTuple):
score: float
Expand Down Expand Up @@ -275,23 +305,37 @@ def in_pixels(self, prompt_item: PromptItem) -> "ImagePromptItemExplanation":


class TextPromptItemExplanation(NamedTuple):
    """Explanation scores for a single text item of the prompt.

    ``scores`` holds plain ``TextScore`` entries as parsed from the API, or
    ``TextScoreWithRaw`` entries once :meth:`with_text` has attached the raw
    prompt text.
    """

    scores: List[Union[TextScore, TextScoreWithRaw]]

    @staticmethod
    def from_json(item: Dict[str, Any]) -> "TextPromptItemExplanation":
        """Deserialize one text prompt item from the API response."""
        parsed = [TextScore.from_json(raw) for raw in item["scores"]]
        return TextPromptItemExplanation(scores=parsed)

    def with_text(self, prompt: Text) -> "TextPromptItemExplanation":
        """Return a copy whose plain scores carry the covered prompt text."""
        enriched = []
        for score in self.scores:
            if isinstance(score, TextScore):
                enriched.append(TextScoreWithRaw.from_text_score(score, prompt))
            else:
                # Already enriched — keep as-is.
                enriched.append(score)
        return TextPromptItemExplanation(scores=enriched)



class TargetPromptItemExplanation(NamedTuple):
    """Explanation scores for the completion target.

    ``scores`` holds plain ``TargetScore`` entries as parsed from the API, or
    ``TargetScoreWithRaw`` entries once :meth:`with_text` has attached the raw
    target text.
    """

    scores: List[Union[TargetScore, TargetScoreWithRaw]]

    @staticmethod
    def from_json(item: Dict[str, Any]) -> "TargetPromptItemExplanation":
        """Deserialize the target explanation from the API response."""
        parsed = [TargetScore.from_json(raw) for raw in item["scores"]]
        return TargetPromptItemExplanation(scores=parsed)

    def with_text(self, prompt: str) -> "TargetPromptItemExplanation":
        """Return a copy whose plain scores carry the covered target text."""
        enriched = []
        for score in self.scores:
            if isinstance(score, TargetScore):
                enriched.append(TargetScoreWithRaw.from_target_score(score, prompt))
            else:
                # Already enriched — keep as-is.
                enriched.append(score)
        return TargetPromptItemExplanation(scores=enriched)





class TokenPromptItemExplanation(NamedTuple):
Expand Down Expand Up @@ -352,6 +396,31 @@ def with_image_prompt_items_in_pixels(self, prompt: Prompt) -> "Explanation":
],
)

def with_text_from_prompt(self, prompt: Prompt, target: str) -> "Explanation":
items: List[Union[
TextPromptItemExplanation,
ImagePromptItemExplanation,
TargetPromptItemExplanation,
TokenPromptItemExplanation,
]] = []
for item_index, item in enumerate(self.items):
if isinstance(item, TextPromptItemExplanation):
# separate variable to fix linting error
prompt_item = prompt.items[item_index]
if isinstance(prompt_item, Text):
items.append(item.with_text(prompt_item))
else:
items.append(item)
elif isinstance(item, TargetPromptItemExplanation):
items.append(item.with_text(target))
else:
items.append(item)
return Explanation(
target=self.target,
items=items,
)



class ExplanationResponse(NamedTuple):
model_version: str
Expand All @@ -375,3 +444,12 @@ def with_image_prompt_items_in_pixels(
for explanation in self.explanations
]
return ExplanationResponse(self.model_version, mapped_explanations)

def with_text_from_prompt(
self, request: ExplanationRequest
) -> "ExplanationResponse":
mapped_explanations = [
explanation.with_text_from_prompt(request.prompt, request.target)
for explanation in self.explanations
]
return ExplanationResponse(self.model_version, mapped_explanations)
Loading

0 comments on commit 3e5c3de

Please sign in to comment.