diff --git a/skynet/modules/ttt/summaries/processor.py b/skynet/modules/ttt/summaries/processor.py
index 35445f2..3d585d5 100644
--- a/skynet/modules/ttt/summaries/processor.py
+++ b/skynet/modules/ttt/summaries/processor.py
@@ -47,7 +47,7 @@ async def process(payload: DocumentPayload, job_type: JobType, model: ChatOpenAI
     if not text:
         return ""
 
-    system_message = hint_type_to_prompt[job_type][payload.hint]
+    system_message = payload.prompt or hint_type_to_prompt[job_type][payload.hint]
     prompt = PromptTemplate(template=system_message, input_variables=['text'])
 
     # this is a rough estimate of the number of tokens in the input text, since llama models will have a different tokenization scheme
diff --git a/skynet/modules/ttt/summaries/v1/models.py b/skynet/modules/ttt/summaries/v1/models.py
index bfa25d7..f912f76 100644
--- a/skynet/modules/ttt/summaries/v1/models.py
+++ b/skynet/modules/ttt/summaries/v1/models.py
@@ -14,6 +14,13 @@ class HintType(Enum):
 class DocumentPayload(BaseModel):
     text: str
     hint: HintType = summary_default_hint_type
+    prompt: str | None = None
+
+    model_config = {
+        'json_schema_extra': {
+            'examples': [{'text': 'Your text here', 'hint': 'text', 'prompt': 'Summarize the following text {text}'}]
+        }
+    }
 
 
 class DocumentMetadata(BaseModel):
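
For reviewers, below is a minimal runnable sketch of the behavior this change introduces: a caller-supplied `prompt` now takes precedence, and `None` falls back to the existing `hint_type_to_prompt` lookup. The stub `HintType`, the plain-string `'summary'` job key, and the helper `select_system_message` are simplified stand-ins (not real code from this repo) for the actual `HintType`/`JobType` enums and the inline expression in `process()`.

```python
from enum import Enum

from pydantic import BaseModel


class HintType(Enum):
    TEXT = 'text'


# Stand-in for the real lookup table used by processor.py.
hint_type_to_prompt = {'summary': {HintType.TEXT: 'Summarize the following text: {text}'}}


class DocumentPayload(BaseModel):
    text: str
    hint: HintType = HintType.TEXT
    prompt: str | None = None


def select_system_message(payload: DocumentPayload, job_type: str) -> str:
    # Mirrors the changed line in process(): a custom prompt wins,
    # the hint-based template is the fallback.
    return payload.prompt or hint_type_to_prompt[job_type][payload.hint]


# No prompt supplied: the hint-based default is used.
assert select_system_message(DocumentPayload(text='...'), 'summary') == 'Summarize the following text: {text}'

# Custom prompt supplied: it overrides the hint entirely.
custom = DocumentPayload(text='...', prompt='List three key points from {text}')
assert select_system_message(custom, 'summary') == 'List three key points from {text}'
```

Note that because the override uses `or` rather than an explicit `is None` check, an empty-string `prompt` also falls through to the hint-based default.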