forked from sobelio/llm-chain
-
Notifications
You must be signed in to change notification settings - Fork 0
/
simple_sequential_generation_stream.rs
40 lines (35 loc) · 1.68 KB
/
simple_sequential_generation_stream.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
use llm_chain::{chains::sequential::Chain, executor, output::StreamExt, prompt, step::Step};
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a new ChatGPT executor with the default settings.
    let exec = executor!()?;
    // Build a chain of two steps; the second one produces a streamed response.
    let chain = Chain::new(vec![
        // First step: make a personalized birthday email
        Step::for_prompt_template(
            prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")
        ),
        // Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.
        Step::for_prompt_with_streaming(
            prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \n--\n{{text}}")
        )
    ]);
    // Run the chain with the provided parameters.
    let res = chain
        .run(
            // Create a Parameters object with key-value pairs for the placeholders
            vec![("name", "Emil"), ("date", "February 30th 2023")].into(),
            &exec,
        )
        .await?; // propagate errors via `?` instead of panicking with `unwrap()`
    // Obtain the response as a Stream and print each chunk as it arrives.
    // (Calling `res.primary_textual_output()` instead would await the whole
    // text at once rather than streaming it.)
    let mut stream = res.as_stream().await?;
    while let Some(v) = stream.next().await {
        print!("{}", v);
    }
    Ok(())
}