forked from sobelio/llm-chain
-
Notifications
You must be signed in to change notification settings - Fork 0
/
sequential_generation.rs
38 lines (33 loc) · 1.52 KB
/
sequential_generation.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
use llm_chain::parameters;
use llm_chain::step::Step;
use llm_chain::traits::Executor as ExecutorTrait;
use llm_chain::{chains::sequential::Chain, prompt};
use llm_chain_openai::chatgpt::Executor;
#[tokio::main(flavor = "current_thread")]
/// Example: run a two-step sequential LLM chain against the OpenAI ChatGPT
/// executor. Step 1 drafts a birthday e-mail from `name`/`date` parameters;
/// step 2 receives that draft as `{{text}}` and condenses it into a tweet.
///
/// # Errors
/// Returns an error if the executor cannot be constructed (e.g. missing
/// API credentials in the environment) or if either chain step fails.
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a new ChatGPT executor with the default settings.
    let exec = Executor::new()?;
    // Create a chain of steps with two prompts.
    let chain: Chain = Chain::new(vec![
        // First step: make a personalized birthday email.
        Step::for_prompt_template(
            prompt!("You are a bot for making personalized greetings", "Make personalized birthday e-mail to the whole company for {{name}} who has their birthday on {{date}}. Include their name")
        ),
        // Second step: summarize the email into a tweet. Importantly, the text parameter becomes the result of the previous prompt.
        Step::for_prompt_template(
            prompt!( "You are an assistant for managing social media accounts for a company", "Summarize this email into a tweet to be sent by the company, use emoji if you can. \n--\n{{text}}")
        )
    ]);
    // Run the chain with the provided parameters.
    let res = chain
        .run(
            // Create a Parameters object with key-value pairs for the placeholders.
            parameters!("name" => "Emil", "date" => "February 30th 2023"),
            &exec,
        )
        // Propagate failures with `?` (consistent with Executor::new above)
        // instead of panicking via unwrap() on an API/network error.
        .await?;
    // Print the result to the console ({} is the idiomatic Display spec).
    println!("{}", res);
    Ok(())
}