use openai_api_rs::v1::api::Client;
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
use std::env;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a client from the OPENAI_API_KEY environment variable.
    let client = Client::new(env::var("OPENAI_API_KEY").unwrap().to_string());

    // Build a chat completion request with a single user message.
    let req = ChatCompletionRequest {
        model: chat_completion::GPT4.to_string(),
        messages: vec![chat_completion::ChatCompletionMessage {
            role: chat_completion::MessageRole::user,
            content: Some(String::from("What is Bitcoin?")),
            name: None,
            function_call: None,
        }],
        functions: None,
        function_call: None,
    };

    // Send the request and print the model's reply.
    let result = client.chat_completion(req).await?;
    println!("{:?}", result.choices[0].message.content);
    Ok(())
}

// Run with:
// OPENAI_API_KEY=xxxx cargo run --package openai-api-rs --example chat_completion