diff --git a/examples/chat_completion.rs b/examples/chat_completion.rs
index e4f965b..b87d63d 100644
--- a/examples/chat_completion.rs
+++ b/examples/chat_completion.rs
@@ -6,7 +6,7 @@ use std::env;
 async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let client = Client::new(env::var("OPENAI_API_KEY").unwrap().to_string());
     let req = ChatCompletionRequest {
-        model: chat_completion::GPT3_5_TURBO.to_string(),
+        model: chat_completion::GPT4.to_string(),
         messages: vec![chat_completion::ChatCompletionMessage {
             role: chat_completion::MessageRole::user,
             content: String::from("NFTとは?"),
diff --git a/src/v1/chat_completion.rs b/src/v1/chat_completion.rs
index d3cfa8f..22330ed 100644
--- a/src/v1/chat_completion.rs
+++ b/src/v1/chat_completion.rs
@@ -4,6 +4,10 @@ use crate::v1::common;
 
 pub const GPT3_5_TURBO: &str = "gpt-3.5-turbo";
 pub const GPT3_5_TURBO_0301: &str = "gpt-3.5-turbo-0301";
+pub const GPT4: &str = "gpt-4";
+pub const GPT4_0314: &str = "gpt-4-0314";
+pub const GPT4_32K: &str = "gpt-4-32k";
+pub const GPT4_32K_0314: &str = "gpt-4-32k-0314";
 
 #[derive(Debug, Serialize)]
 pub struct ChatCompletionRequest {