cargo fmt

Dongri Jin
2025-07-23 18:59:10 +09:00
parent 5c4326490d
commit a2a98258fa
2 changed files with 25 additions and 30 deletions

@@ -1,5 +1,7 @@
 use openai_api_rs::v1::api::OpenAIClient;
-use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest, Reasoning, ReasoningMode, ReasoningEffort};
+use openai_api_rs::v1::chat_completion::{
+    self, ChatCompletionRequest, Reasoning, ReasoningEffort, ReasoningMode,
+};
 use std::env;
 
 #[tokio::main]
@@ -15,13 +17,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         "x-ai/grok-3-mini".to_string(), // Grok model that supports reasoning
         vec![chat_completion::ChatCompletionMessage {
             role: chat_completion::MessageRole::user,
-            content: chat_completion::Content::Text(String::from("Explain quantum computing in simple terms.")),
+            content: chat_completion::Content::Text(String::from(
+                "Explain quantum computing in simple terms.",
+            )),
             name: None,
             tool_calls: None,
             tool_call_id: None,
         }],
     );
 
     // Set reasoning with high effort
     req.reasoning = Some(Reasoning {
         mode: Some(ReasoningMode::Effort {
@@ -33,24 +37,24 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     let result = client.chat_completion(req).await?;
     println!("Content: {:?}", result.choices[0].message.content);
 
     // Example 2: Using reasoning with max_tokens
     let mut req2 = ChatCompletionRequest::new(
         "anthropic/claude-4-sonnet".to_string(), // Claude model that supports max_tokens
         vec![chat_completion::ChatCompletionMessage {
             role: chat_completion::MessageRole::user,
-            content: chat_completion::Content::Text(String::from("What's the most efficient sorting algorithm?")),
+            content: chat_completion::Content::Text(String::from(
+                "What's the most efficient sorting algorithm?",
+            )),
             name: None,
             tool_calls: None,
             tool_call_id: None,
         }],
     );
 
     // Set reasoning with max_tokens
     req2.reasoning = Some(Reasoning {
-        mode: Some(ReasoningMode::MaxTokens {
-            max_tokens: 2000,
-        }),
+        mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
         exclude: None,
         enabled: None,
     });
@@ -61,4 +65,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     Ok(())
 }
 
 // OPENROUTER_API_KEY=xxxx cargo run --package openai-api-rs --example openrouter_reasoning

@@ -26,12 +26,8 @@ pub enum ReasoningEffort {
 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[serde(untagged)]
 pub enum ReasoningMode {
-    Effort {
-        effort: ReasoningEffort,
-    },
-    MaxTokens {
-        max_tokens: i64,
-    },
+    Effort { effort: ReasoningEffort },
+    MaxTokens { max_tokens: i64 },
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
@@ -366,32 +362,30 @@ mod tests {
             exclude: Some(false),
             enabled: None,
         };
 
         let serialized = serde_json::to_value(&reasoning).unwrap();
         let expected = json!({
             "effort": "high",
             "exclude": false
         });
 
         assert_eq!(serialized, expected);
     }
 
     #[test]
     fn test_reasoning_max_tokens_serialization() {
         let reasoning = Reasoning {
-            mode: Some(ReasoningMode::MaxTokens {
-                max_tokens: 2000,
-            }),
+            mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
             exclude: None,
             enabled: Some(true),
         };
 
         let serialized = serde_json::to_value(&reasoning).unwrap();
         let expected = json!({
             "max_tokens": 2000,
             "enabled": true
         });
 
         assert_eq!(serialized, expected);
     }
 
@@ -399,7 +393,7 @@ mod tests {
     fn test_reasoning_deserialization() {
         let json_str = r#"{"effort": "medium", "exclude": true}"#;
         let reasoning: Reasoning = serde_json::from_str(json_str).unwrap();
 
         match reasoning.mode {
             Some(ReasoningMode::Effort { effort }) => {
                 assert_eq!(effort, ReasoningEffort::Medium);
@@ -411,11 +405,8 @@ mod tests {
 
     #[test]
     fn test_chat_completion_request_with_reasoning() {
-        let mut req = ChatCompletionRequest::new(
-            "gpt-4".to_string(),
-            vec![],
-        );
+        let mut req = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);
 
         req.reasoning = Some(Reasoning {
             mode: Some(ReasoningMode::Effort {
                 effort: ReasoningEffort::Low,
@@ -423,7 +414,7 @@ mod tests {
             exclude: None,
             enabled: None,
         });
 
         let serialized = serde_json::to_value(&req).unwrap();
         assert_eq!(serialized["reasoning"]["effort"], "low");
     }
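
The tests above expect the active ReasoningMode variant's fields to land directly on the serialized Reasoning object ({"effort": "high"} rather than {"mode": {"effort": "high"}}). That behaviour follows from #[serde(untagged)] on the enum combined with flattening the mode field into the struct. The standalone sketch below illustrates the mechanism; it is not the crate's own definition, and the #[serde(flatten)] and skip_serializing_if attributes are assumptions inferred from the expected JSON in the tests (it needs serde with the derive feature and serde_json).

use serde::{Deserialize, Serialize};
use serde_json::json;

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
enum ReasoningEffort {
    High,
    Medium,
    Low,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(untagged)]
enum ReasoningMode {
    Effort { effort: ReasoningEffort },
    MaxTokens { max_tokens: i64 },
}

#[derive(Debug, Serialize, Deserialize, Clone)]
struct Reasoning {
    // Assumption: `mode` is flattened so the active variant's fields appear
    // directly on the Reasoning object, matching the expected JSON above.
    #[serde(flatten)]
    mode: Option<ReasoningMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    exclude: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    enabled: Option<bool>,
}

fn main() {
    let reasoning = Reasoning {
        mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
        exclude: None,
        enabled: Some(true),
    };
    // The untagged variant contributes only `max_tokens`; flatten merges it
    // into the parent object instead of nesting it under a `mode` key.
    assert_eq!(
        serde_json::to_value(&reasoning).unwrap(),
        json!({ "max_tokens": 2000, "enabled": true })
    );

    // Deserialization runs the same way: named fields are taken first, and
    // the leftover keys are matched against the untagged variants.
    let parsed: Reasoning =
        serde_json::from_str(r#"{"effort": "medium", "exclude": true}"#).unwrap();
    assert!(matches!(
        parsed.mode,
        Some(ReasoningMode::Effort { effort: ReasoningEffort::Medium })
    ));
}

Keeping the enum untagged and flattened makes effort and max_tokens mutually exclusive in the Rust type while the JSON payload stays flat, which is exactly what the one-line variants produced by cargo fmt in this commit encode.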