cargo fmt

This commit is contained in:
Dongri Jin
2025-07-23 18:59:10 +09:00
parent 5c4326490d
commit a2a98258fa
2 changed files with 25 additions and 30 deletions

View File

@@ -1,5 +1,7 @@
use openai_api_rs::v1::api::OpenAIClient;
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest, Reasoning, ReasoningMode, ReasoningEffort};
use openai_api_rs::v1::chat_completion::{
self, ChatCompletionRequest, Reasoning, ReasoningEffort, ReasoningMode,
};
use std::env;
#[tokio::main]
@@ -15,7 +17,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
"x-ai/grok-3-mini".to_string(), // Grok model that supports reasoning
vec![chat_completion::ChatCompletionMessage {
role: chat_completion::MessageRole::user,
content: chat_completion::Content::Text(String::from("Explain quantum computing in simple terms.")),
content: chat_completion::Content::Text(String::from(
"Explain quantum computing in simple terms.",
)),
name: None,
tool_calls: None,
tool_call_id: None,
@@ -39,7 +43,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
"anthropic/claude-4-sonnet".to_string(), // Claude model that supports max_tokens
vec![chat_completion::ChatCompletionMessage {
role: chat_completion::MessageRole::user,
content: chat_completion::Content::Text(String::from("What's the most efficient sorting algorithm?")),
content: chat_completion::Content::Text(String::from(
"What's the most efficient sorting algorithm?",
)),
name: None,
tool_calls: None,
tool_call_id: None,
@@ -48,9 +54,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Set reasoning with max_tokens
req2.reasoning = Some(Reasoning {
mode: Some(ReasoningMode::MaxTokens {
max_tokens: 2000,
}),
mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
exclude: None,
enabled: None,
});

View File

@@ -26,12 +26,8 @@ pub enum ReasoningEffort {
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(untagged)]
pub enum ReasoningMode {
Effort {
effort: ReasoningEffort,
},
MaxTokens {
max_tokens: i64,
},
Effort { effort: ReasoningEffort },
MaxTokens { max_tokens: i64 },
}
#[derive(Debug, Serialize, Deserialize, Clone)]
@@ -379,9 +375,7 @@ mod tests {
#[test]
fn test_reasoning_max_tokens_serialization() {
let reasoning = Reasoning {
mode: Some(ReasoningMode::MaxTokens {
max_tokens: 2000,
}),
mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
exclude: None,
enabled: Some(true),
};
@@ -411,10 +405,7 @@ mod tests {
#[test]
fn test_chat_completion_request_with_reasoning() {
let mut req = ChatCompletionRequest::new(
"gpt-4".to_string(),
vec![],
);
let mut req = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);
req.reasoning = Some(Reasoning {
mode: Some(ReasoningMode::Effort {