mirror of https://github.com/mii443/openai-api-rs.git
synced 2025-08-22 15:15:34 +00:00
cargo fmt
examples/openrouter_reasoning.rs

@@ -1,5 +1,7 @@
 use openai_api_rs::v1::api::OpenAIClient;
-use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest, Reasoning, ReasoningMode, ReasoningEffort};
+use openai_api_rs::v1::chat_completion::{
+    self, ChatCompletionRequest, Reasoning, ReasoningEffort, ReasoningMode,
+};
 use std::env;
 
 #[tokio::main]
@@ -15,13 +17,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         "x-ai/grok-3-mini".to_string(), // Grok model that supports reasoning
         vec![chat_completion::ChatCompletionMessage {
             role: chat_completion::MessageRole::user,
-            content: chat_completion::Content::Text(String::from("Explain quantum computing in simple terms.")),
+            content: chat_completion::Content::Text(String::from(
+                "Explain quantum computing in simple terms.",
+            )),
             name: None,
             tool_calls: None,
             tool_call_id: None,
         }],
     );
-
+
     // Set reasoning with high effort
     req.reasoning = Some(Reasoning {
         mode: Some(ReasoningMode::Effort {
@@ -33,24 +37,24 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     let result = client.chat_completion(req).await?;
     println!("Content: {:?}", result.choices[0].message.content);
-
+
     // Example 2: Using reasoning with max_tokens
     let mut req2 = ChatCompletionRequest::new(
         "anthropic/claude-4-sonnet".to_string(), // Claude model that supports max_tokens
         vec![chat_completion::ChatCompletionMessage {
             role: chat_completion::MessageRole::user,
-            content: chat_completion::Content::Text(String::from("What's the most efficient sorting algorithm?")),
+            content: chat_completion::Content::Text(String::from(
+                "What's the most efficient sorting algorithm?",
+            )),
             name: None,
             tool_calls: None,
             tool_call_id: None,
         }],
     );
-
+
     // Set reasoning with max_tokens
     req2.reasoning = Some(Reasoning {
-        mode: Some(ReasoningMode::MaxTokens {
-            max_tokens: 2000,
-        }),
+        mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
         exclude: None,
         enabled: None,
     });
@@ -61,4 +65,4 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     Ok(())
 }
 
-// OPENROUTER_API_KEY=xxxx cargo run --package openai-api-rs --example openrouter_reasoning
+// OPENROUTER_API_KEY=xxxx cargo run --package openai-api-rs --example openrouter_reasoning
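For orientation, here is a sketch of what the first configuration in this example contributes to the request body. This block is illustrative rather than part of the commit: it assumes the types behave as the tests in src/v1/chat_completion.rs below pin down (the mode flattened under a top-level "reasoning" key, None fields omitted).

    use openai_api_rs::v1::chat_completion::{
        ChatCompletionRequest, Reasoning, ReasoningEffort, ReasoningMode,
    };

    fn main() {
        // Same shape as the example above, with the messages elided.
        let mut req = ChatCompletionRequest::new("x-ai/grok-3-mini".to_string(), vec![]);
        req.reasoning = Some(Reasoning {
            mode: Some(ReasoningMode::Effort {
                effort: ReasoningEffort::High,
            }),
            exclude: None,
            enabled: None,
        });

        // Mirrors test_chat_completion_request_with_reasoning below:
        // the mode's field lands directly under "reasoning".
        let serialized = serde_json::to_value(&req).unwrap();
        assert_eq!(serialized["reasoning"]["effort"], "high");
    }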
src/v1/chat_completion.rs

@@ -26,12 +26,8 @@ pub enum ReasoningEffort {
 #[derive(Debug, Serialize, Deserialize, Clone)]
 #[serde(untagged)]
 pub enum ReasoningMode {
-    Effort {
-        effort: ReasoningEffort,
-    },
-    MaxTokens {
-        max_tokens: i64,
-    },
+    Effort { effort: ReasoningEffort },
+    MaxTokens { max_tokens: i64 },
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
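The collapse to one-line variants above is purely cosmetic; what matters here is #[serde(untagged)]: neither variant writes a tag, so each serializes by the shape of its fields alone. A minimal sketch of the two shapes, using only names from the enum above:

    use openai_api_rs::v1::chat_completion::{ReasoningEffort, ReasoningMode};
    use serde_json::json;

    fn main() {
        // No "Effort" or "MaxTokens" tag appears in the JSON; only field names do.
        let by_effort = ReasoningMode::Effort {
            effort: ReasoningEffort::Low,
        };
        let by_budget = ReasoningMode::MaxTokens { max_tokens: 2000 };

        assert_eq!(serde_json::to_value(&by_effort).unwrap(), json!({"effort": "low"}));
        assert_eq!(serde_json::to_value(&by_budget).unwrap(), json!({"max_tokens": 2000}));
    }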
@@ -366,32 +362,30 @@ mod tests {
             exclude: Some(false),
             enabled: None,
         };
-
+
         let serialized = serde_json::to_value(&reasoning).unwrap();
         let expected = json!({
             "effort": "high",
             "exclude": false
         });
-
+
         assert_eq!(serialized, expected);
     }
 
     #[test]
     fn test_reasoning_max_tokens_serialization() {
         let reasoning = Reasoning {
-            mode: Some(ReasoningMode::MaxTokens {
-                max_tokens: 2000,
-            }),
+            mode: Some(ReasoningMode::MaxTokens { max_tokens: 2000 }),
             exclude: None,
             enabled: Some(true),
         };
-
+
         let serialized = serde_json::to_value(&reasoning).unwrap();
         let expected = json!({
             "max_tokens": 2000,
             "enabled": true
         });
-
+
         assert_eq!(serialized, expected);
     }
 
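These two expectations constrain the Reasoning struct itself, whose definition this diff does not touch: mode must be flattened into the parent object (no "mode" key appears), and None options must be skipped ("enabled" is absent from the first expected value, "exclude" from the second). The following is a hypothetical reconstruction consistent with that behavior, not code from this commit; the actual attributes in src/v1/chat_completion.rs may differ:

    use openai_api_rs::v1::chat_completion::ReasoningMode;
    use serde::{Deserialize, Serialize};

    // Hypothetical definition inferred from the tests above.
    #[derive(Debug, Serialize, Deserialize, Clone)]
    pub struct Reasoning {
        // Flattened: produces {"effort": "high"}, not {"mode": {"effort": "high"}}.
        #[serde(flatten)]
        pub mode: Option<ReasoningMode>,
        // Skipped when None, matching both expected JSON values.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub exclude: Option<bool>,
        #[serde(skip_serializing_if = "Option::is_none")]
        pub enabled: Option<bool>,
    }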
@@ -399,7 +393,7 @@ mod tests {
     fn test_reasoning_deserialization() {
         let json_str = r#"{"effort": "medium", "exclude": true}"#;
         let reasoning: Reasoning = serde_json::from_str(json_str).unwrap();
-
+
         match reasoning.mode {
             Some(ReasoningMode::Effort { effort }) => {
                 assert_eq!(effort, ReasoningEffort::Medium);
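A note on why this deserialization picks the right variant: with #[serde(untagged)], serde tries the variants in declaration order and keeps the first whose shape fits, so the field name present in the JSON decides the variant. A small sketch of both directions, assuming only the enum as shown above:

    use openai_api_rs::v1::chat_completion::{ReasoningEffort, ReasoningMode};

    fn main() {
        // "effort" fits the Effort variant, so no explicit tag is needed.
        let mode: ReasoningMode = serde_json::from_str(r#"{"effort": "medium"}"#).unwrap();
        match mode {
            ReasoningMode::Effort { effort } => assert_eq!(effort, ReasoningEffort::Medium),
            ReasoningMode::MaxTokens { .. } => unreachable!(),
        }

        // "max_tokens" fails Effort's shape (no "effort" field) and falls through.
        let mode: ReasoningMode = serde_json::from_str(r#"{"max_tokens": 2000}"#).unwrap();
        assert!(matches!(mode, ReasoningMode::MaxTokens { max_tokens: 2000 }));
    }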
@@ -411,11 +405,8 @@ mod tests {
 
     #[test]
    fn test_chat_completion_request_with_reasoning() {
-        let mut req = ChatCompletionRequest::new(
-            "gpt-4".to_string(),
-            vec![],
-        );
-
+        let mut req = ChatCompletionRequest::new("gpt-4".to_string(), vec![]);
+
         req.reasoning = Some(Reasoning {
             mode: Some(ReasoningMode::Effort {
                 effort: ReasoningEffort::Low,
@@ -423,7 +414,7 @@ mod tests {
             exclude: None,
             enabled: None,
         });
-
+
         let serialized = serde_json::to_value(&req).unwrap();
         assert_eq!(serialized["reasoning"]["effort"], "low");
     }