An unofficial OpenAI API client library for Rust.
Installation
Cargo.toml
[dependencies]
openai-api-rs = "0.1.5"
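The examples below are async and use #[tokio::main], so the crate is typically used alongside the tokio runtime. A minimal sketch of the extra dependency (the version and feature set here are assumptions; adjust as needed):
tokio = { version = "1", features = ["full"] }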
Usage
The library needs to be configured with your account's secret API key, which is available on the OpenAI website. We recommend setting it as an environment variable rather than hard-coding it. Here's an example of initializing the library with the API key loaded from an environment variable and creating a chat completion:
Set the OPENAI_API_KEY environment variable
$ export OPENAI_API_KEY=sk-xxxxxxx
Create client
use openai_api_rs::v1::api::Client;
use std::env;
let client = Client::new(env::var("OPENAI_API_KEY").unwrap());
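env::var(...).unwrap() aborts with a terse panic message if the variable is unset. A minimal sketch of a more informative failure, using only the standard library (no additional crate API assumed):
use openai_api_rs::v1::api::Client;
use std::env;

// Panic with an actionable message when OPENAI_API_KEY is missing.
let api_key = env::var("OPENAI_API_KEY")
    .expect("OPENAI_API_KEY must be set, e.g. export OPENAI_API_KEY=sk-...");
let client = Client::new(api_key);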
Create request
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
let req = ChatCompletionRequest {
    model: chat_completion::GPT4.to_string(),
    messages: vec![chat_completion::ChatCompletionMessage {
        role: chat_completion::MessageRole::user,
        content: String::from("Hello OpenAI!"),
    }],
};
Send request
let result = client.chat_completion(req).await?;
println!("{:?}", result.choices[0].message.content);
(The .await and ? require an async function that returns a Result; the complete program below shows the full setup.)
Example of chat completion
use openai_api_rs::v1::api::Client;
use openai_api_rs::v1::chat_completion::{self, ChatCompletionRequest};
use std::env;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(env::var("OPENAI_API_KEY").unwrap());
    let req = ChatCompletionRequest {
        model: chat_completion::GPT4.to_string(),
        messages: vec![chat_completion::ChatCompletionMessage {
            role: chat_completion::MessageRole::user,
            content: String::from("Hello OpenAI!"),
        }],
    };
    let result = client.chat_completion(req).await?;
    println!("{:?}", result.choices[0].message.content);
    Ok(())
}
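Note that indexing result.choices[0] panics if the API returns an empty choices list. A minimal sketch of the last lines of main that avoids this, assuming only the response shape used above:
    // Print the first choice if present, otherwise report that none were returned.
    if let Some(choice) = result.choices.first() {
        println!("{:?}", choice.message.content);
    } else {
        eprintln!("the API returned no choices");
    }
    Ok(())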
Check out the full API documentation for examples of all the available functions.
Supported APIs
Completion
Chat Completion