Skip to content
SDK

Rust SDK

High-performance Rust client library for Nur's Voice AI platform. Zero-cost abstractions with async/await support.

Installation

Requires Rust 1.70 or higher. Add to your Cargo.toml:

1[dependencies]
2nur-ai = "0.1.0"
3tokio = { version = "1", features = ["full"] }

Quick Start

Create a client and generate your first speech with async/await:

1use nur_ai::{Client, TTSRequest};
2
3#[tokio::main]
4async fn main() -> Result<(), Box<dyn std::error::Error>> {
5 // Initialize client (uses NUR_API_KEY env var)
6 let client = Client::new()?;
7
8 // Generate speech from text
9 let audio = client.tts()
10 .generate(TTSRequest {
11 text: "Hello from Rust! This is Nur's voice AI.".to_string(),
12 voice_id: "rachel_v2".to_string(),
13 language: Some("en".to_string()),
14 ..Default::default()
15 })
16 .await?;
17
18 // Save to file
19 audio.save("output.mp3").await?;
20
21 println!("Generated {:.2}s of audio", audio.duration());
22
23 Ok(())
24}

Authentication

The SDK automatically reads your API key from the NUR_API_KEY environment variable. You can also configure it explicitly:

1use nur_ai::{Client, Config};
2use std::time::Duration;
3
4// Option 1: Use environment variable
5let client = Client::new()?;
6
7// Option 2: Explicit API key
8let client = Client::with_api_key("nur_your_api_key_here")?;
9
10// Option 3: Custom configuration
11let config = Config::builder()
12 .api_key("nur_your_api_key_here")
13 .base_url("https://api.nur.ai")
14 .timeout(Duration::from_secs(60))
15 .max_retries(3)
16 .build()?;
17
18let client = Client::with_config(config);

Text to Speech

Generate natural-sounding speech with full type safety:

1use nur_ai::{TTSRequest, OutputFormat};
2
3// Basic generation
4let audio = client.tts()
5 .generate(TTSRequest {
6 text: "Converting text to natural speech.".to_string(),
7 voice_id: "rachel_v2".to_string(),
8 ..Default::default()
9 })
10 .await?;
11
12// With advanced options
13let audio = client.tts()
14 .generate(TTSRequest {
15 text: "Converting text to natural speech.".to_string(),
16 voice_id: "rachel_v2".to_string(),
17 language: Some("en".to_string()),
18 speed: Some(1.0),
19 pitch: Some(1.0),
20 output_format: Some(OutputFormat::Mp3),
21 sample_rate: Some(24000),
22 enable_ssml: Some(true), ..Default::default()
23 })
24 .await?;
25
26// Save audio
27audio.save("output.mp3").await?;
28
29// Get audio bytes
30let bytes = audio.as_bytes();
31
32// Streaming generation
33let mut stream = client.tts()
34 .stream(TTSRequest {
35 text: "Streaming long-form content...".to_string(),
36 voice_id: "rachel_v2".to_string(),
37 ..Default::default()
38 })
39 .await?;
40
41while let Some(chunk) = stream.next().await {
42 let chunk = chunk?;
43 println!("Received {} bytes", chunk.len());
44 // Process chunk
45}

Speech to Text

High-accuracy transcription with speaker diarization:

1use nur_ai::STTRequest;
2use tokio::{fs::File, io::AsyncReadExt};
3
4// Basic transcription
5let transcript = client.stt()
6 .transcribe(STTRequest {
7 file: "audio.mp3".to_string(),
8 ..Default::default()
9 })
10 .await?;
11
12println!("{}", transcript.text);
13
14// With speaker diarization and timestamps
15let transcript = client.stt()
16 .transcribe(STTRequest {
17 file: "meeting.mp3".to_string(),
18 language: Some("en".to_string()),
19 speaker_diarization: Some(true),
20 timestamps: Some(true),
21 punctuation_enabled: Some(true),
22 filter_profanity: Some(true), ..Default::default()
23 })
24 .await?;
25
26// Access segments with speaker info
27for segment in &transcript.segments {
28 println!(
29 "[Speaker {}] {}",
30 segment.speaker,
31 segment.text
32 );
33 println!(" Time: {:.2}s - {:.2}s", segment.start, segment.end);
34}
35
36// Real-time streaming transcription
37let mut stream = client.stt()
38 .stream_transcribe(Default::default())
39 .await?;
40let mut sender = stream.clone(); // clone a sender handle; `stream` stays usable for receiving below
41// Send audio chunks in background task
42tokio::spawn(async move {
43 let mut file = File::open("audio.mp3").await.unwrap();
44 let mut buffer = vec![0u8; 4096];
45
46 loop {
47 let n = file.read(&mut buffer).await.unwrap();
48 if n == 0 { break; }
49
50 sender.send(&buffer[..n]).await.unwrap();
51 }
52
53 sender.finish().await.unwrap();
54});
55
56// Receive transcripts
57while let Some(result) = stream.next().await {
58 let result = result?;
59 println!(
60 "Transcription: {} (final: {})",
61 result.text,
62 result.is_final
63 );
64}

Voice Agents

Build real-time conversational AI with bidirectional streaming:

1use nur_ai::{VoiceAgentConfig, ResponseType};
2use tokio::sync::mpsc;
3
4// Create a voice agent session
5let session = client.voice_agent()
6 .create_session(VoiceAgentConfig {
7 voice_id: "rachel_v2".to_string(),
8 language: "en".to_string(),
9 system_prompt: "You are a helpful assistant.".to_string(),
10 temperature: 0.7,
11 max_tokens: 150,
12 ..Default::default()
13 })
14 .await?;
15
16let sender_session = session.clone(); // clone a handle for the sender task; `session` stays usable for receiving below
17
18// Send audio from microphone in background task
19tokio::spawn(async move {
20 while let Some(audio_chunk) = microphone_stream.next().await {
21 if let Err(e) = sender_session.send_audio(audio_chunk).await {
22 eprintln!("Error sending audio: {}", e);
23 break;
24 }
25 }
26});
27
28// Receive agent responses
29while let Some(response) = session.receive().await? {
30 match response.response_type {
31 ResponseType::Transcript => {
32 println!("User: {}", response.transcript.unwrap().text);
33 }
34 ResponseType::AgentText => {
35 println!("Agent: {}", response.agent_text.unwrap());
36 }
37 ResponseType::Audio => {
38 // Play audio chunk
39 play_audio(&response.audio.unwrap());
40 }
41 }
42}

Configuration Options

All available client configuration options:

| Method | Type | Default | Description |
|---|---|---|---|
| `api_key()` | `String` | `NUR_API_KEY` env var | Your API key |
| `base_url()` | `&str` | `https://api.nur.ai` | API base URL |
| `timeout()` | `Duration` | 30s | Request timeout |
| `max_retries()` | `u32` | 3 | Maximum retry attempts |
| `retry_delay()` | `Duration` | 1s | Delay between retries |
| `user_agent()` | `&str` | `nur-rust/0.1.0` | HTTP user agent |

Error Handling

The SDK provides a comprehensive error type with pattern matching:

1use nur_ai::{Error, ErrorKind};
2use std::time::Duration;
3let result = client.tts()
4 .generate(TTSRequest {
5 text: "Hello world".to_string(),
6 voice_id: "rachel_v2".to_string(),
7 ..Default::default()
8 })
9 .await;
10
11match result {
12 Ok(audio) => {
13 println!("Success!");
14 }
15 Err(e) => match e.kind() {
16 ErrorKind::RateLimit { retry_after } => {
17 println!("Rate limited. Retry after {} seconds", retry_after);
18 tokio::time::sleep(Duration::from_secs(*retry_after)).await;
19 // Retry request...
20 }
21 ErrorKind::Authentication => {
22 eprintln!("Authentication failed: check your API key");
23 }
24 ErrorKind::Api { status_code, message } => {
25 eprintln!("API error [{}]: {}", status_code, message);
26 }
27 ErrorKind::Network => {
28 eprintln!("Network error: {}", e);
29 }
30 ErrorKind::Timeout => {
31 eprintln!("Request timed out");
32 }
33 _ => {
34 eprintln!("Unexpected error: {}", e);
35 }
36 }
37}
38
39// Using the ? operator with custom error types
40async fn process_audio() -> Result<(), Box<dyn std::error::Error>> {
41 let audio = client.tts()
42 .generate(TTSRequest {
43 text: "Hello".to_string(),
44 voice_id: "rachel_v2".to_string(),
45 ..Default::default()
46 })
47 .await?;
48
49 audio.save("output.mp3").await?;
50 Ok(())
51}

Need Help?