SDK
iOS SDK
Official Swift client library for Nur's Voice AI platform. Build native iOS voice applications with modern Swift concurrency.
Installation
Requires iOS 15.0+, macOS 12.0+. Install via Swift Package Manager:
dependencies: [
    .package(url: "https://github.com/nur-ai/nur-ios.git", from: "0.1.0")
]
Quick Start
Create a client and generate your first speech using async/await:
import UIKit
import NurAI

class ViewController: UIViewController {
    let client = NurClient()

    override func viewDidLoad() {
        super.viewDidLoad()
        Task {
            await generateSpeech()
        }
    }

    func generateSpeech() async {
        do {
            // Generate speech from text
            let audio = try await client.tts.generate(
                text: "Hello from iOS! This is Nur's voice AI.",
                voiceId: "rachel_v2",
                language: "en"
            )

            // Save to file
            try await audio.save(to: "output.mp3")

            print("Generated \(audio.duration)s of audio")
        } catch {
            print("Error: \(error)")
        }
    }
}
Authentication
Configure your API key during client initialization. For production apps, store it securely:
import NurAI

// Option 1: Initialize with API key
let client = NurClient(apiKey: "nur_your_api_key_here")

// Option 2: Custom configuration
let config = NurClientConfig(
    apiKey: "nur_your_api_key_here",
    baseURL: "https://api.nur.ai",
    timeout: 60,
    maxRetries: 3
)
let client = NurClient(config: config)

// Option 3: Using Info.plist (not recommended for production)
// Add NUR_API_KEY to Info.plist
let client = NurClient() // Reads from Info.plist
Text to Speech
Generate natural-sounding speech with AVFoundation integration:
import NurAI
import AVFoundation

// Basic generation
let audio = try await client.tts.generate(
    text: "Converting text to natural speech.",
    voiceId: "rachel_v2"
)

// With advanced options
let audio = try await client.tts.generate(
    text: "Converting text to natural speech.",
    voiceId: "rachel_v2",
    language: "en",
    speed: 1.0,
    pitch: 1.0,
    outputFormat: .mp3,
    sampleRate: 24000,
    enableSSML: true
)

// Save audio
try await audio.save(to: "output.mp3")

// Get audio data
let data = audio.data

// Play audio directly (in a real app, keep a strong reference to the
// player for the duration of playback, or it will be deallocated)
let player = try AVAudioPlayer(data: audio.data)
player.play()

// Streaming generation with progress
for try await chunk in client.tts.stream(
    text: "Streaming long-form content...",
    voiceId: "rachel_v2"
) {
    print("Received \(chunk.count) bytes")
    // Process or play chunk in real-time
}
Speech to Text
High-accuracy transcription with microphone integration:
import NurAI
import AVFoundation

// Basic transcription from file
let transcript = try await client.stt.transcribe(
    file: URL(fileURLWithPath: "audio.mp3")
)
print(transcript.text)

// With speaker diarization and timestamps
let transcript = try await client.stt.transcribe(
    file: URL(fileURLWithPath: "meeting.mp3"),
    language: "en",
    speakerDiarization: true,
    timestamps: true,
    punctuationEnabled: true,
    filterProfanity: true
)

// Access segments with speaker info
for segment in transcript.segments {
    print("[Speaker \(segment.speaker)] \(segment.text)")
    print("  Time: \(segment.start)s - \(segment.end)s")
}

// Real-time microphone transcription
let stream = try await client.stt.streamTranscribe(
    language: "en",
    timestamps: true
)

// Start recording from microphone
let audioEngine = AVAudioEngine()
let inputNode = audioEngine.inputNode
let format = inputNode.outputFormat(forBus: 0)

inputNode.installTap(onBus: 0, bufferSize: 4096, format: format) { buffer, _ in
    Task {
        try await stream.send(buffer.audioBufferList)
    }
}

try audioEngine.start()

// Receive transcripts
for try await result in stream.receive() {
    print("Transcription: \(result.text) (final: \(result.isFinal))")
}
Voice Agents
Build real-time conversational AI with bidirectional streaming:
import NurAI
import AVFoundation

// Create a voice agent session
let session = try await client.voiceAgent.createSession(
    voiceId: "rachel_v2",
    language: "en",
    systemPrompt: "You are a helpful assistant.",
    temperature: 0.7,
    maxTokens: 150
)

// Setup audio engine for microphone input
let audioEngine = AVAudioEngine()
let inputNode = audioEngine.inputNode
let format = inputNode.outputFormat(forBus: 0)

// Send microphone audio to agent
inputNode.installTap(onBus: 0, bufferSize: 4096, format: format) { buffer, _ in
    Task {
        try await session.sendAudio(buffer.audioBufferList)
    }
}

try audioEngine.start()

// Receive and process agent responses
for try await response in session.receive() {
    switch response.type {
    case .transcript:
        print("User: \(response.transcript.text)")

    case .agentText:
        print("Agent: \(response.agentText)")

    case .audio:
        // Play audio response
        let player = try AVAudioPlayer(data: response.audio)
        player.play()
    }
}
Configuration Options
All available client configuration options:
| Property | Type | Default | Description |
|---|---|---|---|
| apiKey | String | Required | Your API key |
| baseURL | String | https://api.nur.ai | API base URL |
| timeout | TimeInterval | 30s | Request timeout |
| maxRetries | Int | 3 | Maximum retry attempts |
| retryDelay | TimeInterval | 1s | Delay between retries |
| urlSession | URLSession | .shared | Custom URL session |
Error Handling
The SDK provides typed errors with Swift's native error handling:
import NurAI

do {
    let audio = try await client.tts.generate(
        text: "Hello world",
        voiceId: "rachel_v2"
    )
} catch let error as NurError.RateLimit {
    print("Rate limited. Retry after \(error.retryAfter) seconds")
    try await Task.sleep(nanoseconds: UInt64(error.retryAfter) * 1_000_000_000)
    // Retry request...

} catch NurError.authentication {
    print("Authentication failed: check your API key")

} catch let error as NurError.API {
    print("API error [\(error.statusCode)]: \(error.message)")

} catch {
    print("Unexpected error: \(error)")
}

// Using Task with timeout
let task = Task {
    try await client.tts.generate(
        text: "Long text...",
        voiceId: "rachel_v2"
    )
}

// Note: withTimeout and TimeoutError are helpers you define yourself
// (e.g. with a task group racing the work against Task.sleep); they
// are not part of the Swift standard library.
do {
    try await withTimeout(seconds: 10) {
        try await task.value
    }
} catch is TimeoutError {
    print("Request timed out")
    task.cancel()
}