Skip to content
SDK

Flutter SDK

Official Flutter/Dart client library for Nur's Voice AI platform. Build cross-platform voice apps for iOS, Android, and Web.

Installation

Requires Flutter 3.0+ and Dart 3.0+. Add to your pubspec.yaml:

1dependencies:
2 nur_ai: ^0.1.0

Quick Start

Create a client and generate your first speech with async/await:

1import 'package:nur_ai/nur_ai.dart';
2
3void main() async {
4 // Initialize client (uses NUR_API_KEY env var)
5 final client = NurClient();
6
7 try {
8 // Generate speech from text
9 final audio = await client.tts.generate(
10 text: 'Hello from Flutter! This is Nur\'s voice AI.',
11 voiceId: 'rachel_v2',
12 language: 'en',
13 );
14
15 // Save to file
16 await audio.save('output.mp3');
17
18 print('Generated ${audio.duration.toStringAsFixed(2)}s of audio');
19 } catch (e) {
20 print('Error: $e');
21 }
22}

Authentication

Configure your API key during client initialization:

1import 'package:nur_ai/nur_ai.dart';
2
3// Option 1: Use environment variable
4final client = NurClient();
5
6// Option 2: Explicit API key
7final client = NurClient(apiKey: 'nur_your_api_key_here');
8
9// Option 3: Custom configuration
10final config = NurClientConfig(
11 apiKey: 'nur_your_api_key_here',
12 baseUrl: 'https://api.nur.ai',
13 timeout: Duration(seconds: 60),
14 maxRetries: 3,
15);
16
17final client = NurClient.withConfig(config);

Text to Speech

Generate natural-sounding speech with Flutter's audio players:

1import 'package:nur_ai/nur_ai.dart';
2import 'package:audioplayers/audioplayers.dart';
3
4// Basic generation
5final audio = await client.tts.generate(
6 text: 'Converting text to natural speech.',
7 voiceId: 'rachel_v2',
8);
9
10// With advanced options
11final audio = await client.tts.generate(
12 text: 'Converting text to natural speech.',
13 voiceId: 'rachel_v2',
14 language: 'en',
15 speed: 1.0,
16 pitch: 1.0,
17 outputFormat: OutputFormat.mp3,
18 sampleRate: 24000,
19 enableSSML: true,
20);
21
22// Save audio
23await audio.save('output.mp3');
24
25// Get audio bytes
26final bytes = audio.bytes;
27
28// Play audio directly
29final player = AudioPlayer();
30await player.play(BytesSource(audio.bytes));
31
32// Streaming generation
33await for (final chunk in client.tts.stream(
34 text: 'Streaming long-form content...',
35 voiceId: 'rachel_v2',
36)) {
37 print('Received ${chunk.length} bytes');
38 // Process or play chunk in real-time
39}
40
41// Widget integration
42class TTSWidget extends StatefulWidget {
43 @override
44 _TTSWidgetState createState() => _TTSWidgetState();
45}
46
47class _TTSWidgetState extends State<TTSWidget> {
48 final client = NurClient();
49 bool isLoading = false;
50
51 Future<void> generateSpeech(String text) async {
52 setState(() => isLoading = true);
53
54 try {
55 final audio = await client.tts.generate(
56 text: text,
57 voiceId: 'rachel_v2',
58 );
59
60 final player = AudioPlayer();
61 await player.play(BytesSource(audio.bytes));
62 } finally {
63 setState(() => isLoading = false);
64 }
65 }
66
67 @override
68 Widget build(BuildContext context) {
69 // Your UI here
70 }
71}

Speech to Text

High-accuracy transcription with microphone integration:

1import 'package:nur_ai/nur_ai.dart';
2import 'dart:io';
3
4// Basic transcription from file
5final transcript = await client.stt.transcribe(
6 file: File('audio.mp3'),
7);
8print(transcript.text);
9
10// With speaker diarization and timestamps
11final transcript = await client.stt.transcribe(
12 file: File('meeting.mp3'),
13 language: 'en',
14 speakerDiarization: true,
15 timestamps: true,
16 punctuationEnabled: true,
17 filterProfanity: true,
18);
19
20// Access segments with speaker info
21for (final segment in transcript.segments) {
22 print('[Speaker ${segment.speaker}] ${segment.text}');
23 print(' Time: ${segment.start}s - ${segment.end}s');
24}
25
26// Real-time streaming transcription
27final stream = await client.stt.streamTranscribe(
28 language: 'en',
29 timestamps: true,
30);
31
32// Send audio chunks from microphone
33// (Uses the record package for microphone access —
34// add `import 'package:record/record.dart';` at the top of your file)
35
36final recorder = Record();
37await recorder.start();
38
39// Stream audio data
40recorder.onStateChanged().listen((state) async {
41 if (state == RecordState.record) {
42 final audioData = await recorder.getAudioData();
43 await stream.send(audioData);
44 }
45});
46
47// Receive transcripts
48await for (final result in stream.receive()) {
49 print('Transcription: ${result.text} (final: ${result.isFinal})');
50}
51
52// Widget integration
53class STTWidget extends StatefulWidget {
54 @override
55 _STTWidgetState createState() => _STTWidgetState();
56}
57
58class _STTWidgetState extends State<STTWidget> {
59 final client = NurClient();
60 final recorder = Record();
61 String transcription = '';
62
63 Future<void> startRecording() async {
64 final stream = await client.stt.streamTranscribe();
65
66 await recorder.start();
67
68 await for (final result in stream.receive()) {
69 setState(() {
70 transcription = result.text;
71 });
72 }
73 }
74
75 @override
76 Widget build(BuildContext context) {
77 // Your UI here
78 }
79}

Voice Agents

Build real-time conversational AI with Flutter widgets:

1import 'package:nur_ai/nur_ai.dart';
2
3// Create a voice agent session
4final session = await client.voiceAgent.createSession(
5 voiceId: 'rachel_v2',
6 language: 'en',
7 systemPrompt: 'You are a helpful assistant.',
8 temperature: 0.7,
9 maxTokens: 150,
10);
11
12// Setup microphone recording
13final recorder = Record();
14await recorder.start();
15
16// Send audio from microphone
17recorder.onStateChanged().listen((state) async {
18 if (state == RecordState.record) {
19 final audioData = await recorder.getAudioData();
20 await session.sendAudio(audioData);
21 }
22});
23
24// Receive agent responses
25await for (final response in session.receive()) {
26 switch (response.type) {
27 case ResponseType.transcript:
28 print('User: ${response.transcript.text}');
29 break;
30
31 case ResponseType.agentText:
32 print('Agent: ${response.agentText}');
33 break;
34
35 case ResponseType.audio:
36 // Play audio chunk
37 final player = AudioPlayer();
38 await player.play(BytesSource(response.audio));
39 break;
40 }
41}
42
43// Complete voice agent widget
44class VoiceAgentWidget extends StatefulWidget {
45 @override
46 _VoiceAgentWidgetState createState() => _VoiceAgentWidgetState();
47}
48
49class _VoiceAgentWidgetState extends State<VoiceAgentWidget> {
50 final client = NurClient();
51 final recorder = Record();
52 final player = AudioPlayer();
53
54 VoiceAgentSession? session;
55 List<String> messages = [];
56 bool isActive = false;
57
58 Future<void> startConversation() async {
59 setState(() => isActive = true);
60
61 session = await client.voiceAgent.createSession(
62 voiceId: 'rachel_v2',
63 systemPrompt: 'You are a helpful assistant.',
64 );
65
66 // Start recording
67 await recorder.start();
68
69 // Handle responses
70 await for (final response in session!.receive()) {
71 if (response.type == ResponseType.agentText) {
72 setState(() {
73 messages.add('Agent: ${response.agentText}');
74 });
75 } else if (response.type == ResponseType.audio) {
76 await player.play(BytesSource(response.audio));
77 }
78 }
79 }
80
81 @override
82 Widget build(BuildContext context) {
83 return Column(
84 children: [
85 Expanded(
86 child: ListView.builder(
87 itemCount: messages.length,
88 itemBuilder: (context, index) {
89 return ListTile(title: Text(messages[index]));
90 },
91 ),
92 ),
93 ElevatedButton(
94 onPressed: isActive ? null : startConversation,
95 child: Text(isActive ? 'Active' : 'Start'),
96 ),
97 ],
98 );
99 }
100}

Configuration Options

All available client configuration options:

| Property     | Type          | Default               | Description             |
| ------------ | ------------- | --------------------- | ----------------------- |
| `apiKey`     | `String`      | `NUR_API_KEY` env var | Your API key            |
| `baseUrl`    | `String`      | `https://api.nur.ai`  | API base URL            |
| `timeout`    | `Duration`    | 30s                   | Request timeout         |
| `maxRetries` | `int`         | 3                     | Maximum retry attempts  |
| `retryDelay` | `Duration`    | 1s                    | Delay between retries   |
| `httpClient` | `http.Client` | Default               | Custom HTTP client      |

Error Handling

The SDK throws typed exceptions so you can handle each failure mode precisely:

1import 'package:nur_ai/nur_ai.dart';
2
3try {
4 final audio = await client.tts.generate(
5 text: 'Hello world',
6 voiceId: 'rachel_v2',
7 );
8} on RateLimitException catch (e) {
9 print('Rate limited. Retry after ${e.retryAfter} seconds');
10 await Future.delayed(Duration(seconds: e.retryAfter));
11 // Retry request...
12
13} on AuthenticationException {
14 print('Authentication failed: check your API key');
15
16} on NurApiException catch (e) {
17 print('API error [${e.statusCode}]: ${e.message}');
18
19} on TimeoutException {
20 print('Request timed out');
21
22} catch (e) {
23 print('Unexpected error: $e');
24}
25
26// Using timeout with Future
27try {
28 final audio = await client.tts.generate(
29 text: 'Long text...',
30 voiceId: 'rachel_v2',
31 ).timeout(Duration(seconds: 10));
32} on TimeoutException {
33 print('Request timed out after 10 seconds');
34}

Need Help?