|
1 | 1 | // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
2 | 2 |
|
3 |
| -import { APIResource } from '../resource'; |
4 |
| -import { APIPromise } from '../core'; |
5 |
| -import * as Core from '../core'; |
6 |
| -import * as AudioAPI from './audio'; |
7 |
| -import { Stream } from '../streaming'; |
8 |
| -import { type Response } from '../_shims/index'; |
9 |
| - |
10 |
| -export class Audio extends APIResource { |
11 |
| - /** |
12 |
| - * Generate audio from input text |
13 |
| - * |
14 |
| - * @example |
15 |
| - * ```ts |
16 |
| - * const audio = await client.audio.create({ |
17 |
| - * input: 'input', |
18 |
| - * model: 'cartesia/sonic', |
19 |
| - * voice: 'laidback woman', |
20 |
| - * }); |
21 |
| - * |
22 |
| - * const content = await audio.blob(); |
23 |
| - * console.log(content); |
24 |
| - * ``` |
25 |
| - */ |
26 |
| - create(body: AudioCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Response>; |
27 |
| - create( |
28 |
| - body: AudioCreateParamsStreaming, |
29 |
| - options?: Core.RequestOptions, |
30 |
| - ): APIPromise<Stream<AudioSpeechStreamChunk>>; |
31 |
| - create( |
32 |
| - body: AudioCreateParamsBase, |
33 |
| - options?: Core.RequestOptions, |
34 |
| - ): APIPromise<Stream<AudioSpeechStreamChunk> | Response>; |
35 |
| - create( |
36 |
| - body: AudioCreateParams, |
37 |
| - options?: Core.RequestOptions, |
38 |
| - ): APIPromise<Response> | APIPromise<Stream<AudioSpeechStreamChunk>> { |
39 |
| - return this._client.post('/audio/speech', { |
40 |
| - body, |
41 |
| - ...options, |
42 |
| - headers: { Accept: 'application/octet-stream', ...options?.headers }, |
43 |
| - stream: body.stream ?? false, |
44 |
| - __binaryResponse: true, |
45 |
| - }) as APIPromise<Response> | APIPromise<Stream<AudioSpeechStreamChunk>>; |
46 |
| - } |
47 |
| -} |
48 |
| - |
49 |
/**
 * A single server-sent event from the speech stream: either a chunk of
 * audio data or the terminating `[DONE]` sentinel.
 */
export type AudioFile = AudioFile.AudioSpeechStreamEvent | AudioFile.StreamSentinel;

export namespace AudioFile {
  /** An event carrying one audio chunk. */
  export interface AudioSpeechStreamEvent {
    data: AudioAPI.AudioSpeechStreamChunk;
  }

  /** The final stream event; `data` is the literal string `[DONE]`. */
  export interface StreamSentinel {
    data: '[DONE]';
  }
}
60 |
| - |
61 |
/** One chunk of generated speech as delivered by the streaming API. */
export interface AudioSpeechStreamChunk {
  /**
   * base64 encoded audio stream
   */
  b64: string;

  // Name of the model that produced this chunk.
  model: string;

  // Literal discriminator identifying the payload type.
  object: 'audio.tts.chunk';
}
71 |
| - |
72 |
/** Request parameters for `audio.create` — streaming or non-streaming variant. */
export type AudioCreateParams = AudioCreateParamsNonStreaming | AudioCreateParamsStreaming;
73 |
| - |
74 |
/** Fields shared by both the streaming and non-streaming request variants. */
export interface AudioCreateParamsBase {
  /**
   * Input text to generate the audio for
   */
  input: string;

  /**
   * The name of the model to query.
   *
   * [See all of Together AI's audio models](https://docs.together.ai/docs/serverless-models#audio-models)
   */
  model: 'cartesia/sonic' | (string & {});

  /**
   * The voice to use for generating the audio.
   * [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available).
   */
  voice: 'laidback woman' | 'polite man' | 'storyteller lady' | 'friendly sidekick' | (string & {});

  /**
   * Language of input text
   */
  language?:
    | 'en'
    | 'de'
    | 'fr'
    | 'es'
    | 'hi'
    | 'it'
    | 'ja'
    | 'ko'
    | 'nl'
    | 'pl'
    | 'pt'
    | 'ru'
    | 'sv'
    | 'tr'
    | 'zh';

  /**
   * Audio encoding of response
   */
  response_encoding?: 'pcm_f32le' | 'pcm_s16le' | 'pcm_mulaw' | 'pcm_alaw';

  /**
   * The format of audio output
   */
  response_format?: 'mp3' | 'wav' | 'raw';

  /**
   * Sampling rate to use for the output audio
   */
  sample_rate?: number;

  /**
   * If true, output is streamed for several characters at a time instead of waiting
   * for the full response. The stream terminates with `data: [DONE]`. If false,
   * return the encoded audio as octet stream
   */
  stream?: boolean;
}
135 |
| - |
136 |
/** Re-exports the params variants so they are reachable as `AudioCreateParams.*`. */
export namespace AudioCreateParams {
  export type AudioCreateParamsNonStreaming = AudioAPI.AudioCreateParamsNonStreaming;
  export type AudioCreateParamsStreaming = AudioAPI.AudioCreateParamsStreaming;
}
140 |
| - |
141 |
/** Request variant that pins `stream` to `false`, selecting the binary-`Response` overload. */
export interface AudioCreateParamsNonStreaming extends AudioCreateParamsBase {
  /**
   * If true, output is streamed for several characters at a time instead of waiting
   * for the full response. The stream terminates with `data: [DONE]`. If false,
   * return the encoded audio as octet stream
   */
  stream?: false;
}
149 |
| - |
150 |
/** Request variant that pins `stream` to `true`, selecting the `Stream` overload. */
export interface AudioCreateParamsStreaming extends AudioCreateParamsBase {
  /**
   * If true, output is streamed for several characters at a time instead of waiting
   * for the full response. The stream terminates with `data: [DONE]`. If false,
   * return the encoded audio as octet stream
   */
  stream: true;
}
158 |
| - |
159 |
/** Type-only surface of the `Audio` resource, merged onto the class export. */
export declare namespace Audio {
  export {
    type AudioFile as AudioFile,
    type AudioSpeechStreamChunk as AudioSpeechStreamChunk,
    type AudioCreateParams as AudioCreateParams,
    type AudioCreateParamsNonStreaming as AudioCreateParamsNonStreaming,
    type AudioCreateParamsStreaming as AudioCreateParamsStreaming,
  };
}
| 3 | +export * from './audio/index'; |
0 commit comments