AUDIO
Base-Path:
https://api.hyprlab.io/v1
Create Speech
Generates audio from the input text.
curl https://api.hyprlab.io/v1/audio/speech \
  -H "Authorization: Bearer $HYPRLAB_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "tts-1",
    "input": "The quick brown fox jumped over the lazy dog.",
    "voice": "alloy"
  }' \
  --output speech.mp3
import fs from "fs";
import path from "path";
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: "<HYPRLAB_API_KEY>",
  baseURL: "https://api.hyprlab.io/v1",
});

const speechFile = path.resolve("./speech.mp3");

async function main() {
  const mp3 = await openai.audio.speech.create({
    model: "tts-1",
    voice: "alloy",
    input: "Today is a wonderful day to build something people love!",
  });
  console.log(speechFile);
  const buffer = Buffer.from(await mp3.arrayBuffer());
  await fs.promises.writeFile(speechFile, buffer);
}

main();
from pathlib import Path
from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

speech_file_path = Path(__file__).parent / "speech.mp3"

response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="The quick brown fox jumped over the lazy dog.",
)

response.stream_to_file(speech_file_path)
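The speech endpoint also takes the standard OpenAI optional parameters. The sketch below assumes HyprLab passes response_format and speed through unchanged; it requests WAV output at 1.25x playback speed with a different voice:

from pathlib import Path
from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

# response_format and speed are standard OpenAI speech parameters;
# they are assumed to be forwarded by the proxy.
response = client.audio.speech.create(
    model="tts-1-hd",
    voice="nova",
    input="The quick brown fox jumped over the lazy dog.",
    response_format="wav",
    speed=1.25,
)

response.stream_to_file(Path(__file__).parent / "speech.wav")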
Create Transcription
Transcribes audio into text in its original language.
curl https://api.hyprlab.io/v1/audio/transcriptions \
  -H "Authorization: Bearer $HYPRLAB_API_KEY" \
  -H "Content-Type: multipart/form-data" \
  -F file="@/path/to/file/audio.mp3" \
  -F model="whisper-1"
import fs from "fs";
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: "<HYPRLAB_API_KEY>",
  baseURL: "https://api.hyprlab.io/v1",
});

async function main() {
  const transcription = await openai.audio.transcriptions.create({
    file: fs.createReadStream("audio.mp3"),
    model: "whisper-1",
  });
  console.log(transcription.text);
}

main();
from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

audio_file = open("speech.mp3", "rb")
transcript = client.audio.transcriptions.create(
    model="whisper-1",
    file=audio_file,
)
print(transcript.text)
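The upstream OpenAI transcription endpoint also supports richer output. The sketch below assumes HyprLab forwards response_format, timestamp_granularities, and language; it requests segment-level timestamps alongside the plain text:

from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

audio_file = open("speech.mp3", "rb")
# verbose_json returns segment-level detail in addition to the text;
# language is an optional ISO-639-1 hint that can improve accuracy.
transcript = client.audio.transcriptions.create(
    model="whisper-1",
    file=audio_file,
    response_format="verbose_json",
    timestamp_granularities=["segment"],
    language="en",
)

print(transcript.text)
for segment in transcript.segments or []:
    print(segment.start, segment.end, segment.text)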
Create Translation
Translates audio into English text.
curl https://api.hyprlab.io/v1/audio/translations \
  -H "Authorization: Bearer $HYPRLAB_API_KEY" \
  -H "Content-Type: multipart/form-data" \
  -F file="@/path/to/file/german.m4a" \
  -F model="whisper-1"
import fs from "fs";
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: "<HYPRLAB_API_KEY>",
  baseURL: "https://api.hyprlab.io/v1",
});

async function main() {
  const translation = await openai.audio.translations.create({
    file: fs.createReadStream("speech.mp3"),
    model: "whisper-1",
  });
  console.log(translation.text);
}

main();
from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

audio_file = open("speech.mp3", "rb")
translation = client.audio.translations.create(
    model="whisper-1",
    file=audio_file,
)
print(translation.text)
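The upstream OpenAI translations endpoint additionally accepts an optional prompt and temperature. The sketch below assumes those parameters are passed through as well, and uses the same german.m4a sample file as the curl example above:

from openai import OpenAI

client = OpenAI(
    api_key="<HYPRLAB_API_KEY>",
    base_url="https://api.hyprlab.io/v1",
)

audio_file = open("german.m4a", "rb")
# The prompt can steer spelling of names and jargon in the English output;
# a low temperature keeps the translation close to deterministic.
translation = client.audio.translations.create(
    model="whisper-1",
    file=audio_file,
    prompt="HyprLab, Whisper",
    temperature=0.2,
)
print(translation.text)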
🎧 Audio Models

Model Name         Flat-Rate Pricing
whisper-1          $0.006 / Request
tts-1              $0.015 / Request
tts-1-1106         $0.015 / Request
tts-1-hd           $0.03 / Request
tts-1-hd-1106      $0.03 / Request
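Because every audio model is billed at a flat rate per request, estimating spend is simple multiplication. The snippet below works through a hypothetical month of usage with the rates from the table above; the request volumes are made-up examples:

# Flat per-request rates from the pricing table above.
RATES = {
    "whisper-1": 0.006,
    "tts-1": 0.015,
    "tts-1-1106": 0.015,
    "tts-1-hd": 0.03,
    "tts-1-hd-1106": 0.03,
}

# Hypothetical monthly request counts, purely for illustration.
requests = {"whisper-1": 500, "tts-1": 200, "tts-1-hd": 50}

total = sum(RATES[model] * count for model, count in requests.items())
print(f"Estimated cost: ${total:.2f}")  # 500*0.006 + 200*0.015 + 50*0.03 = $7.50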