// Copyright (c) 2024 Xiaomi Corporation
import 'dart:io';
import 'dart:typed_data';

import 'package:args/args.dart';
import 'package:sherpa_onnx/sherpa_onnx.dart' as sherpa_onnx;

import './init.dart';
void main(List<String> arguments) async {
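  // Prepare the sherpa-onnx runtime (see init.dart) before using the API.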
  await initSherpaOnnx();

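  // Command-line options: model files, the text to synthesize, and the output path.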
  final parser = ArgParser()
    ..addOption('model', help: 'Path to the ONNX model')
    ..addOption('tokens', help: 'Path to tokens.txt')
    ..addOption('lexicon', help: 'Path to lexicon.txt')
    ..addOption(
      'dict-dir',
      help: 'Path to jieba dict directory',
      defaultsTo: '',
    )
    ..addOption('rule-fsts', help: 'Path to rule fsts', defaultsTo: '')
    ..addOption('rule-fars', help: 'Path to rule fars', defaultsTo: '')
    ..addOption('text', help: 'Text to generate TTS for')
    ..addOption('output-wav', help: 'Filename to save the generated audio')
    ..addOption('speed', help: 'Speech speed', defaultsTo: '1.0')
    ..addOption(
      'sid',
      help: 'Speaker ID to select. Used only for multi-speaker TTS',
      defaultsTo: '0',
    );
  final res = parser.parse(arguments);
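  // model, lexicon, tokens, text, and output-wav are required; print the usage and exit otherwise.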
  if (res['model'] == null ||
      res['lexicon'] == null ||
      res['tokens'] == null ||
      res['output-wav'] == null ||
      res['text'] == null) {
    print(parser.usage);
    exit(1);
  }
  final model = res['model'] as String;
  final lexicon = res['lexicon'] as String;
  final tokens = res['tokens'] as String;
  final dictDir = res['dict-dir'] as String;
  final ruleFsts = res['rule-fsts'] as String;
  final ruleFars = res['rule-fars'] as String;
  final text = res['text'] as String;
  final outputWav = res['output-wav'] as String;
  var speed = double.tryParse(res['speed'] as String) ?? 1.0;
  final sid = int.tryParse(res['sid'] as String) ?? 0;

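  // A speed of 0 would make lengthScale (= 1 / speed) infinite, so fall back to 1.0.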
  if (speed == 0) {
    speed = 1.0;
  }

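  // lengthScale is the reciprocal of the speech speed: values above 1 slow the speech down.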
  final vits = sherpa_onnx.OfflineTtsVitsModelConfig(
    model: model,
    lexicon: lexicon,
    tokens: tokens,
    dictDir: dictDir,
    lengthScale: 1 / speed,
  );

  final modelConfig = sherpa_onnx.OfflineTtsModelConfig(
    vits: vits,
    numThreads: 1,
    debug: true,
  );
  final config = sherpa_onnx.OfflineTtsConfig(
    model: modelConfig,
    maxNumSenetences: 1,
    ruleFsts: ruleFsts,
    ruleFars: ruleFars,
  );

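  // Create the TTS engine, synthesize the text for the chosen speaker, then release the native handle.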
  final tts = sherpa_onnx.OfflineTts(config);
  final audio = tts.generate(text: text, sid: sid, speed: speed);
  tts.free();

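  // Save the generated samples to a WAV file at the sample rate reported by the model.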
  sherpa_onnx.writeWave(
    filename: outputWav,
    samples: audio.samples,
    sampleRate: audio.sampleRate,
  );
  print('Saved to $outputWav');
}