#!/usr/bin/env python3

"""
This file computes fbank features of the LibriSpeech dataset.
It looks for manifests in the directory data/manifests,
and the generated fbank features are saved in data/fbank.
"""

import os
import subprocess
from contextlib import contextmanager
from pathlib import Path

from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
from lhotse.recipes.utils import read_manifests_if_cached


@contextmanager
def get_executor():
    # We'll either return a process pool or a distributed worker pool.
    # Note that this has to be a context manager because we might use
    # multiple context managers ("with" clauses) inside, and this way
    # everything will free up the resources at the right time.
    try:
        # If this is executed on the CLSP grid, we will try to use the
        # Grid Engine to distribute the tasks.
        # Other clusters can also benefit from this, provided a
        # cluster-specific wrapper
        # (see https://github.com/pzelasko/plz for reference).
        #
        # The following must be installed:
        #   $ pip install dask distributed
        #   $ pip install git+https://github.com/pzelasko/plz
        name = subprocess.check_output("hostname -f", shell=True, text=True)
        if name.strip().endswith(".clsp.jhu.edu"):
            import plz
            from distributed import Client

            with plz.setup_cluster() as cluster:
                cluster.scale(80)
                yield Client(cluster)
            return
    except Exception:
        # Fall back to a local process pool, e.g. when we're not on the
        # grid or the cluster setup fails.
        pass
    # No need to return anything - compute_and_store_features
    # will just instantiate the pool itself.
    yield None


def compute_fbank_librispeech():
    src_dir = Path("data/manifests")
    output_dir = Path("data/fbank")
    num_jobs = min(15, os.cpu_count())
    num_mel_bins = 80
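    # 80-dimensional log Mel filterbanks are a common front-end choice
    # for neural ASR models.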

    dataset_parts = (
        "dev-clean",
        "dev-other",
        "test-clean",
        "test-other",
        "train-clean-100",
        "train-clean-360",
        "train-other-500",
    )
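    # read_manifests_if_cached loads the pre-computed manifests from
    # src_dir (if they exist) into a dict indexed by partition name,
    # e.g. manifests["dev-clean"]["recordings"].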
    manifests = read_manifests_if_cached(
        dataset_parts=dataset_parts, output_dir=src_dir
    )
    assert manifests is not None

    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))

    with get_executor() as ex:  # Initialize the executor only once.
        for partition, m in manifests.items():
            if (output_dir / f"cuts_{partition}.json.gz").is_file():
                print(f"{partition} already exists - skipping.")
                continue
            print("Processing", partition)
            cut_set = CutSet.from_manifests(
                recordings=m["recordings"], supervisions=m["supervisions"]
            )
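            # For training partitions, add speed-perturbed copies at 0.9x
            # and 1.1x, tripling the amount of training data; dev and test
            # partitions are kept at the original speed.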
            if "train" in partition:
                cut_set = (
                    cut_set
                    + cut_set.perturb_speed(0.9)
                    + cut_set.perturb_speed(1.1)
                )
            cut_set = cut_set.compute_and_store_features(
                extractor=extractor,
                storage_path=f"{output_dir}/feats_{partition}",
                # When an executor is specified, make more partitions.
                num_jobs=num_jobs if ex is None else 80,
                executor=ex,
                storage_type=LilcomHdf5Writer,
            )
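            # The returned cuts reference the features stored above; save
            # the manifest so later stages can reuse them without
            # recomputing features.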
            cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")


if __name__ == "__main__":
    compute_fbank_librispeech()
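

# A minimal sketch of the surrounding pipeline (an assumption, not part of
# this script): the manifests in data/manifests can be prepared beforehand
# with the lhotse CLI, and then this script is run directly, e.g.
#
#   lhotse download librispeech <corpus-dir>
#   lhotse prepare librispeech <corpus-dir> data/manifests
#   python3 ./compute_fbank_librispeech.py  # hypothetical filename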