
/wav2vec_cycle_code/fairseq/examples/wav2vec/vq-wav2vec_featurize.py

https://gitlab.com/lwd17/enhanced_examplar_ae
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""

import argparse
import glob
import os
import os.path as osp
import pprint

import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader

try:
    import tqdm
except ImportError:
    print("Install tqdm to use --log-format=tqdm")

class FilesDataset:
    def __init__(self, files, labels):
        self.files = files
        if labels and osp.exists(labels):
            with open(labels, "r") as lbl_f:
                self.labels = [line.rstrip() for line in lbl_f]
        else:
            self.labels = labels

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        fname = self.files[index]

        wav, sr = sf.read(fname)
        assert sr == 16000

        wav = torch.from_numpy(wav).float()

        lbls = None
        if self.labels:
            if isinstance(self.labels, str):
                lbl_file = osp.splitext(fname)[0] + "." + self.labels
                with open(lbl_file, "r") as lblf:
                    lbls = lblf.readline()
                    assert lbls is not None
            else:
                lbls = self.labels[index]
        return wav, lbls

    def collate(self, batch):
        return batch
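
# Note: collate() deliberately returns the batch as an unpadded list of
# (wav, lbls) tuples; DatasetWriter.iterate() below consumes samples one at a
# time, so no padding or tensor stacking is needed.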

class ArgTypes:
    @staticmethod
    def existing_path(arg):
        arg = str(arg)
        assert osp.exists(arg), f"File {arg} does not exist"
        return arg

    @staticmethod
    def mkdir(arg):
        arg = str(arg)
        os.makedirs(arg, exist_ok=True)
        return arg

class DatasetWriter:
    def __init__(self):
        self.args = self.load_config()
        pprint.pprint(self.args.__dict__)

        self.model = self.load_model()

    def __getattr__(self, attr):
        return getattr(self.args, attr)

    def read_manifest(self, fname):
        with open(fname, "r") as fp:
            lines = fp.read().split("\n")
            root = lines.pop(0).strip()
            fnames = [
                osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
            ]
        return fnames
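
    # Illustrative manifest layout (the usual fairseq wav2vec .tsv style): the
    # first line is the root directory, each following line a tab-separated
    # relative path; any extra columns (e.g. frame counts) are ignored here:
    #
    #   /datasets/LibriSpeech
    #   train-clean-100/103/1240/103-1240-0000.flac    225360
    #   train-clean-100/103/1240/103-1240-0001.flac    255120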

    def process_splits(self):
        if self.args.shard is not None or self.args.num_shards is not None:
            assert self.args.shard is not None and self.args.num_shards is not None

        for split in self.splits:
            print(split)

            if self.extension == "tsv":
                datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
                print("Reading manifest file: ", datadir)
                files = self.read_manifest(datadir)
            else:
                datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
                files = glob.glob(datadir, recursive=True)

            assert len(files) > 0

            if self.args.shard is not None:
                files = files[self.args.shard :: self.args.num_shards]

            lbls = []
            with open(self.data_file(split), "w") as srcf:
                for line, lbl in self.iterate(files):
                    print(line, file=srcf)
                    if self.args.labels:
                        lbls.append(lbl + "\n")

            if self.args.labels:
                assert all(a is not None for a in lbls)
                with open(self.lbl_file(split), "w") as lblf:
                    lblf.writelines(lbls)
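
    # With --shard i --num-shards n, each worker handles every n-th file
    # starting at offset i (files[i::n]) and writes to "{split}.src.{i}" /
    # "{split}.lbl.{i}", so independent jobs never collide on output files.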

    def iterate(self, files):
        data = self.load_data(files)
        for samples in tqdm.tqdm(data, total=len(files) // 32):  # 32 = DataLoader batch size
            for wav, lbl in samples:
                x = wav.unsqueeze(0).float().cuda()

                # split utterances longer than --max-size into equal chunks
                div = 1
                while x.size(-1) // div > self.args.max_size:
                    div += 1

                xs = x.chunk(div, dim=-1)

                result = []
                for x in xs:
                    torch.cuda.empty_cache()
                    # inference only: keep feature extraction out of autograd
                    with torch.no_grad():
                        x = self.model.feature_extractor(x)
                        if self.quantize_location == "encoder":
                            _, idx = self.model.vector_quantizer.forward_idx(x)
                        else:
                            z = self.model.feature_aggregator(x)
                            _, idx = self.model.vector_quantizer.forward_idx(z)
                        idx = idx.squeeze(0).cpu()
                    result.append(idx)

                idx = torch.cat(result, dim=0)

                yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
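
    # Each yielded line encodes one utterance: one token per feature frame,
    # where a token is that frame's codebook indices joined by "-", e.g.
    # (assuming two quantizer groups) "311-128 311-128 27-456 ...".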

    def lbl_file(self, name):
        shard_part = "" if self.args.shard is None else f".{self.args.shard}"
        return osp.join(self.output_dir, f"{name}.lbl{shard_part}")

    def data_file(self, name):
        shard_part = "" if self.args.shard is None else f".{self.args.shard}"
        return osp.join(self.output_dir, f"{name}.src{shard_part}")

    def var_file(self):
        return osp.join(self.output_dir, "vars.pt")
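
    # Output layout under --output-dir: "{split}.src" (quantized token lines),
    # "{split}.lbl" (labels, if --labels was given), and "vars.pt" (the learned
    # codebook embeddings); sharded runs append ".{shard}" to src/lbl files.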

    def load_config(self):
        parser = argparse.ArgumentParser("Vector Quantized wav2vec features")

        # Model Arguments
        parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
        parser.add_argument("--data-parallel", action="store_true")

        # Output Arguments
        parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)

        # Data Arguments
        parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
        parser.add_argument("--splits", type=str, nargs="+", required=True)
        parser.add_argument("--extension", type=str, required=True)
        parser.add_argument("--labels", type=str, required=False)
        parser.add_argument("--shard", type=int, default=None)
        parser.add_argument("--num-shards", type=int, default=None)
        parser.add_argument("--max-size", type=int, default=1300000)

        # Logger Arguments
        parser.add_argument(
            "--log-format", type=str, choices=["none", "simple", "tqdm"]
        )

        return parser.parse_args()
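
    # Illustrative invocation (all paths are placeholders):
    #
    #   python vq-wav2vec_featurize.py \
    #       --checkpoint /path/to/vq-wav2vec.pt \
    #       --data-dir /path/to/manifests --splits train valid \
    #       --extension tsv --output-dir /path/to/features --log-format tqdm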

    def load_data(self, fnames):
        dataset = FilesDataset(fnames, self.args.labels)
        loader = DataLoader(
            dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
        )
        return loader

    def load_model(self):
        model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [self.checkpoint]
        )
        model = model[0]

        self.quantize_location = getattr(cfg.model, "vq", "encoder")

        model.eval().float()
        model.cuda()

        if self.data_parallel:
            model = nn.DataParallel(model)

        return model
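
    # quantize_location follows the model's "vq" config entry (default
    # "encoder"): with "encoder", iterate() quantizes the feature_extractor
    # output directly; otherwise it first runs the feature_aggregator and
    # quantizes its output.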

    def __call__(self):
        self.process_splits()

        if hasattr(self.model.feature_extractor, "vars") and (
            self.args.shard is None or self.args.shard == 0
        ):
            vars = (
                self.model.feature_extractor.vars.view(
                    self.model.feature_extractor.banks,
                    self.model.feature_extractor.num_vars,
                    -1,
                )
                .cpu()
                .detach()
            )
            print("writing learned latent variable embeddings: ", vars.shape)
            torch.save(vars, self.var_file())


if __name__ == "__main__":
    write_data = DatasetWriter()
    write_data()
    print("Done.")