{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU",
    "gpuClass": "standard"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# Running MMS-LID inference in Colab"
      ],
      "metadata": {
        "id": "Rhm7khm6GskV"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Step 1: Clone fairseq-py and install latest version"
      ],
      "metadata": {
        "id": "2GfxksHDGyJv"
      }
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Cj2x80SegRzr",
        "outputId": "c81e367d-ec5f-4b17-b375-6980d6291c43"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "fatal: destination path 'fairseq' already exists and is not an empty directory.\n",
            "/content\n",
            "/content/fairseq\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Obtaining file:///content/fairseq\n",
            "  Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
            "  Checking if build backend supports build_editable ... \u001b[?25l\u001b[?25hdone\n",
            "  Getting requirements to build editable ... \u001b[?25l\u001b[?25hdone\n",
            "  Preparing editable metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: cffi in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (1.15.1)\n",
            "Requirement already satisfied: cython in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (0.29.34)\n",
            "Requirement already satisfied: hydra-core<1.1,>=1.0.7 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (1.0.7)\n",
            "Requirement already satisfied: omegaconf<2.1 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2.0.6)\n",
            "Requirement already satisfied: numpy>=1.21.3 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (1.22.4)\n",
            "Requirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2022.10.31)\n",
            "Requirement already satisfied: sacrebleu>=1.4.12 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2.3.1)\n",
            "Requirement already satisfied: torch>=1.13 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2.0.1+cu118)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (4.65.0)\n",
            "Requirement already satisfied: bitarray in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2.7.3)\n",
            "Requirement already satisfied: torchaudio>=0.8.0 in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (2.0.2+cu118)\n",
            "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (1.2.2)\n",
            "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from fairseq==0.12.2) (23.1)\n",
            "Requirement already satisfied: antlr4-python3-runtime==4.8 in /usr/local/lib/python3.10/dist-packages (from hydra-core<1.1,>=1.0.7->fairseq==0.12.2) (4.8)\n",
            "Requirement already satisfied: PyYAML>=5.1.* in /usr/local/lib/python3.10/dist-packages (from omegaconf<2.1->fairseq==0.12.2) (6.0)\n",
            "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from omegaconf<2.1->fairseq==0.12.2) (4.5.0)\n",
            "Requirement already satisfied: portalocker in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.4.12->fairseq==0.12.2) (2.7.0)\n",
            "Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.4.12->fairseq==0.12.2) (0.8.10)\n",
            "Requirement already satisfied: colorama in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.4.12->fairseq==0.12.2) (0.4.6)\n",
            "Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.4.12->fairseq==0.12.2) (4.9.2)\n",
            "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.13->fairseq==0.12.2) (3.12.0)\n",
            "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.13->fairseq==0.12.2) (1.11.1)\n",
            "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.13->fairseq==0.12.2) (3.1)\n",
            "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13->fairseq==0.12.2) (3.1.2)\n",
            "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.13->fairseq==0.12.2) (2.0.0)\n",
            "Requirement already satisfied: cmake in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.13->fairseq==0.12.2) (3.25.2)\n",
            "Requirement already satisfied: lit in /usr/local/lib/python3.10/dist-packages (from triton==2.0.0->torch>=1.13->fairseq==0.12.2) (16.0.5)\n",
            "Requirement already satisfied: pycparser in /usr/local/lib/python3.10/dist-packages (from cffi->fairseq==0.12.2) (2.21)\n",
            "Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->fairseq==0.12.2) (1.10.1)\n",
            "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->fairseq==0.12.2) (1.2.0)\n",
            "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->fairseq==0.12.2) (3.1.0)\n",
            "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.13->fairseq==0.12.2) (2.1.2)\n",
            "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.13->fairseq==0.12.2) (1.3.0)\n",
            "Building wheels for collected packages: fairseq\n",
            "  Building editable for fairseq (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for fairseq: filename=fairseq-0.12.2-0.editable-cp310-cp310-linux_x86_64.whl size=9219 sha256=b6289e3715902d34fd7c54490679210a5be155dd4416754f0e8c376f193b5ac4\n",
            "  Stored in directory: /tmp/pip-ephem-wheel-cache-o62sj_ry/wheels/c6/d7/db/bc419b1daa8266aa8de2a7c4d29f62dbfa814e8701fe4695a2\n",
            "Successfully built fairseq\n",
            "Installing collected packages: fairseq\n",
            "  Attempting uninstall: fairseq\n",
            "    Found existing installation: fairseq 0.12.2\n",
            "    Uninstalling fairseq-0.12.2:\n",
            "      Successfully uninstalled fairseq-0.12.2\n",
            "Successfully installed fairseq-0.12.2\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Requirement already satisfied: tensorboardX in /usr/local/lib/python3.10/dist-packages (2.6)\n",
            "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (1.22.4)\n",
            "Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (23.1)\n",
            "Requirement already satisfied: protobuf<4,>=3.8.0 in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (3.20.3)\n"
          ]
        }
      ],
      "source": [
        "import os\n",
        "\n",
        "!git clone https://github.com/pytorch/fairseq\n",
        "\n",
        "# Change current working directory\n",
        "!pwd\n",
        "%cd \"/content/fairseq\"\n",
        "!pip install --editable ./ \n",
        "!pip install tensorboardX\n"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## 2. Download MMS-LID model\n",
        "\n"
      ],
      "metadata": {
        "id": "cyk4JvZOHSw3"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "available_models = [\"l126\", \"l256\", \"l512\", \"l1024\", \"l2048\", \"l4017\"]\n",
        "\n",
        "# We will use L126 model which can recognize 126 languages \n",
        "model_name = available_models[0] # l126\n",
        "print(f\"Using model - {model_name}\")\n",
        "print(f\"Visit https://dl.fbaipublicfiles.com/mms/lid/mms1b_{model_name}_langs.html to check all the languages supported by this model.\")\n",
        "\n",
        "! mkdir -p /content/models_lid\n",
        "!wget -P /content/models_lid/{model_name} 'https://dl.fbaipublicfiles.com/mms/lid/mms1b_{model_name}.pt'\n",
        "!wget -P /content/models_lid/{model_name} 'https://dl.fbaipublicfiles.com/mms/lid/dict/l126/dict.lang.txt'\n",
        "\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "3uZ9WG85gZId",
        "outputId": "93f456ab-7aa1-47ac-a054-c0e3417b2e5e"
      },
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Using model - l126\n",
            "Visit https://dl.fbaipublicfiles.com/mms/lid/mms1b_l126_langs.html to check all the languages supported by this model.\n",
            "--2023-05-25 18:18:45--  https://dl.fbaipublicfiles.com/mms/lid/mms1b_l126.pt\n",
            "Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 52.84.251.15, 52.84.251.114, 52.84.251.27, ...\n",
            "Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|52.84.251.15|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 3856229421 (3.6G) [binary/octet-stream]\n",
            "Saving to: β€˜/content/models_lid/l126/mms1b_l126.pt’\n",
            "\n",
            "mms1b_l126.pt       100%[===================>]   3.59G   198MB/s    in 24s     \n",
            "\n",
            "2023-05-25 18:19:09 (155 MB/s) - β€˜/content/models_lid/l126/mms1b_l126.pt’ saved [3856229421/3856229421]\n",
            "\n",
            "--2023-05-25 18:19:09--  https://dl.fbaipublicfiles.com/mms/lid/dict/l126/dict.lang.txt\n",
            "Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 52.84.251.15, 52.84.251.114, 52.84.251.27, ...\n",
            "Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|52.84.251.15|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 882 [text/plain]\n",
            "Saving to: β€˜/content/models_lid/l126/dict.lang.txt’\n",
            "\n",
            "dict.lang.txt       100%[===================>]     882  --.-KB/s    in 0s      \n",
            "\n",
            "2023-05-25 18:19:09 (183 MB/s) - β€˜/content/models_lid/l126/dict.lang.txt’ saved [882/882]\n",
            "\n"
          ]
        }
      ]
    },
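    {
      "cell_type": "markdown",
      "source": [
        "As a quick sanity check, we can load the dict file and list the language codes the model can predict. This is a minimal sketch, assuming the standard fairseq dictionary format of one `<lang> <count>` entry per line."
      ],
      "metadata": {}
    },
    {
      "cell_type": "code",
      "source": [
        "# Sanity check: load the label dictionary and show a few language codes.\n",
        "# Assumes the standard fairseq dict format: one \"<lang> <count>\" entry per line.\n",
        "with open(f\"/content/models_lid/{model_name}/dict.lang.txt\") as f:\n",
        "  langs = [line.split()[0] for line in f if line.strip()]\n",
        "print(f\"{len(langs)} languages, e.g. {langs[:10]}\")"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },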
    {
      "cell_type": "markdown",
      "source": [
        "## 3. Prepare manifest files\n",
        "Create a folder on path '/content/audio_samples/' and upload your .wav audio files that you need to recognize e.g. '/content/audio_samples/abc.wav' , '/content/audio_samples/def.wav' etc...\n",
        "\n",
        "Note: You need to make sure that the audio data you are using has a sample rate of 16kHz You can easily do this with FFMPEG like the example below that converts .mp3 file to .flac and fixing the audio sample rate\n",
        "\n",
        "Here, we use three examples - one audio file from English, Hindi, Chinese each. "
      ],
      "metadata": {
        "id": "3p5-TQvKHXjO"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "! mkdir -p /content/audio_samples/\n",
        "for key in [\"en_us\", \"hi_in\", \"cmn_hans_cn\"]:\n",
        "  !wget -O /content/audio_samples/tmp.mp3 https://datasets-server.huggingface.co/assets/google/fleurs/--/{key}/train/0/audio/audio.mp3\n",
        "  !ffmpeg -hide_banner -loglevel error -y -i   /content/audio_samples/tmp.mp3 -ar 16000 /content/audio_samples/{key}.wav\n",
        "\n",
        "! mkdir -p /content/audio_samples/\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "cnim4bokprbB",
        "outputId": "89026a92-0518-49c2-9c84-98f0966caeac"
      },
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "--2023-05-25 18:19:09--  https://datasets-server.huggingface.co/assets/google/fleurs/--/en_us/train/0/audio/audio.mp3\n",
            "Resolving datasets-server.huggingface.co (datasets-server.huggingface.co)... 34.200.186.24, 44.197.252.161, 54.165.66.147, ...\n",
            "Connecting to datasets-server.huggingface.co (datasets-server.huggingface.co)|34.200.186.24|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 20853 (20K) [audio/mpeg]\n",
            "Saving to: β€˜/content/audio_samples/tmp.mp3’\n",
            "\n",
            "/content/audio_samp 100%[===================>]  20.36K  92.8KB/s    in 0.2s    \n",
            "\n",
            "2023-05-25 18:19:11 (92.8 KB/s) - β€˜/content/audio_samples/tmp.mp3’ saved [20853/20853]\n",
            "\n",
            "--2023-05-25 18:19:12--  https://datasets-server.huggingface.co/assets/google/fleurs/--/hi_in/train/0/audio/audio.mp3\n",
            "Resolving datasets-server.huggingface.co (datasets-server.huggingface.co)... 34.200.186.24, 44.197.252.161, 54.165.66.147, ...\n",
            "Connecting to datasets-server.huggingface.co (datasets-server.huggingface.co)|34.200.186.24|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 26361 (26K) [audio/mpeg]\n",
            "Saving to: β€˜/content/audio_samples/tmp.mp3’\n",
            "\n",
            "/content/audio_samp 100%[===================>]  25.74K   116KB/s    in 0.2s    \n",
            "\n",
            "2023-05-25 18:19:13 (116 KB/s) - β€˜/content/audio_samples/tmp.mp3’ saved [26361/26361]\n",
            "\n",
            "--2023-05-25 18:19:13--  https://datasets-server.huggingface.co/assets/google/fleurs/--/cmn_hans_cn/train/0/audio/audio.mp3\n",
            "Resolving datasets-server.huggingface.co (datasets-server.huggingface.co)... 34.200.186.24, 44.197.252.161, 54.165.66.147, ...\n",
            "Connecting to datasets-server.huggingface.co (datasets-server.huggingface.co)|34.200.186.24|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 23877 (23K) [audio/mpeg]\n",
            "Saving to: β€˜/content/audio_samples/tmp.mp3’\n",
            "\n",
            "/content/audio_samp 100%[===================>]  23.32K   105KB/s    in 0.2s    \n",
            "\n",
            "2023-05-25 18:19:14 (105 KB/s) - β€˜/content/audio_samples/tmp.mp3’ saved [23877/23877]\n",
            "\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "! mkdir -p /content/manifest/\n",
        "import os\n",
        "with open(\"/content/manifest/dev.tsv\", \"w\") as ftsv, open(\"/content/manifest/dev.lang\", \"w\") as flang:\n",
        "  ftsv.write(\"/\\n\")\n",
        "\n",
        "  for fl in os.listdir(\"/content/audio_samples/\"):\n",
        "    if not fl.endswith(\".wav\"):\n",
        "      continue\n",
        "    audio_path = f\"/content/audio_samples/{fl}\"\n",
        "    # duration should be number of samples in audio. For inference, using a random value should be fine. \n",
        "    duration = 1234 \n",
        "    ftsv.write(f\"{audio_path}\\t{duration}\\n\")\n",
        "    flang.write(\"eng\\n\") # This is the \"true\" language for the audio. For inference, using a random value should be fine. \n"
      ],
      "metadata": {
        "id": "C2QcjRT-BArW"
      },
      "execution_count": 7,
      "outputs": []
    },
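    {
      "cell_type": "markdown",
      "source": [
        "The placeholder duration above is enough for inference. If you prefer exact values, the sketch below (assuming the `soundfile` package, which ships with Colab) rewrites the manifest with the true sample counts:"
      ],
      "metadata": {}
    },
    {
      "cell_type": "code",
      "source": [
        "# Optional: rewrite the manifest with true sample counts instead of a placeholder.\n",
        "# Minimal sketch assuming the soundfile package is available.\n",
        "import os\n",
        "import soundfile as sf\n",
        "\n",
        "with open(\"/content/manifest/dev.tsv\", \"w\") as ftsv, open(\"/content/manifest/dev.lang\", \"w\") as flang:\n",
        "  ftsv.write(\"/\\n\")\n",
        "  for fl in sorted(os.listdir(\"/content/audio_samples/\")):\n",
        "    if not fl.endswith(\".wav\"):\n",
        "      continue\n",
        "    audio_path = f\"/content/audio_samples/{fl}\"\n",
        "    info = sf.info(audio_path)  # info.frames = number of samples per channel\n",
        "    ftsv.write(f\"{audio_path}\\t{info.frames}\\n\")\n",
        "    flang.write(\"eng\\n\")  # placeholder ground-truth label; fine for inference\n"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    },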
    {
      "cell_type": "markdown",
      "source": [
        "# 4: Run Inference and transcribe your audio(s)\n"
      ],
      "metadata": {
        "id": "44UvHjmMI28Z"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "\n",
        "os.environ[\"PYTHONPATH\"] = \"/content/fairseq\"\n",
        "os.environ[\"PREFIX\"] = \"INFER\"\n",
        "os.environ[\"HYDRA_FULL_ERROR\"] = \"1\"\n",
        "os.environ[\"USER\"] = \"mms_lid_user\"\n",
        "\n",
        "!python3 examples/mms/lid/infer.py /content/models_lid/{model_name} --path /content/models_lid/{model_name}/mms1b_l126.pt \\\n",
        "  --task audio_classification  --infer-manifest /content/manifest/dev.tsv --output-path /content/manifest/"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "J8N1RKtBiw5V",
        "outputId": "09d3fe43-26a4-4f9b-c56d-d38b6d45cdab"
      },
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "2023-05-25 18:19:19.545731: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
            "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
            "2023-05-25 18:19:21.567795: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
            "| loading model from /content/models_lid/l126/mms1b_l126.pt\n",
            "2023-05-25 18:19:29 | INFO | fairseq.tasks.audio_classification | Using dict_path : /content/models_lid/l126/dict.lang.txt\n",
            "2023-05-25 18:19:29 | INFO | root | === Number of labels = 126\n",
            "2023-05-25 18:20:01 | INFO | fairseq.data.audio.raw_audio_dataset | loaded 3, skipped 0 samples\n",
            "2023-05-25 18:20:01 | INFO | fairseq.tasks.fairseq_task | can_reuse_epoch_itr = True\n",
            "2023-05-25 18:20:01 | INFO | fairseq.tasks.fairseq_task | reuse_dataloader = True\n",
            "2023-05-25 18:20:01 | INFO | fairseq.tasks.fairseq_task | rebuild_batches = True\n",
            "2023-05-25 18:20:01 | INFO | fairseq.tasks.fairseq_task | batches will be rebuilt for each epoch\n",
            "2023-05-25 18:20:01 | INFO | fairseq.tasks.fairseq_task | creating new batches for epoch 1\n",
            "/usr/local/lib/python3.10/dist-packages/torch/utils/data/dataloader.py:560: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
            "  warnings.warn(_create_warning_msg(\n",
            "3it [00:07,  2.61s/it]\n",
            "Outputs will be located at - /content/manifest//predictions.txt\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "print(\"----- INPUT FILES -----\")\n",
        "! tail -n +2 /content/manifest/dev.tsv\n",
        "\n",
        "print(\"\\n----- TOP-K PREDICTONS WITH SCORE -----\")\n",
        "! cat /content/manifest//predictions.txt"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "5f7FROqiC-2z",
        "outputId": "3a28ceee-dbb7-4810-f9ca-d11b14a8340b"
      },
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "----- INPUT FILES -----\n",
            "/content/audio_samples/hi_in.wav\t1234\n",
            "/content/audio_samples/en_us.wav\t1234\n",
            "/content/audio_samples/cmn_hans_cn.wav\t1234\n",
            "\n",
            "----- TOP-K PREDICTONS WITH SCORE -----\n",
            "[[\"hin\", 0.9931250810623169], [\"urd\", 0.005808886140584946], [\"snd\", 0.0005312535213306546]]\n",
            "[[\"eng\", 0.9989539980888367], [\"fas\", 0.00036296260077506304], [\"haw\", 7.031611312413588e-05]]\n",
            "[[\"cmn\", 0.9996059536933899], [\"bod\", 0.0002111078501911834], [\"kor\", 9.211552242049947e-05]]\n"
          ]
        }
      ]
    },
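    {
      "cell_type": "markdown",
      "source": [
        "Each line of `predictions.txt` holds a JSON list of `[language, score]` pairs for one input, in the same order as the audio paths in `dev.tsv`. Below is a minimal sketch pairing each file with its top prediction:"
      ],
      "metadata": {}
    },
    {
      "cell_type": "code",
      "source": [
        "# Pair each manifest entry with its top predicted language.\n",
        "import json\n",
        "\n",
        "with open(\"/content/manifest/dev.tsv\") as ftsv:\n",
        "  paths = [line.split(\"\\t\")[0] for line in ftsv.readlines()[1:]]\n",
        "with open(\"/content/manifest/predictions.txt\") as fpred:\n",
        "  preds = [json.loads(line) for line in fpred]\n",
        "\n",
        "for path, topk in zip(paths, preds):\n",
        "  lang, score = topk[0]\n",
        "  print(f\"{path} -> {lang} ({score:.4f})\")"
      ],
      "metadata": {},
      "execution_count": null,
      "outputs": []
    }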
  ]
}