Compressed whisper-small #17

Files changed (38)
  1. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  2. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  3. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/metadata.json +70 -0
  4. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mil +0 -0
  5. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mlmodel +3 -0
  6. openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  7. openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  8. openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  9. openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/metadata.json +74 -0
  10. openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/model.mil +66 -0
  11. openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  12. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  13. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
  14. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/metadata.json +170 -0
  15. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mil +0 -0
  16. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mlmodel +3 -0
  17. openai_whisper-small.en_217MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
  18. openai_whisper-small.en_217MB/config.json +1 -0
  19. openai_whisper-small.en_217MB/generation_config.json +1 -0
  20. openai_whisper-small_216MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  21. openai_whisper-small_216MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  22. openai_whisper-small_216MB/AudioEncoder.mlmodelc/metadata.json +70 -0
  23. openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mil +0 -0
  24. openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mlmodel +3 -0
  25. openai_whisper-small_216MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  26. openai_whisper-small_216MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  27. openai_whisper-small_216MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  28. openai_whisper-small_216MB/MelSpectrogram.mlmodelc/metadata.json +74 -0
  29. openai_whisper-small_216MB/MelSpectrogram.mlmodelc/model.mil +66 -0
  30. openai_whisper-small_216MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  31. openai_whisper-small_216MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  32. openai_whisper-small_216MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
  33. openai_whisper-small_216MB/TextDecoder.mlmodelc/metadata.json +170 -0
  34. openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mil +0 -0
  35. openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mlmodel +3 -0
  36. openai_whisper-small_216MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
  37. openai_whisper-small_216MB/config.json +1 -0
  38. openai_whisper-small_216MB/generation_config.json +1 -0
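Both directories ship compiled Core ML bundles (`.mlmodelc`), so they load directly through the Core ML framework with no `MLModel.compileModel(at:)` step. A minimal Swift sketch, assuming the PR contents are checked out locally (the base path and function name are illustrative, not part of this PR):

```swift
import CoreML

// Load the three compiled bundles that make up one Whisper pipeline.
// `base` points at a hypothetical local checkout, e.g. openai_whisper-small.en_217MB/.
func loadWhisperModels(base: URL) throws -> (mel: MLModel, encoder: MLModel, decoder: MLModel) {
    let config = MLModelConfiguration()
    config.computeUnits = .all  // let Core ML schedule across ANE/GPU/CPU

    let mel = try MLModel(contentsOf: base.appendingPathComponent("MelSpectrogram.mlmodelc"),
                          configuration: config)
    let encoder = try MLModel(contentsOf: base.appendingPathComponent("AudioEncoder.mlmodelc"),
                              configuration: config)
    let decoder = try MLModel(contentsOf: base.appendingPathComponent("TextDecoder.mlmodelc"),
                              configuration: config)
    return (mel, encoder, decoder)
}
```

Per the metadata below, all three are iOS 16 / macOS 13+ mlprogram models, so this requires at least those OS versions.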
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dedfa7d9707c7a23e5dac213d0a7ed1bcb770586a8010c8b97b5415c7ab03a98
+ size 243
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fdc970c253d212db32bbb47c713e54d1b28677f9ff845834020dab59cfc5e9c
+ size 347
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,70 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 768, 1, 1500]",
+         "name" : "encoder_output_embeds",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios16.softmax" : 12,
+       "Ios16.add" : 99,
+       "Ios16.mul" : 12,
+       "Ios16.constexprSparseToDense" : 72,
+       "Ios16.constexprLutToDense" : 74,
+       "Ios16.batchNorm" : 25,
+       "Ios16.gelu" : 14,
+       "Ios16.reshape" : 48,
+       "Ios16.matmul" : 24,
+       "Ios16.layerNorm" : 25,
+       "Ios16.conv" : 148
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 80, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "AudioEncoder_mixedBitPalettized_4_bit",
+     "method" : "predict"
+   }
+ ]
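Per the schema above, the encoder maps Float16 mel features of shape [1, 80, 1, 3000] to [1, 768, 1, 1500] embeddings. A hedged Swift sketch of one prediction against that schema (the function name and error handling are illustrative):

```swift
import CoreML

// Run the AudioEncoder once. `melFeatures` must already match the input
// schema: Float16, shape [1, 80, 1, 3000].
func encode(audioEncoder: MLModel, melFeatures: MLMultiArray) throws -> MLMultiArray {
    let inputs = try MLDictionaryFeatureProvider(
        dictionary: ["melspectrogram_features": melFeatures])
    let outputs = try audioEncoder.prediction(from: inputs)
    // Output schema: "encoder_output_embeds", Float16, shape [1, 768, 1, 1500].
    return outputs.featureValue(for: "encoder_output_embeds")!.multiArrayValue!
}
```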
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mlmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20bf50c2166e980a7b89b503a9b61e580198843408bf1a1f5493c7199f542221
+ size 293189
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87be26c3c1e2a804cd739fcd24a7c97ed3b860332c46de173ed69290f5982cb0
+ size 62052160
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4f367993f0198e9858a4d89fb054318982c91a9bb5946e29231421c2f1100b9
+ size 243
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:806321f1034184a10b04dc50816219dec8ae9789698712050c81edecb9bb5aa7
+ size 328
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 80, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios16.reshape" : 2,
+       "Ios16.mul" : 2,
+       "SliceByIndex" : 1,
+       "Ios16.sub" : 1,
+       "Ios16.log" : 1,
+       "Ios16.square" : 2,
+       "Ios16.add" : 3,
+       "Squeeze" : 2,
+       "Ios16.matmul" : 1,
+       "Ios16.conv" : 2,
+       "Ios16.maximum" : 1,
+       "ExpandDims" : 4,
+       "Ios16.reduceMax" : 1,
+       "Identity" : 1,
+       "Pad" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 480000)",
+         "shortDescription" : "",
+         "shape" : "[480000]",
+         "name" : "audio",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "MelSpectrogram",
+     "method" : "predict"
+   }
+ ]
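The input schema above is a flat [480000] Float16 array, i.e. 30 s of 16 kHz mono audio; shorter clips are zero-padded to the full window. A sketch of driving it, under the same assumptions as the loading snippet near the top (function name is illustrative):

```swift
import CoreML

// Convert up to 30 s of 16 kHz mono samples into Whisper mel features.
func melFeatures(melModel: MLModel, samples: [Float]) throws -> MLMultiArray {
    let audio = try MLMultiArray(shape: [480000], dataType: .float16)
    // Fill every slot explicitly: copy the samples, zero-pad the rest
    // (a fresh MLMultiArray's contents are not guaranteed to be zero).
    for i in 0..<480000 {
        audio[i] = NSNumber(value: i < samples.count ? samples[i] : 0)
    }
    let out = try melModel.prediction(
        from: MLDictionaryFeatureProvider(dictionary: ["audio": audio]))
    // Output: "melspectrogram_features", Float16, shape [1, 80, 1, 3000].
    return out.featureValue(for: "melspectrogram_features")!.multiArrayValue!
}
```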
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3404.16.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.6.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
+ {
+   func main<ios16>(tensor<fp16, [480000]> audio) {
+     tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+     tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+     tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+     tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+     tensor<fp16, []> const_1_to_fp16 = const()[name = tensor<string, []>("const_1_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+     tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+     tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+     tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+     tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+     tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+     tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+     tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+     tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+     tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+     tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+     tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+     tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+     tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+     tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+     tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+     tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+     tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+     tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+     tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+     tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+     tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+     tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+     tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+     tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+     tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+     tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+     tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+     tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+     tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+     tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+     tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+     tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+     tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+     tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+     tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+     tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+     tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+     tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+     tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+     tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+     tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+     tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+     tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+     tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+     tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+   } -> (melspectrogram_features);
+ }
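Reading the MIL above: the two stride-160 convolutions against the weight blob act as the real and imaginary STFT projections, and the tail reproduces Whisper's standard log-mel post-processing. The constant 0x1.bccp-2 ≈ 0.4343 is log10(e), so the natural log followed by that multiply is a base-10 log. The recovered formula, in LaTeX:

```latex
\begin{aligned}
S &= M_{\text{mel}}\,\bigl(\operatorname{Re}(\mathrm{STFT})^2 + \operatorname{Im}(\mathrm{STFT})^2\bigr) + 2^{-24} \\
L &= \max\bigl(\log_{10} S,\ \max(\log_{10} S) - 8\bigr) \\
\text{melspectrogram\_features} &= \tfrac{L + 4}{4}
\end{aligned}
```

The 2^-24 term (0x1p-24) is a numerical floor before the log, the max(·) - 8 clamp limits dynamic range to 80 dB, and (L + 4)/4 is Whisper's usual normalization.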
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:801024dbc7a89c677be1f8b285de3409e35f7d1786c9c8d9d0d6842ac57a1c83
+ size 354080
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ca8f36ba20e72c389a2c207022bd645e32db617d79cda7679930099099c0691
+ size 243
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:572ff1890dfadfdb9193d1dc3e2bca618fbebbdf65e40d1f6ad4d5cfbee8b806
+ size 633
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,170 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1 × 51864)",
+         "shortDescription" : "",
+         "shape" : "[1, 1, 51864]",
+         "name" : "logits",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 1]",
+         "name" : "key_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 1]",
+         "name" : "value_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 1500]",
+         "name" : "alignment_heads_weights",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Transpose" : 1,
+       "Squeeze" : 1,
+       "Ios16.gather" : 3,
+       "Ios16.softmax" : 24,
+       "Ios16.reduceMean" : 1,
+       "Split" : 2,
+       "Ios16.linear" : 1,
+       "Ios16.add" : 194,
+       "Concat" : 3,
+       "ExpandDims" : 6,
+       "Ios16.sub" : 1,
+       "Ios16.conv" : 240,
+       "Ios16.gelu" : 12,
+       "Ios16.constexprLutToDense" : 120,
+       "Ios16.constexprSparseToDense" : 121,
+       "Ios16.layerNorm" : 37,
+       "SliceByIndex" : 38,
+       "Ios16.matmul" : 48,
+       "Ios16.batchNorm" : 37,
+       "Ios16.reshape" : 96,
+       "Ios16.mul" : 72
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "input_ids",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "cache_length",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 448]",
+         "name" : "key_cache",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 448]",
+         "name" : "value_cache",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "kv_cache_update_mask",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 768, 1, 1500]",
+         "name" : "encoder_output_embeds",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "decoder_key_padding_mask",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "TextDecoder_mixedBitPalettized_4_bit",
+     "method" : "predict"
+   }
+ ]
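The decoder runs one token per call: `input_ids` holds the current token, `cache_length` the number of positions already cached, and the [1, 9216, 1, 448] caches are 12 layers × 768 channels stacked along the channel axis. A hedged Swift sketch of one greedy step; the mask conventions in the comments (1 marks the write slot in `kv_cache_update_mask`; the caller scatters the returned cache updates) follow the usual WhisperKit-style setup and are assumptions, not part of this PR:

```swift
import CoreML

// One decoder step: feed the current token plus KV caches, return the
// argmax over the 51864-entry (small.en) vocabulary.
func decodeStep(textDecoder: MLModel,
                token: Int32,
                cacheLength: Int32,
                keyCache: MLMultiArray,       // Float16 [1, 9216, 1, 448]
                valueCache: MLMultiArray,     // Float16 [1, 9216, 1, 448]
                kvUpdateMask: MLMultiArray,   // Float16 [1, 448], assumed 1 at the write slot
                encoderEmbeds: MLMultiArray,  // Float16 [1, 768, 1, 1500]
                paddingMask: MLMultiArray     // Float16 [1, 448]
) throws -> Int32 {
    let inputIds = try MLMultiArray(shape: [1], dataType: .int32)
    inputIds[0] = NSNumber(value: token)
    let length = try MLMultiArray(shape: [1], dataType: .int32)
    length[0] = NSNumber(value: cacheLength)

    let inputs = try MLDictionaryFeatureProvider(dictionary: [
        "input_ids": inputIds,
        "cache_length": length,
        "key_cache": keyCache,
        "value_cache": valueCache,
        "kv_cache_update_mask": kvUpdateMask,
        "encoder_output_embeds": encoderEmbeds,
        "decoder_key_padding_mask": paddingMask,
    ])
    let out = try textDecoder.prediction(from: inputs)
    let logits = out.featureValue(for: "logits")!.multiArrayValue!  // [1, 1, 51864]

    // Greedy argmax; the caller also writes key_cache_updates /
    // value_cache_updates back into the caches at position cacheLength.
    var best: Int32 = 0
    var bestScore = -Float.infinity
    for i in 0..<logits.count {
        let v = logits[i].floatValue
        if v > bestScore { bestScore = v; best = Int32(i) }
    }
    return best
}
```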
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mlmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:768ef236a82b0836cd27b9b4d30310344766ea7d5df3385149c61082568a9a04
+ size 530863
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6cb706fa11e5352ad9e9c99b7e4bdbd9b0d73289c79303ada949a92a04c04af
+ size 153638256
openai_whisper-small.en_217MB/config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "openai/whisper-small.en", "activation_dropout": 0.0, "activation_function": "gelu", "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50256], "bos_token_id": 50257, "d_model": 768, "decoder_attention_heads": 12, "decoder_ffn_dim": 3072, "decoder_layerdrop": 0.0, "decoder_layers": 12, "decoder_start_token_id": 50257, "dropout": 0.0, "encoder_attention_heads": 12, "encoder_ffn_dim": 3072, "encoder_layerdrop": 0.0, "encoder_layers": 12, "eos_token_id": 50256, "forced_decoder_ids": [[1, 50362]], "init_std": 0.02, "is_encoder_decoder": true, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "model_type": "whisper", "num_hidden_layers": 12, "num_mel_bins": 80, "pad_token_id": 50256, "scale_embedding": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50357, 50358, 50359, 50360, 50361], "torch_dtype": "float32", "transformers_version": "4.27.0.dev0", "use_cache": true, "vocab_size": 51864}
openai_whisper-small.en_217MB/generation_config.json ADDED
@@ -0,0 +1 @@
+ {"alignment_heads": [[6, 6], [7, 0], [7, 3], [7, 8], [8, 2], [8, 5], [8, 7], [9, 0], [9, 4], [9, 8], [9, 10], [10, 0], [10, 1], [10, 2], [10, 3], [10, 6], [10, 11], [11, 2], [11, 4]], "begin_suppress_tokens": [220, 50256], "bos_token_id": 50257, "decoder_start_token_id": 50257, "eos_token_id": 50256, "forced_decoder_ids": [[1, 50362]], "is_multilingual": false, "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50362, "pad_token_id": 50256, "prev_sot_token_id": 50360, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50357, 50358, 50359, 50360, 50361], "transformers_version": "4.31.0.dev0"}
openai_whisper-small_216MB/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c33d98d0f2046b711d75041260eb53fd0ca1c3226930a779fe9082bc4763449
+ size 243
openai_whisper-small_216MB/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb991927256c8b71c8f5df3eb652bd0d67d933d7c36da4e53a5eb15a0a635ca1
+ size 347
openai_whisper-small_216MB/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,70 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 768, 1, 1500]",
+         "name" : "encoder_output_embeds",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios16.softmax" : 12,
+       "Ios16.add" : 99,
+       "Ios16.mul" : 12,
+       "Ios16.constexprSparseToDense" : 72,
+       "Ios16.constexprLutToDense" : 74,
+       "Ios16.batchNorm" : 25,
+       "Ios16.gelu" : 14,
+       "Ios16.reshape" : 48,
+       "Ios16.matmul" : 24,
+       "Ios16.layerNorm" : 25,
+       "Ios16.conv" : 148
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 80, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "AudioEncoder_mixedBitPalettized_4_bit",
+     "method" : "predict"
+   }
+ ]
openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mlmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21680c8556cfc807eb68ae78cf498b15d805b6e655350909bd6a7a0b54b2daeb
+ size 293185
openai_whisper-small_216MB/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfbda1e30a5cea269ea93e2ec69d78a6c5070c0b27982690f02d23e71fecb2d6
+ size 62057344
openai_whisper-small_216MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4f367993f0198e9858a4d89fb054318982c91a9bb5946e29231421c2f1100b9
+ size 243
openai_whisper-small_216MB/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:806321f1034184a10b04dc50816219dec8ae9789698712050c81edecb9bb5aa7
+ size 328
openai_whisper-small_216MB/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+         "shortDescription" : "",
+         "shape" : "[1, 80, 1, 3000]",
+         "name" : "melspectrogram_features",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Ios16.reshape" : 2,
+       "Ios16.mul" : 2,
+       "SliceByIndex" : 1,
+       "Ios16.sub" : 1,
+       "Ios16.log" : 1,
+       "Ios16.square" : 2,
+       "Ios16.add" : 3,
+       "Squeeze" : 2,
+       "Ios16.matmul" : 1,
+       "Ios16.conv" : 2,
+       "Ios16.maximum" : 1,
+       "ExpandDims" : 4,
+       "Ios16.reduceMax" : 1,
+       "Identity" : 1,
+       "Pad" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 480000)",
+         "shortDescription" : "",
+         "shape" : "[480000]",
+         "name" : "audio",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "MelSpectrogram",
+     "method" : "predict"
+   }
+ ]
openai_whisper-small_216MB/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3404.16.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.6.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
+ {
+   func main<ios16>(tensor<fp16, [480000]> audio) {
+     tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+     tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+     tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+     tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+     tensor<fp16, []> const_1_to_fp16 = const()[name = tensor<string, []>("const_1_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+     tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+     tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+     tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+     tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+     tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+     tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+     tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+     tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+     tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+     tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+     tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+     tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+     tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+     tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+     tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+     tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+     tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+     tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+     tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+     tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+     tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+     tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+     tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+     tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+     tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+     tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+     tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+     tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+     tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+     tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+     tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+     tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+     tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+     tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+     tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+     tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+     tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+     tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+     tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+     tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+     tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+     tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+     tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+     tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+     tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+     tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+     tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+     tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+   } -> (melspectrogram_features);
+ }
openai_whisper-small_216MB/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:801024dbc7a89c677be1f8b285de3409e35f7d1786c9c8d9d0d6842ac57a1c83
+ size 354080
openai_whisper-small_216MB/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7883317b9bd8a263bd395091bfd1ebbd098826dc7c75569dcaa870691ab46554
+ size 243
openai_whisper-small_216MB/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab598c8f928071a2eee05cdc2163acea3b8d6c7d69b66b31a93dac6681c26a79
+ size 633
openai_whisper-small_216MB/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,170 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1 × 51865)",
+         "shortDescription" : "",
+         "shape" : "[1, 1, 51865]",
+         "name" : "logits",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 1]",
+         "name" : "key_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 1]",
+         "name" : "value_cache_updates",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 1500]",
+         "name" : "alignment_heads_weights",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Transpose" : 1,
+       "Squeeze" : 1,
+       "Ios16.gather" : 3,
+       "Ios16.softmax" : 24,
+       "Ios16.reduceMean" : 1,
+       "Split" : 2,
+       "Ios16.linear" : 1,
+       "Ios16.add" : 194,
+       "Concat" : 3,
+       "ExpandDims" : 6,
+       "Ios16.sub" : 1,
+       "Ios16.conv" : 240,
+       "Ios16.gelu" : 12,
+       "Ios16.constexprLutToDense" : 120,
+       "Ios16.constexprSparseToDense" : 121,
+       "Ios16.layerNorm" : 37,
+       "SliceByIndex" : 20,
+       "Ios16.matmul" : 48,
+       "Ios16.batchNorm" : 37,
+       "Ios16.reshape" : 96,
+       "Ios16.mul" : 72
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.6.0",
+       "com.github.apple.coremltools.version" : "8.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "input_ids",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Int32",
+         "formattedType" : "MultiArray (Int32 1)",
+         "shortDescription" : "",
+         "shape" : "[1]",
+         "name" : "cache_length",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 448]",
+         "name" : "key_cache",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 9216, 1, 448]",
+         "name" : "value_cache",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "kv_cache_update_mask",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+         "shortDescription" : "",
+         "shape" : "[1, 768, 1, 1500]",
+         "name" : "encoder_output_embeds",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 448)",
+         "shortDescription" : "",
+         "shape" : "[1, 448]",
+         "name" : "decoder_key_padding_mask",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "TextDecoder_mixedBitPalettized_4_bit",
+     "method" : "predict"
+   }
+ ]
openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mlmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dac33b92f19491a71d67460384b95d56489b226f46c6074c17f3e81479bcca6
+ size 518452
openai_whisper-small_216MB/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2be2985330071f8188e1c2ab029e1b69d6e8c42865c8d58b20b8d41856da9ed3
+ size 153130482
openai_whisper-small_216MB/config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "openai/whisper-small", "activation_dropout": 0.0, "activation_function": "gelu", "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "d_model": 768, "decoder_attention_heads": 12, "decoder_ffn_dim": 3072, "decoder_layerdrop": 0.0, "decoder_layers": 12, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 12, "encoder_ffn_dim": 3072, "encoder_layerdrop": 0.0, "encoder_layers": 12, "eos_token_id": 50257, "forced_decoder_ids": [[1, 50259], [2, 50359], [3, 50363]], "init_std": 0.02, "is_encoder_decoder": true, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "model_type": "whisper", "num_hidden_layers": 12, "num_mel_bins": 80, "pad_token_id": 50257, "scale_embedding": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362], "torch_dtype": "float32", "transformers_version": "4.27.0.dev0", "use_cache": true, "vocab_size": 51865}
openai_whisper-small_216MB/generation_config.json ADDED
@@ -0,0 +1 @@
+ {"alignment_heads": [[5, 3], [5, 9], [8, 0], [8, 4], [8, 7], [8, 8], [9, 0], [9, 7], [9, 9], [10, 5]], "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "decoder_start_token_id": 50258, "eos_token_id": 50257, "forced_decoder_ids": [[1, null], [2, 50359]], "is_multilingual": true, "lang_to_id": {"<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350, "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292, "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315, "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285, "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262, "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277, "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333, "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276, "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312, "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266, "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323, "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345, "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301, "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296, "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343, "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342, "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269, "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263, "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298, "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317, "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318, "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289, "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351, "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278, "<|yi|>": 50335, "<|yo|>": 50325, "<|zh|>": 50260}, "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50363, "pad_token_id": 50257, "prev_sot_token_id": 50361, "return_timestamps": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50358, 50359, 50360, 50361, 50362], "task_to_id": {"transcribe": 50359, "translate": 50358}, "transformers_version": "4.31.0.dev0"}