smpanaro committed on
Commit aa2d013 · verified · 1 Parent(s): dca2f86

Add two untested sub-900MB large-v3 models


886MB: 4.4 Bit Cat Encoder, 4.0 Bit Cat Decoder
889MB: 4.4 Bit SplitHeadQ Encoder, 4.0 Bit Decoder
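Both size points come from palettizing (LUT-quantizing) the encoder/decoder weights; the fractional "4.4 Bit" figures are averages over layers that mix 4-bit and 6-bit lookup tables (see `storagePrecision` in the metadata below). As a rough illustration of how such a palettized mlprogram is produced with coremltools 7 — uniform 4-bit only, not the mixed-bit recipe behind these exact models, and with placeholder paths:

```python
import coremltools as ct
from coremltools.optimize.coreml import (
    OpPalettizerConfig, OptimizationConfig, palettize_weights,
)

# Uniform 4-bit k-means palettization. A mixed-bit average like "4.4 bit"
# would come from overriding individual ops (op_name_configs) with 6-bit
# tables; the per-op assignment used for this commit is not part of the diff.
mlmodel = ct.models.MLModel("AudioEncoder.mlpackage")  # placeholder path
config = OptimizationConfig(
    global_config=OpPalettizerConfig(mode="kmeans", nbits=4))
palettize_weights(mlmodel, config).save("AudioEncoder_4bit.mlpackage")
```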

Files changed (30)
  1. openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  2. openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  3. openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/metadata.json +68 -0
  4. openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/model.mil +0 -0
  5. openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  6. openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  7. openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  8. openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/metadata.json +71 -0
  9. openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/model.mil +66 -0
  10. openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  11. openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  12. openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
  13. openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/metadata.json +166 -0
  14. openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/model.mil +0 -0
  15. openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
  16. openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  17. openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  18. openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/metadata.json +70 -0
  19. openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/model.mil +0 -0
  20. openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  21. openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  22. openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  23. openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/metadata.json +71 -0
  24. openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/model.mil +66 -0
  25. openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  26. openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  27. openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
  28. openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/metadata.json +166 -0
  29. openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/model.mil +0 -0
  30. openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2858d3e2ce2e6e83aaca6fbf5d08d03374c9ea0fde204f18f58d97f3a03aaf3d
+ size 243
openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:273d6cd004f95763e9d03e5d36622f11038819a81b9eafed64b1d95444e04f62
+ size 348
openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,68 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Palettized (6 bits))",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.rsqrt" : 65,
+ "Ios16.mul" : 354,
+ "Ios16.constexprLutToDense" : 194,
+ "Ios16.sub" : 193,
+ "Ios16.conv" : 194,
+ "Ios16.add" : 130,
+ "Ios16.reduceMean" : 130,
+ "Ios16.matmul" : 64,
+ "Ios16.gelu" : 34,
+ "Ios16.softmax" : 32,
+ "Ios16.batchNorm" : 65,
+ "Ios16.reshape" : 128
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoder_mixedBitPalettized_4_0_bit",
+ "method" : "predict"
+ }
+ ]
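The schema above is all that is needed to drive the compiled encoder from Python. A minimal sketch, assuming a local checkout of this repo and coremltools 7 (`CompiledMLModel` loads `.mlmodelc` bundles directly):

```python
import numpy as np
import coremltools as ct

# Load the compiled encoder; the path assumes this repo's layout.
encoder = ct.models.CompiledMLModel(
    "openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc",
    compute_units=ct.ComputeUnit.CPU_AND_NE,
)

# Input must match the inputSchema: Float16 [1, 128, 1, 3000].
mel = np.zeros((1, 128, 1, 3000), dtype=np.float16)
out = encoder.predict({"melspectrogram_features": mel})
print(out["encoder_output_embeds"].shape)  # (1, 1280, 1, 1500)
```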
openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-large-v3_886MB/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ed12d962c639e67f4e3b249715c546880740310b60cf7984820c13a0a5fdb3f
+ size 326453248
openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091a361134891f94e613562771beea0d93a9aefbc6984ba86c60f856e07a508f
+ size 243
openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3c5778c86d6fbc6a9817a56dbcac05a946a4d95c77f6db8355572f3be9e9a68
+ size 329
openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,71 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Pad" : 1,
+ "Ios16.mul" : 2,
+ "SliceByIndex" : 1,
+ "Ios16.sub" : 1,
+ "Ios16.log" : 1,
+ "Ios16.conv" : 2,
+ "Ios16.add" : 3,
+ "Ios16.square" : 2,
+ "Ios16.matmul" : 1,
+ "Squeeze" : 2,
+ "Ios16.maximum" : 1,
+ "ExpandDims" : 4,
+ "Ios16.reduceMax" : 1,
+ "Identity" : 1,
+ "Ios16.reshape" : 2
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.version" : "7.1",
+ "com.github.apple.coremltools.source" : "torch==2.2.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 480000)",
+ "shortDescription" : "",
+ "shape" : "[480000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram",
+ "method" : "predict"
+ }
+ ]
openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
+ {
+ func main<ios16>(tensor<fp16, [480000]> audio) {
+ tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+ tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+ tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+ tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+ tensor<fp16, []> input_3_constant_val_0_to_fp16 = const()[name = tensor<string, []>("input_3_constant_val_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = input_3_constant_val_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+ tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+ tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+ tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+ tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+ tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+ tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+ tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+ tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+ tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+ tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+ tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+ tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+ tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+ tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+ tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+ tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+ tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+ tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+ tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+ tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+ tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+ tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+ tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+ tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+ tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+ tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+ tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+ tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+ tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+ tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+ } -> (melspectrogram_features);
+ }
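For readers not fluent in MIL: the program above is Whisper's log-mel front end with the STFT expressed as two strided convolutions (windowed cos/sin DFT bases read from weight.bin). A NumPy rendering of the same math, treating the baked-in weights as given:

```python
import numpy as np

def log_mel(audio, dft_re, dft_im, mel_filters):
    """audio: [480000] (30 s @ 16 kHz); dft_re/dft_im: [201, 400] conv
    weights from weight.bin; mel_filters: [128, 201]. Mirrors the MIL ops."""
    x = np.pad(audio, 200, mode="reflect")                            # pad
    frames = np.lib.stride_tricks.sliding_window_view(x, 400)[::160]  # [3001, 400]
    re, im = frames @ dft_re.T, frames @ dft_im.T                     # conv_0 / conv_1
    mags = (re**2 + im**2).T[:, :3000]                                # square, add, slice
    mel = mel_filters @ mags + 2.0**-24                               # matmul + epsilon
    log_spec = np.log(mel) * 0.4343                                   # 0x1.bccp-2: ln -> log10
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)             # clamp to max - 8
    return (log_spec + 4.0) / 4.0                                     # [128, 3000]
```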
openai_whisper-large-v3_886MB/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99c05b0a0788b892bd3baf861195f55ad3a892c3e60e0372278359ffaad371b2
+ size 373376
openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a22d61d41f5d9af01210bd2014544e626a1c113a9b91a9eb7db6385aa6789a9a
+ size 243
openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f41a7939b47f7cf127aa69dfd8c552a141dc773b936bf8621aeffcd201fb9e30
+ size 637
openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,166 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 51866]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 1]",
+ "name" : "key_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 1]",
+ "name" : "value_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1500]",
+ "name" : "alignment_heads_weights",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Transpose" : 1,
+ "Ios16.gather" : 2,
+ "Squeeze" : 1,
+ "Ios16.reduceMean" : 195,
+ "Ios16.softmax" : 64,
+ "Split" : 2,
+ "Ios16.linear" : 1,
+ "Ios16.add" : 290,
+ "Concat" : 3,
+ "ExpandDims" : 6,
+ "Ios16.sub" : 291,
+ "Ios16.conv" : 320,
+ "Ios16.gelu" : 32,
+ "Ios16.constexprLutToDense" : 320,
+ "Ios16.matmul" : 128,
+ "Ios16.reshape" : 256,
+ "Ios16.batchNorm" : 97,
+ "Ios16.rsqrt" : 97,
+ "SliceByIndex" : 20,
+ "Ios16.mul" : 706
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "cache_length",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 448]",
+ "name" : "key_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 448]",
+ "name" : "value_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "kv_cache_update_mask",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "decoder_key_padding_mask",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "TextDecoder_mixedBitPalettized_4_0_bit",
+ "method" : "predict"
+ }
+ ]
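The decoder schema above implies a single-token, external-KV-cache loop: each call consumes one token plus the running caches and returns one column of cache updates. A sketch of one greedy step — the 0/1 update mask and 0/−inf padding-mask conventions are assumptions borrowed from WhisperKit-style runners, not stated in this diff:

```python
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel(
    "openai_whisper-large-v3_886MB/TextDecoder.mlmodelc")

# 40960 = 32 layers x 1280 dims; 448 is the max decoded length.
key_cache = np.zeros((1, 40960, 1, 448), dtype=np.float16)
value_cache = np.zeros_like(key_cache)

def decode_step(token, pos, encoder_embeds):
    update_mask = np.zeros((1, 448), dtype=np.float16)
    update_mask[0, pos] = 1.0                     # slot this step writes
    padding_mask = np.full((1, 448), -np.inf, dtype=np.float16)
    padding_mask[0, : pos + 1] = 0.0              # attend to decoded prefix
    out = decoder.predict({
        "input_ids": np.array([token], dtype=np.int32),
        "cache_length": np.array([pos], dtype=np.int32),
        "key_cache": key_cache,
        "value_cache": value_cache,
        "kv_cache_update_mask": update_mask,
        "encoder_output_embeds": encoder_embeds,  # from the AudioEncoder
        "decoder_key_padding_mask": padding_mask,
    })
    key_cache[..., pos : pos + 1] = out["key_cache_updates"]
    value_cache[..., pos : pos + 1] = out["value_cache_updates"]
    return int(out["logits"][0, 0].argmax())      # greedy next token
```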
openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-large-v3_886MB/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd0f0c4be2c3d179294cdc6ac01cd9ca266939cd1ddc5229d67ee529356ee04f
+ size 556957044
openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28a0a20cf725ae7489eb6ea056d04f24e4c813d3164f431ebfed52a2a4c16961
+ size 243
openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc0b101197d8de26d7cb78539fb035c4acbfca081f160390115514b7db5d674d
+ size 348
openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,70 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Concat" : 672,
+ "Ios16.rsqrt" : 65,
+ "Ios16.mul" : 2882,
+ "SliceByIndex" : 4480,
+ "Ios16.constexprLutToDense" : 194,
+ "Ios16.sub" : 193,
+ "Transpose" : 32,
+ "Ios16.einsum" : 5120,
+ "Ios16.conv" : 194,
+ "Ios16.add" : 130,
+ "Ios16.reduceMean" : 130,
+ "Ios16.softmax" : 2560,
+ "Ios16.gelu" : 34,
+ "Ios16.batchNorm" : 65
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoder_mixedBitPalettized_4_0_bit",
+ "method" : "predict"
+ }
+ ]
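Compared with the 886MB encoder's histogram (64 matmuls, 32 softmaxes), this SplitHeadQ variant replaces the fused attention matmuls with thousands of per-head einsum/softmax/slice ops. The exact chunking that yields these op counts is not recoverable from the histogram alone, but the general pattern is roughly this (illustrative NumPy only, one slice-plus-einsum chain per head):

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def split_head_attention(q, k, v, n_heads=20):
    # q, k, v: [seq, 1280] for large-v3's 20-head encoder layers.
    d = q.shape[-1] // n_heads
    outs = []
    for h in range(n_heads):
        s = slice(h * d, (h + 1) * d)            # SliceByIndex per head
        w = softmax(np.einsum("td,sd->ts", q[:, s], k[:, s]) / np.sqrt(d))
        outs.append(np.einsum("ts,sd->td", w, v[:, s]))
    return np.concatenate(outs, axis=-1)         # Concat heads back together
```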
openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-large-v3_889MB/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57d83578ef95e5e5b9a72c5fcdfaaaeafb75726e3bddc09ab725facd4596e979
+ size 323585920
openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091a361134891f94e613562771beea0d93a9aefbc6984ba86c60f856e07a508f
+ size 243
openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3c5778c86d6fbc6a9817a56dbcac05a946a4d95c77f6db8355572f3be9e9a68
+ size 329
openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,71 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Pad" : 1,
+ "Ios16.mul" : 2,
+ "SliceByIndex" : 1,
+ "Ios16.sub" : 1,
+ "Ios16.log" : 1,
+ "Ios16.conv" : 2,
+ "Ios16.add" : 3,
+ "Ios16.square" : 2,
+ "Ios16.matmul" : 1,
+ "Squeeze" : 2,
+ "Ios16.maximum" : 1,
+ "ExpandDims" : 4,
+ "Ios16.reduceMax" : 1,
+ "Identity" : 1,
+ "Ios16.reshape" : 2
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 480000)",
+ "shortDescription" : "",
+ "shape" : "[480000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram",
+ "method" : "predict"
+ }
+ ]
openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.2.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.1"}})]
+ {
+ func main<ios16>(tensor<fp16, [480000]> audio) {
+ tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+ tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
+ tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+ tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
+ tensor<fp16, []> input_3_constant_val_0_to_fp16 = const()[name = tensor<string, []>("input_3_constant_val_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = input_3_constant_val_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+ tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
+ tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+ tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+ tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+ tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+ tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+ tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
+ tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+ tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+ tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+ tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+ tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
+ tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+ tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+ tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+ tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
+ tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+ tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+ tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+ tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
+ tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
+ tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+ tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
+ tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
+ tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
+ tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
+ tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
+ tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
+ tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
+ tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
+ tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
+ tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+ tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
+ } -> (melspectrogram_features);
+ }
openai_whisper-large-v3_889MB/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36b84e22c8c8fbbee2fa1dc6b6a897918ff1a4ab0b2ce3afddffbecc3e8a3383
+ size 373376
openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a22d61d41f5d9af01210bd2014544e626a1c113a9b91a9eb7db6385aa6789a9a
+ size 243
openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f41a7939b47f7cf127aa69dfd8c552a141dc773b936bf8621aeffcd201fb9e30
+ size 637
openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,166 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 51866]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 1]",
+ "name" : "key_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 1]",
+ "name" : "value_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1500]",
+ "name" : "alignment_heads_weights",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Transpose" : 1,
+ "Ios16.gather" : 2,
+ "Squeeze" : 1,
+ "Ios16.reduceMean" : 195,
+ "Ios16.softmax" : 64,
+ "Split" : 2,
+ "Ios16.linear" : 1,
+ "Ios16.add" : 290,
+ "Concat" : 3,
+ "ExpandDims" : 6,
+ "Ios16.sub" : 291,
+ "Ios16.conv" : 320,
+ "Ios16.gelu" : 32,
+ "Ios16.constexprLutToDense" : 320,
+ "Ios16.matmul" : 128,
+ "Ios16.reshape" : 256,
+ "Ios16.batchNorm" : 97,
+ "Ios16.rsqrt" : 97,
+ "SliceByIndex" : 20,
+ "Ios16.mul" : 706
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.1",
+ "com.github.apple.coremltools.version" : "7.1"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "cache_length",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 448]",
+ "name" : "key_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 40960 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 40960, 1, 448]",
+ "name" : "value_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "kv_cache_update_mask",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "decoder_key_padding_mask",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "TextDecoder_mixedBitPalettized_4_0_bit",
+ "method" : "predict"
+ }
+ ]
openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
openai_whisper-large-v3_889MB/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd0f0c4be2c3d179294cdc6ac01cd9ca266939cd1ddc5229d67ee529356ee04f
+ size 556957044