Upload 15 files
- Embedding.mlmodelc/analytics/coremldata.bin +1 -1
- Embedding.mlmodelc/coremldata.bin +1 -1
- Embedding.mlmodelc/metadata.json +4 -3
- Embedding.mlmodelc/model.mil +0 -0
- Embedding.mlmodelc/weights/weight.bin +2 -2
- FBank.mlmodelc/analytics/coremldata.bin +3 -0
- FBank.mlmodelc/coremldata.bin +3 -0
- FBank.mlmodelc/metadata.json +80 -0
- FBank.mlmodelc/model.mil +96 -0
- FBank.mlmodelc/weights/weight.bin +3 -0
- Segmentation.mlmodelc/analytics/coremldata.bin +1 -1
- Segmentation.mlmodelc/coremldata.bin +1 -1
- Segmentation.mlmodelc/metadata.json +6 -5
- Segmentation.mlmodelc/model.mil +112 -104
- Segmentation.mlmodelc/weights/weight.bin +2 -2
Embedding.mlmodelc/analytics/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4b2449667626ef30bbcf9aad719b8ad5380aab470e3c3bddf5ece68c101cfd5c
 size 243
Embedding.mlmodelc/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8af46b5ba390f4e1898446f30c318d19f114410e91bd4e2a30839ad3900e3f12
 size 646
Embedding.mlmodelc/metadata.json
CHANGED
@@ -20,7 +20,7 @@
 ],
 "author" : "Fluid Inference",
 "specificationVersion" : 8,
-"storagePrecision" : "Float32",
+"storagePrecision" : "Mixed (Float32, Int8)",
 "license" : "CC-BY-4.0",
 "mlProgramOperationTypeHistogram" : {
 "Ios16.reduceL2Norm" : 1,
@@ -35,14 +35,15 @@
 "Ios17.gather" : 2,
 "Pad" : 2,
 "Ios17.log" : 1,
+"Ios16.constexprAffineDequantize" : 40,
 "Ios17.sqrt" : 1,
 "Ios17.sub" : 5,
 "Ios17.conv" : 41,
 "Ios17.clip" : 3,
 "Ios16.relu" : 33,
-"Ios17.concat" : 1,
 "Ios17.pow" : 2,
 "Ios17.realDiv" : 4,
+"Ios17.concat" : 1,
 "Ios17.mul" : 9
 },
 "computePrecision" : "Mixed (Float32, Int32)",
@@ -81,7 +82,7 @@
 "com.github.apple.coremltools.version" : "9.0b1",
 "com.github.apple.coremltools.source_dialect" : "TorchScript"
 },
-"generatedClassName" : "
+"generatedClassName" : "embedding_community_1_int8_per_channel",
 "method" : "predict"
 }
 ]
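The Embedding changes above are consistent with per-channel Int8 weight compression: storagePrecision moves from Float32 to Mixed (Float32, Int8), 40 Ios16.constexprAffineDequantize ops appear in the histogram, and the generated class is renamed to embedding_community_1_int8_per_channel, while computePrecision stays Mixed (Float32, Int32) because the quantized weights are dequantized back to Float32 when the graph runs. A minimal sketch of producing such a model with coremltools (assuming coremltools 8 or newer and a Float32 Embedding.mlpackage as the starting point; this is not necessarily the exact script behind this commit):

import numpy as np
import coremltools as ct
import coremltools.optimize.coreml as cto

# Float32 source package (path is an assumption for illustration).
mlmodel = ct.models.MLModel("Embedding.mlpackage")

# Per-output-channel symmetric Int8 quantization of constant weights.
config = cto.OptimizationConfig(
    global_config=cto.OpLinearQuantizerConfig(
        mode="linear_symmetric",
        dtype=np.int8,
        granularity="per_channel",
    )
)

# Rewrites eligible weights as Int8 blobs plus constexpr_affine_dequantize ops.
quantized = cto.linear_quantize_weights(mlmodel, config=config)
quantized.save("Embedding_int8_per_channel.mlpackage")

Per-channel granularity keeps one scale per output channel of each conv weight, which is what the _int8_per_channel suffix in the class name suggests.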
Embedding.mlmodelc/model.mil
CHANGED
The diff for this file is too large to render.
Embedding.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9ffc18432c23faf3d6b233c85e859876b3aaf0f1557cd4432bb24b8881eddecc
+size 7124224
FBank.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e8bd3a8b82ac123580989f490e4d9245127c535857630b543311268accc3f0a
size 243
FBank.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57ac436bb0671cbb5527a339134d695f752eb77f7a18966b93c6835335595759
size 853
FBank.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,80 @@
[
{
"shortDescription" : "pyannote community-1 FBANK frontend (10 s audio preprocessing to 80×998 features, batch 1-32, CPU preferred)",
"metadataOutputVersion" : "3.0",
"outputSchema" : [
{
"hasShapeFlexibility" : "0",
"isOptional" : "0",
"dataType" : "Float32",
"formattedType" : "MultiArray (Float32)",
"shortDescription" : "",
"shape" : "[]",
"name" : "fbank_features",
"type" : "MultiArray"
}
],
"version" : "pyannote-speaker-diarization-community-1",
"modelParameters" : [

],
"author" : "Fluid Inference",
"specificationVersion" : 8,
"storagePrecision" : "Float32",
"license" : "CC-BY-4.0",
"mlProgramOperationTypeHistogram" : {
"Ios17.mul" : 3,
"Ios17.transpose" : 2,
"Ios17.sub" : 3,
"Ios17.conv" : 4,
"Ios17.log" : 1,
"Ios17.sliceByIndex" : 1,
"Ios16.reduceMean" : 2,
"Ios17.add" : 2,
"Ios17.clip" : 1,
"Ios17.pow" : 2,
"Ios17.expandDims" : 4,
"Ios17.squeeze" : 4,
"Ios17.reshape" : 2,
"Pad" : 2
},
"computePrecision" : "Mixed (Float32, Int32)",
"stateSchema" : [

],
"isUpdatable" : "0",
"availability" : {
"macOS" : "14.0",
"tvOS" : "17.0",
"visionOS" : "1.0",
"watchOS" : "10.0",
"iOS" : "17.0",
"macCatalyst" : "17.0"
},
"modelType" : {
"name" : "MLModelType_mlProgram"
},
"inputSchema" : [
{
"shortDescription" : "",
"dataType" : "Float32",
"hasShapeFlexibility" : "1",
"isOptional" : "0",
"shapeFlexibility" : "1 × 1 × 160000 | 2 × 1 × 160000 | 3 × 1 × 160000 | 4 × 1 × 160000 | 5 × 1 × 160000 | 6 × 1 × 160000 | 7 × 1 × 160000 | 8 × 1 × 160000 | 9 × 1 × 160000 | 10 × 1 × 160000 | 11 × 1 × 160000 | 12 × 1 × 160000 | 13 × 1 × 160000 | 14 × 1 × 160000 | 15 × 1 × 160000 | 16 × 1 × 160000 | 17 × 1 × 160000 | 18 × 1 × 160000 | 19 × 1 × 160000 | 20 × 1 × 160000 | 21 × 1 × 160000 | 22 × 1 × 160000 | 23 × 1 × 160000 | 24 × 1 × 160000 | 25 × 1 × 160000 | 26 × 1 × 160000 | 27 × 1 × 160000 | 28 × 1 × 160000 | 29 × 1 × 160000 | 30 × 1 × 160000 | 31 × 1 × 160000 | 32 × 1 × 160000",
"formattedType" : "MultiArray (Float32 1 × 1 × 160000)",
"type" : "MultiArray",
"shape" : "[1, 1, 160000]",
"name" : "audio",
"enumeratedShapes" : "[[1, 1, 160000], [2, 1, 160000], [3, 1, 160000], [4, 1, 160000], [5, 1, 160000], [6, 1, 160000], [7, 1, 160000], [8, 1, 160000], [9, 1, 160000], [10, 1, 160000], [11, 1, 160000], [12, 1, 160000], [13, 1, 160000], [14, 1, 160000], [15, 1, 160000], [16, 1, 160000], [17, 1, 160000], [18, 1, 160000], [19, 1, 160000], [20, 1, 160000], [21, 1, 160000], [22, 1, 160000], [23, 1, 160000], [24, 1, 160000], [25, 1, 160000], [26, 1, 160000], [27, 1, 160000], [28, 1, 160000], [29, 1, 160000], [30, 1, 160000], [31, 1, 160000], [32, 1, 160000]]"
}
],
"userDefinedMetadata" : {
"com.github.apple.coremltools.conversion_date" : "2025-10-15",
"com.github.apple.coremltools.source" : "torch==2.8.0",
"com.github.apple.coremltools.version" : "9.0b1",
"com.github.apple.coremltools.source_dialect" : "TorchScript"
},
"generatedClassName" : "fbank_community_1",
"method" : "predict"
}
]
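For reference, the input schema above enumerates batch sizes 1 through 32 of 10 s of 16 kHz mono audio (160000 samples per item), and the single output fbank_features is an 80×998 log-mel feature map per item. A minimal sketch of driving the compiled bundle from Python with coremltools (loading the .mlmodelc directly via CompiledMLModel is an assumption for illustration; an app would normally use the generated fbank_community_1 class):

import numpy as np
import coremltools as ct

# Load the compiled FBANK frontend; CPU is preferred per the model description.
fbank = ct.models.CompiledMLModel("FBank.mlmodelc",
                                  compute_units=ct.ComputeUnit.CPU_ONLY)

batch = 4                                               # any size in 1..32 (enumerated shapes)
audio = np.zeros((batch, 1, 160000), dtype=np.float32)  # 10 s of 16 kHz audio per row

out = fbank.predict({"audio": audio})
features = out["fbank_features"]                        # shape (batch, 1, 80, 998)
print(features.shape)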
FBank.mlmodelc/model.mil
ADDED
@@ -0,0 +1,96 @@
program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}, {"coremltools-component-torch", "2.8.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
{
func main<ios17>(tensor<fp32, [?, 1, 160000]> audio) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>>>((("DefaultShapes", {{"audio", [1, 1, 160000]}}), ("EnumeratedShapes", {{"audio_1_1_10_1_160000_", {{"audio", [10, 1, 160000]}}}, {"audio_1_1_11_1_160000_", {{"audio", [11, 1, 160000]}}}, {"audio_1_1_12_1_160000_", {{"audio", [12, 1, 160000]}}}, {"audio_1_1_13_1_160000_", {{"audio", [13, 1, 160000]}}}, {"audio_1_1_14_1_160000_", {{"audio", [14, 1, 160000]}}}, {"audio_1_1_15_1_160000_", {{"audio", [15, 1, 160000]}}}, {"audio_1_1_16_1_160000_", {{"audio", [16, 1, 160000]}}}, {"audio_1_1_17_1_160000_", {{"audio", [17, 1, 160000]}}}, {"audio_1_1_18_1_160000_", {{"audio", [18, 1, 160000]}}}, {"audio_1_1_19_1_160000_", {{"audio", [19, 1, 160000]}}}, {"audio_1_1_1_1_160000_", {{"audio", [1, 1, 160000]}}}, {"audio_1_1_20_1_160000_", {{"audio", [20, 1, 160000]}}}, {"audio_1_1_21_1_160000_", {{"audio", [21, 1, 160000]}}}, {"audio_1_1_22_1_160000_", {{"audio", [22, 1, 160000]}}}, {"audio_1_1_23_1_160000_", {{"audio", [23, 1, 160000]}}}, {"audio_1_1_24_1_160000_", {{"audio", [24, 1, 160000]}}}, {"audio_1_1_25_1_160000_", {{"audio", [25, 1, 160000]}}}, {"audio_1_1_26_1_160000_", {{"audio", [26, 1, 160000]}}}, {"audio_1_1_27_1_160000_", {{"audio", [27, 1, 160000]}}}, {"audio_1_1_28_1_160000_", {{"audio", [28, 1, 160000]}}}, {"audio_1_1_29_1_160000_", {{"audio", [29, 1, 160000]}}}, {"audio_1_1_2_1_160000_", {{"audio", [2, 1, 160000]}}}, {"audio_1_1_30_1_160000_", {{"audio", [30, 1, 160000]}}}, {"audio_1_1_31_1_160000_", {{"audio", [31, 1, 160000]}}}, {"audio_1_1_32_1_160000_", {{"audio", [32, 1, 160000]}}}, {"audio_1_1_3_1_160000_", {{"audio", [3, 1, 160000]}}}, {"audio_1_1_4_1_160000_", {{"audio", [4, 1, 160000]}}}, {"audio_1_1_5_1_160000_", {{"audio", [5, 1, 160000]}}}, {"audio_1_1_6_1_160000_", {{"audio", [6, 1, 160000]}}}, {"audio_1_1_7_1_160000_", {{"audio", [7, 1, 160000]}}}, {"audio_1_1_8_1_160000_", {{"audio", [8, 1, 160000]}}}, {"audio_1_1_9_1_160000_", {{"audio", [9, 1, 160000]}}}})))] {
tensor<fp32, [80, 257, 1]> _fbank_mel_weight = const()[name = tensor<string, []>("_fbank_mel_weight"), val = tensor<fp32, [80, 257, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
tensor<fp32, [257, 1, 512]> _fbank_dft_imag_weight = const()[name = tensor<string, []>("_fbank_dft_imag_weight"), val = tensor<fp32, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82368)))];
tensor<fp32, [257, 1, 512]> _fbank_dft_real_weight = const()[name = tensor<string, []>("_fbank_dft_real_weight"), val = tensor<fp32, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(608768)))];
tensor<fp32, [1, 400]> _fbank_window = const()[name = tensor<string, []>("_fbank_window"), val = tensor<fp32, [1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1135168)))];
tensor<fp32, []> _fbank_eps = const()[name = tensor<string, []>("_fbank_eps"), val = tensor<fp32, []>(0x1.0c6f7ap-20)];
tensor<fp32, [400, 1, 400]> _fbank_frame_kernel = const()[name = tensor<string, []>("_fbank_frame_kernel"), val = tensor<fp32, [400, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1136832)))];
tensor<fp32, []> var_3_promoted = const()[name = tensor<string, []>("op_3_promoted"), val = tensor<fp32, []>(0x1p+15)];
tensor<fp32, [?, 1, 160000]> waveforms_3 = mul(x = audio, y = var_3_promoted)[name = tensor<string, []>("waveforms_3")];
tensor<string, []> frames_1_pad_type_0 = const()[name = tensor<string, []>("frames_1_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> frames_1_strides_0 = const()[name = tensor<string, []>("frames_1_strides_0"), val = tensor<int32, [1]>([160])];
tensor<int32, [2]> frames_1_pad_0 = const()[name = tensor<string, []>("frames_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> frames_1_dilations_0 = const()[name = tensor<string, []>("frames_1_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> frames_1_groups_0 = const()[name = tensor<string, []>("frames_1_groups_0"), val = tensor<int32, []>(1)];
tensor<fp32, [?, 400, 998]> frames_1 = conv(dilations = frames_1_dilations_0, groups = frames_1_groups_0, pad = frames_1_pad_0, pad_type = frames_1_pad_type_0, strides = frames_1_strides_0, weight = _fbank_frame_kernel, x = waveforms_3)[name = tensor<string, []>("frames_1")];
tensor<int32, [3]> frames_3_perm_0 = const()[name = tensor<string, []>("frames_3_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, [2]> concat_0x = const()[name = tensor<string, []>("concat_0x"), val = tensor<int32, [2]>([-1, 400])];
tensor<fp32, [?, 998, 400]> frames_3 = transpose(perm = frames_3_perm_0, x = frames_1)[name = tensor<string, []>("transpose_1")];
tensor<fp32, [?, 400]> frames_5 = reshape(shape = concat_0x, x = frames_3)[name = tensor<string, []>("frames_5")];
tensor<int32, [1]> var_53_axes_0 = const()[name = tensor<string, []>("op_53_axes_0"), val = tensor<int32, [1]>([1])];
tensor<bool, []> var_53_keep_dims_0 = const()[name = tensor<string, []>("op_53_keep_dims_0"), val = tensor<bool, []>(true)];
tensor<fp32, [?, 1]> var_53 = reduce_mean(axes = var_53_axes_0, keep_dims = var_53_keep_dims_0, x = frames_5)[name = tensor<string, []>("op_53")];
tensor<fp32, [?, 400]> frames_7 = sub(x = frames_5, y = var_53)[name = tensor<string, []>("frames_7")];
tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp32, [?, 1, 400]> input_1 = expand_dims(axes = input_1_axes_0, x = frames_7)[name = tensor<string, []>("input_1")];
tensor<fp32, []> const_0 = const()[name = tensor<string, []>("const_0"), val = tensor<fp32, []>(0x0p+0)];
tensor<int32, [6]> var_57_pad_0 = const()[name = tensor<string, []>("op_57_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 1, 0])];
tensor<string, []> var_57_mode_0 = const()[name = tensor<string, []>("op_57_mode_0"), val = tensor<string, []>("replicate")];
tensor<fp32, [?, 1, 401]> var_57 = pad(constant_val = const_0, mode = var_57_mode_0, pad = var_57_pad_0, x = input_1)[name = tensor<string, []>("op_57")];
tensor<int32, [1]> padded_axes_0 = const()[name = tensor<string, []>("padded_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp32, [?, 401]> padded = squeeze(axes = padded_axes_0, x = var_57)[name = tensor<string, []>("padded")];
tensor<int32, [2]> var_60_begin_0 = const()[name = tensor<string, []>("op_60_begin_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [2]> var_60_end_0 = const()[name = tensor<string, []>("op_60_end_0"), val = tensor<int32, [2]>([0, 400])];
tensor<bool, [2]> var_60_end_mask_0 = const()[name = tensor<string, []>("op_60_end_mask_0"), val = tensor<bool, [2]>([true, false])];
tensor<fp32, [?, 400]> var_60 = slice_by_index(begin = var_60_begin_0, end = var_60_end_0, end_mask = var_60_end_mask_0, x = padded)[name = tensor<string, []>("op_60")];
tensor<fp32, []> var_61 = const()[name = tensor<string, []>("op_61"), val = tensor<fp32, []>(0x1.f0a3d8p-1)];
tensor<fp32, [?, 400]> var_62 = mul(x = var_60, y = var_61)[name = tensor<string, []>("op_62")];
tensor<fp32, [?, 400]> frames_9 = sub(x = frames_7, y = var_62)[name = tensor<string, []>("frames_9")];
tensor<fp32, [?, 400]> frames_11 = mul(x = frames_9, y = _fbank_window)[name = tensor<string, []>("frames_11")];
tensor<int32, [1]> input_axes_0 = const()[name = tensor<string, []>("input_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp32, [?, 1, 400]> input = expand_dims(axes = input_axes_0, x = frames_11)[name = tensor<string, []>("input")];
tensor<fp32, []> const_1 = const()[name = tensor<string, []>("const_1"), val = tensor<fp32, []>(0x0p+0)];
tensor<int32, [6]> var_67_pad_0 = const()[name = tensor<string, []>("op_67_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 0, 112])];
tensor<string, []> var_67_mode_0 = const()[name = tensor<string, []>("op_67_mode_0"), val = tensor<string, []>("constant")];
tensor<fp32, [?, 1, 512]> var_67 = pad(constant_val = const_1, mode = var_67_mode_0, pad = var_67_pad_0, x = input)[name = tensor<string, []>("op_67")];
tensor<string, []> var_74_pad_type_0 = const()[name = tensor<string, []>("op_74_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> var_74_strides_0 = const()[name = tensor<string, []>("op_74_strides_0"), val = tensor<int32, [1]>([1])];
tensor<int32, [2]> var_74_pad_0 = const()[name = tensor<string, []>("op_74_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> var_74_dilations_0 = const()[name = tensor<string, []>("op_74_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> var_74_groups_0 = const()[name = tensor<string, []>("op_74_groups_0"), val = tensor<int32, []>(1)];
tensor<fp32, [?, 257, 1]> var_74 = conv(dilations = var_74_dilations_0, groups = var_74_groups_0, pad = var_74_pad_0, pad_type = var_74_pad_type_0, strides = var_74_strides_0, weight = _fbank_dft_real_weight, x = var_67)[name = tensor<string, []>("op_74")];
tensor<int32, [1]> real_axes_0 = const()[name = tensor<string, []>("real_axes_0"), val = tensor<int32, [1]>([-1])];
tensor<fp32, [?, 257]> real = squeeze(axes = real_axes_0, x = var_74)[name = tensor<string, []>("real")];
tensor<string, []> var_80_pad_type_0 = const()[name = tensor<string, []>("op_80_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> var_80_strides_0 = const()[name = tensor<string, []>("op_80_strides_0"), val = tensor<int32, [1]>([1])];
tensor<int32, [2]> var_80_pad_0 = const()[name = tensor<string, []>("op_80_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> var_80_dilations_0 = const()[name = tensor<string, []>("op_80_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> var_80_groups_0 = const()[name = tensor<string, []>("op_80_groups_0"), val = tensor<int32, []>(1)];
tensor<fp32, [?, 257, 1]> var_80 = conv(dilations = var_80_dilations_0, groups = var_80_groups_0, pad = var_80_pad_0, pad_type = var_80_pad_type_0, strides = var_80_strides_0, weight = _fbank_dft_imag_weight, x = var_67)[name = tensor<string, []>("op_80")];
tensor<int32, [1]> imag_axes_0 = const()[name = tensor<string, []>("imag_axes_0"), val = tensor<int32, [1]>([-1])];
tensor<fp32, [?, 257]> imag = squeeze(axes = imag_axes_0, x = var_80)[name = tensor<string, []>("imag")];
tensor<fp32, []> var_22_promoted = const()[name = tensor<string, []>("op_22_promoted"), val = tensor<fp32, []>(0x1p+1)];
tensor<fp32, [?, 257]> var_82 = pow(x = real, y = var_22_promoted)[name = tensor<string, []>("op_82")];
tensor<fp32, []> var_22_promoted_1 = const()[name = tensor<string, []>("op_22_promoted_1"), val = tensor<fp32, []>(0x1p+1)];
tensor<fp32, [?, 257]> var_83 = pow(x = imag, y = var_22_promoted_1)[name = tensor<string, []>("op_83")];
tensor<fp32, [?, 257]> power = add(x = var_82, y = var_83)[name = tensor<string, []>("power")];
tensor<int32, [1]> var_85_axes_0 = const()[name = tensor<string, []>("op_85_axes_0"), val = tensor<int32, [1]>([-1])];
tensor<fp32, [?, 257, 1]> var_85 = expand_dims(axes = var_85_axes_0, x = power)[name = tensor<string, []>("op_85")];
tensor<string, []> var_90_pad_type_0 = const()[name = tensor<string, []>("op_90_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> var_90_strides_0 = const()[name = tensor<string, []>("op_90_strides_0"), val = tensor<int32, [1]>([1])];
tensor<int32, [2]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> var_90_dilations_0 = const()[name = tensor<string, []>("op_90_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> var_90_groups_0 = const()[name = tensor<string, []>("op_90_groups_0"), val = tensor<int32, []>(1)];
tensor<fp32, [?, 80, 1]> var_90 = conv(dilations = var_90_dilations_0, groups = var_90_groups_0, pad = var_90_pad_0, pad_type = var_90_pad_type_0, strides = var_90_strides_0, weight = _fbank_mel_weight, x = var_85)[name = tensor<string, []>("op_90")];
tensor<int32, [1]> mel_1_axes_0 = const()[name = tensor<string, []>("mel_1_axes_0"), val = tensor<int32, [1]>([-1])];
tensor<fp32, [?, 80]> mel_1 = squeeze(axes = mel_1_axes_0, x = var_90)[name = tensor<string, []>("mel_1")];
tensor<fp32, [?, 80]> mel_3 = add(x = mel_1, y = _fbank_eps)[name = tensor<string, []>("mel_3")];
tensor<fp32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<fp32, []>(0x1.fffffep+127)];
tensor<fp32, [?, 80]> clip_0 = clip(alpha = _fbank_eps, beta = const_2, x = mel_3)[name = tensor<string, []>("clip_0")];
tensor<fp32, []> mel_epsilon_0 = const()[name = tensor<string, []>("mel_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
tensor<fp32, [?, 80]> mel = log(epsilon = mel_epsilon_0, x = clip_0)[name = tensor<string, []>("mel")];
tensor<int32, [3]> concat_1x = const()[name = tensor<string, []>("concat_1x"), val = tensor<int32, [3]>([-1, 998, 80])];
tensor<fp32, [?, 998, 80]> var_96 = reshape(shape = concat_1x, x = mel)[name = tensor<string, []>("op_96")];
tensor<int32, [1]> centered_axes_0 = const()[name = tensor<string, []>("centered_axes_0"), val = tensor<int32, [1]>([1])];
tensor<bool, []> centered_keep_dims_0 = const()[name = tensor<string, []>("centered_keep_dims_0"), val = tensor<bool, []>(true)];
tensor<fp32, [?, 1, 80]> centered = reduce_mean(axes = centered_axes_0, keep_dims = centered_keep_dims_0, x = var_96)[name = tensor<string, []>("centered")];
tensor<fp32, [?, 998, 80]> features = sub(x = var_96, y = centered)[name = tensor<string, []>("features")];
tensor<int32, [3]> var_115 = const()[name = tensor<string, []>("op_115"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, [1]> var_118_axes_0 = const()[name = tensor<string, []>("op_118_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp32, [?, 80, 998]> var_116 = transpose(perm = var_115, x = features)[name = tensor<string, []>("transpose_0")];
tensor<fp32, [?, 1, 80, 998]> fbank_features = expand_dims(axes = var_118_axes_0, x = var_116)[name = tensor<string, []>("op_118")];
} -> (fbank_features);
}
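Read end to end, the program above is a Kaldi-style FBANK front end: scale the waveform to the int16 range, cut it into 998 frames of 400 samples with a 160-sample hop, remove each frame's mean, apply roughly 0.97 pre-emphasis with replicate padding, window, zero-pad to 512 samples, take a real DFT through the two 257-bin conv kernels, form the power spectrum, apply the 80-band mel matrix, take a clipped log, and subtract the per-utterance mean before reshaping to (batch, 1, 80, 998). A rough NumPy restatement for a single waveform, treating the window, DFT and mel matrices stored in weights/weight.bin as given arrays (an illustrative mirror of the graph, not code shipped with the model):

import numpy as np

def fbank_reference(audio, window, dft_real, dft_imag, mel, eps=1e-6):
    # audio: (160000,) float32; window: (400,); dft_real/dft_imag: (257, 512); mel: (80, 257)
    x = audio * 32768.0                                    # waveforms_3 = audio * 2**15
    idx = np.arange(998)[:, None] * 160 + np.arange(400)   # framing: hop 160, length 400
    frames = x[idx]                                        # (998, 400)
    frames = frames - frames.mean(axis=1, keepdims=True)   # per-frame mean removal
    prev = np.concatenate([frames[:, :1], frames[:, :-1]], axis=1)
    frames = frames - 0.97 * prev                          # pre-emphasis (op_61 ~= 0.97)
    frames = frames * window                               # analysis window
    padded = np.pad(frames, ((0, 0), (0, 112)))            # 400 -> 512 for the DFT convs
    power = (padded @ dft_real.T) ** 2 + (padded @ dft_imag.T) ** 2   # (998, 257)
    logmel = np.log(np.clip(power @ mel.T + eps, eps, None))          # (998, 80)
    logmel = logmel - logmel.mean(axis=0, keepdims=True)   # per-utterance mean normalisation
    return logmel.T[None, None]                            # (1, 1, 80, 998) = fbank_features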
FBank.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e83fdd3ea78064b078069e4d9141603c61c47a27fd19e7e3142ff7476f8db36
size 1776896
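As a consistency check, the 1776896-byte weight.bin accounts exactly for the five Float32 constants referenced by model.mil above if one assumes a 64-byte record at the start of the file and before each payload (the 64-byte spacing is inferred from the offsets, not from documented format knowledge):

# Blob shapes and offsets copied from FBank.mlmodelc/model.mil; all tensors are Float32.
blobs = [
    ("_fbank_mel_weight",      (80, 257, 1),  64),
    ("_fbank_dft_imag_weight", (257, 1, 512), 82368),
    ("_fbank_dft_real_weight", (257, 1, 512), 608768),
    ("_fbank_window",          (1, 400),      1135168),
    ("_fbank_frame_kernel",    (400, 1, 400), 1136832),
]

cursor = 64                               # apparent 64-byte file header (inferred)
for name, shape, offset in blobs:
    assert cursor == offset, name         # each payload starts where the previous ended + 64
    payload = 4                           # Float32 = 4 bytes per element
    for d in shape:
        payload *= d
    cursor = offset + payload + 64        # payload plus an apparent 64-byte inter-blob record

assert cursor == 1776896                  # matches the LFS pointer size above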
Segmentation.mlmodelc/analytics/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:518731a13df50d0924c86c919d8b587280dfc40b2f4cd7b7ef3aac75a037305e
 size 243
Segmentation.mlmodelc/coremldata.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:59adec00fd697d87742db7052380da0ac91bde91acc37ac48a843f0ff07517bb
 size 812
Segmentation.mlmodelc/metadata.json
CHANGED
@@ -20,7 +20,7 @@
 ],
 "author" : "Fluid Inference",
 "specificationVersion" : 8,
-"storagePrecision" : "
+"storagePrecision" : "Float16",
 "license" : "CC-BY-4.0",
 "mlProgramOperationTypeHistogram" : {
 "Ios17.linear" : 3,
@@ -31,16 +31,17 @@
 "Ios17.leakyRelu" : 5,
 "Ios17.gather" : 1,
 "Ios17.concat" : 9,
-"Fill" : 1,
-"Ios17.abs" : 1,
 "Ios16.maxPool" : 3,
+"Ios17.abs" : 1,
+"Fill" : 1,
 "Ios17.lstm" : 4,
 "Ios16.softmax" : 1,
 "Ios17.instanceNorm" : 4,
+"Ios17.cast" : 4,
 "Split" : 10,
 "Ios17.squeeze" : 8
 },
-"computePrecision" : "Mixed (Float32, Int32)",
+"computePrecision" : "Mixed (Float16, Float32, Int16, Int32, UInt16)",
 "stateSchema" : [

 ],
@@ -71,7 +72,7 @@
 }
 ],
 "userDefinedMetadata" : {
-"com.github.apple.coremltools.conversion_date" : "2025-10-
+"com.github.apple.coremltools.conversion_date" : "2025-10-15",
 "com.github.apple.coremltools.source" : "torch==2.8.0",
 "com.github.apple.coremltools.version" : "9.0b1",
 "com.github.apple.coremltools.source_dialect" : "TorchScript"
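The Segmentation metadata changes point to a Float16 re-export: storagePrecision becomes Float16, four Ios17.cast ops appear to move the Float32 audio input and log-probability output in and out of fp16, and computePrecision widens to Mixed (Float16, Float32, Int16, Int32, UInt16). A minimal sketch of such a conversion with coremltools, using a tiny stand-in module with the same (batch, 1, 160000) to (batch, 589, 7) log-probability contract in place of the real pyannote segmentation network (the stand-in model, file names, and conversion settings are assumptions, not the exact export script):

import numpy as np
import torch
import coremltools as ct

class TinySegmentation(torch.nn.Module):
    # Stand-in with the same I/O contract as the real model; not the pyannote network.
    def forward(self, audio):                      # audio: (B, 1, 160000) float32
        pooled = audio.mean(dim=-1, keepdim=True)  # (B, 1, 1)
        return torch.log_softmax(pooled.repeat(1, 589, 7), dim=-1)

traced = torch.jit.trace(TinySegmentation().eval(), torch.zeros(32, 1, 160000))

mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(
        name="audio",
        shape=ct.EnumeratedShapes(
            shapes=[(b, 1, 160000) for b in range(1, 33)],
            default=(32, 1, 160000),
        ),
        dtype=np.float32,                          # keep Float32 I/O; fp16 casts are inserted
    )],
    minimum_deployment_target=ct.target.iOS17,
    compute_precision=ct.precision.FLOAT16,        # fp16 weights and compute
)
mlmodel.save("Segmentation_fp16.mlpackage")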
Segmentation.mlmodelc/model.mil
CHANGED
@@ -2,217 +2,225 @@ program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}, {"coremltools-component-torch", "2.8.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
|
| 3 |
{
|
| 4 |
func main<ios17>(tensor<fp32, [?, 1, 160000]> audio) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>>>((("DefaultShapes", {{"audio", [32, 1, 160000]}}), ("EnumeratedShapes", {{"audio_1_1_10_1_160000_", {{"audio", [10, 1, 160000]}}}, {"audio_1_1_11_1_160000_", {{"audio", [11, 1, 160000]}}}, {"audio_1_1_12_1_160000_", {{"audio", [12, 1, 160000]}}}, {"audio_1_1_13_1_160000_", {{"audio", [13, 1, 160000]}}}, {"audio_1_1_14_1_160000_", {{"audio", [14, 1, 160000]}}}, {"audio_1_1_15_1_160000_", {{"audio", [15, 1, 160000]}}}, {"audio_1_1_16_1_160000_", {{"audio", [16, 1, 160000]}}}, {"audio_1_1_17_1_160000_", {{"audio", [17, 1, 160000]}}}, {"audio_1_1_18_1_160000_", {{"audio", [18, 1, 160000]}}}, {"audio_1_1_19_1_160000_", {{"audio", [19, 1, 160000]}}}, {"audio_1_1_1_1_160000_", {{"audio", [1, 1, 160000]}}}, {"audio_1_1_20_1_160000_", {{"audio", [20, 1, 160000]}}}, {"audio_1_1_21_1_160000_", {{"audio", [21, 1, 160000]}}}, {"audio_1_1_22_1_160000_", {{"audio", [22, 1, 160000]}}}, {"audio_1_1_23_1_160000_", {{"audio", [23, 1, 160000]}}}, {"audio_1_1_24_1_160000_", {{"audio", [24, 1, 160000]}}}, {"audio_1_1_25_1_160000_", {{"audio", [25, 1, 160000]}}}, {"audio_1_1_26_1_160000_", {{"audio", [26, 1, 160000]}}}, {"audio_1_1_27_1_160000_", {{"audio", [27, 1, 160000]}}}, {"audio_1_1_28_1_160000_", {{"audio", [28, 1, 160000]}}}, {"audio_1_1_29_1_160000_", {{"audio", [29, 1, 160000]}}}, {"audio_1_1_2_1_160000_", {{"audio", [2, 1, 160000]}}}, {"audio_1_1_30_1_160000_", {{"audio", [30, 1, 160000]}}}, {"audio_1_1_31_1_160000_", {{"audio", [31, 1, 160000]}}}, {"audio_1_1_32_1_160000_", {{"audio", [32, 1, 160000]}}}, {"audio_1_1_3_1_160000_", {{"audio", [3, 1, 160000]}}}, {"audio_1_1_4_1_160000_", {{"audio", [4, 1, 160000]}}}, {"audio_1_1_5_1_160000_", {{"audio", [5, 1, 160000]}}}, {"audio_1_1_6_1_160000_", {{"audio", [6, 1, 160000]}}}, {"audio_1_1_7_1_160000_", {{"audio", [7, 1, 160000]}}}, {"audio_1_1_8_1_160000_", {{"audio", [8, 1, 160000]}}}, {"audio_1_1_9_1_160000_", {{"audio", [9, 1, 160000]}}}})))] {
|
| 5 |
-
tensor<fp32, [1]> sincnet_wav_norm1d_bias = const()[name = tensor<string, []>("sincnet_wav_norm1d_bias"), val = tensor<fp32, [1]>([0x1.73505ep-5])];
|
| 6 |
-
tensor<fp32, [1]> sincnet_wav_norm1d_weight = const()[name = tensor<string, []>("sincnet_wav_norm1d_weight"), val = tensor<fp32, [1]>([0x1.43f862p-7])];
|
| 7 |
-
tensor<fp32, [80]> sincnet_norm1d_0_bias = const()[name = tensor<string, []>("sincnet_norm1d_0_bias"), val = tensor<fp32, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
| 8 |
-
tensor<fp32, [80]> sincnet_norm1d_0_weight = const()[name = tensor<string, []>("sincnet_norm1d_0_weight"), val = tensor<fp32, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(448)))];
|
| 9 |
-
tensor<fp32, [60]> sincnet_conv1d_1_bias = const()[name = tensor<string, []>("sincnet_conv1d_1_bias"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(832)))];
|
| 10 |
-
tensor<fp32, [60, 80, 5]> sincnet_conv1d_1_weight = const()[name = tensor<string, []>("sincnet_conv1d_1_weight"), val = tensor<fp32, [60, 80, 5]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1152)))];
|
| 11 |
-
tensor<fp32, [60]> sincnet_norm1d_1_bias = const()[name = tensor<string, []>("sincnet_norm1d_1_bias"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97216)))];
|
| 12 |
-
tensor<fp32, [60]> sincnet_norm1d_1_weight = const()[name = tensor<string, []>("sincnet_norm1d_1_weight"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97536)))];
|
| 13 |
-
tensor<fp32, [60]> sincnet_conv1d_2_bias = const()[name = tensor<string, []>("sincnet_conv1d_2_bias"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97856)))];
|
| 14 |
-
tensor<fp32, [60, 60, 5]> sincnet_conv1d_2_weight = const()[name = tensor<string, []>("sincnet_conv1d_2_weight"), val = tensor<fp32, [60, 60, 5]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98176)))];
|
| 15 |
-
tensor<fp32, [60]> sincnet_norm1d_2_bias = const()[name = tensor<string, []>("sincnet_norm1d_2_bias"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(170240)))];
|
| 16 |
-
tensor<fp32, [60]> sincnet_norm1d_2_weight = const()[name = tensor<string, []>("sincnet_norm1d_2_weight"), val = tensor<fp32, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(170560)))];
|
| 17 |
-
tensor<fp32, [128]> linear_0_bias = const()[name = tensor<string, []>("linear_0_bias"), val = tensor<fp32, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(170880)))];
|
| 18 |
-
tensor<fp32, [128, 256]> linear_0_weight = const()[name = tensor<string, []>("linear_0_weight"), val = tensor<fp32, [128, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(171456)))];
|
| 19 |
-
tensor<fp32, [128]> linear_1_bias = const()[name = tensor<string, []>("linear_1_bias"), val = tensor<fp32, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(302592)))];
|
| 20 |
-
tensor<fp32, [128, 128]> linear_1_weight = const()[name = tensor<string, []>("linear_1_weight"), val = tensor<fp32, [128, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303168)))];
|
| 21 |
-
tensor<fp32, [7]> classifier_bias = const()[name = tensor<string, []>("classifier_bias"), val = tensor<fp32, [7]>([-0x1.00e888p+0, 0x1.67cb52p-2, 0x1.3d87fp-1, 0x1.c8aa8p-2, -0x1.445f5ep-2, -0x1.591274p-1, -0x1.8fb70ep-2])];
|
| 22 |
-
tensor<fp32, [7, 128]> classifier_weight = const()[name = tensor<string, []>("classifier_weight"), val = tensor<fp32, [7, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(368768)))];
|
| 23 |
tensor<fp32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<fp32, []>(0x1.47ae14p-7)];
|
| 24 |
-
tensor<
|
| 25 |
-
tensor<
|
| 26 |
-
tensor<
|
| 27 |
tensor<string, []> outputs_pad_type_0 = const()[name = tensor<string, []>("outputs_pad_type_0"), val = tensor<string, []>("valid")];
|
| 28 |
tensor<int32, [1]> outputs_strides_0 = const()[name = tensor<string, []>("outputs_strides_0"), val = tensor<int32, [1]>([10])];
|
| 29 |
tensor<int32, [2]> outputs_pad_0 = const()[name = tensor<string, []>("outputs_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 30 |
tensor<int32, [1]> outputs_dilations_0 = const()[name = tensor<string, []>("outputs_dilations_0"), val = tensor<int32, [1]>([1])];
|
| 31 |
tensor<int32, []> outputs_groups_0 = const()[name = tensor<string, []>("outputs_groups_0"), val = tensor<int32, []>(1)];
|
| 32 |
-
tensor<
|
| 33 |
-
tensor<
|
|
|
|
| 34 |
tensor<int32, [1]> var_119 = const()[name = tensor<string, []>("op_119"), val = tensor<int32, [1]>([3])];
|
| 35 |
tensor<int32, [1]> var_120 = const()[name = tensor<string, []>("op_120"), val = tensor<int32, [1]>([3])];
|
| 36 |
tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
|
| 37 |
tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 38 |
tensor<bool, []> input_3_ceil_mode_0 = const()[name = tensor<string, []>("input_3_ceil_mode_0"), val = tensor<bool, []>(false)];
|
| 39 |
-
tensor<
|
| 40 |
-
tensor<
|
| 41 |
-
tensor<
|
|
|
|
|
|
|
| 42 |
tensor<string, []> input_9_pad_type_0 = const()[name = tensor<string, []>("input_9_pad_type_0"), val = tensor<string, []>("valid")];
|
| 43 |
tensor<int32, [1]> input_9_strides_0 = const()[name = tensor<string, []>("input_9_strides_0"), val = tensor<int32, [1]>([1])];
|
| 44 |
tensor<int32, [2]> input_9_pad_0 = const()[name = tensor<string, []>("input_9_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 45 |
tensor<int32, [1]> input_9_dilations_0 = const()[name = tensor<string, []>("input_9_dilations_0"), val = tensor<int32, [1]>([1])];
|
| 46 |
tensor<int32, []> input_9_groups_0 = const()[name = tensor<string, []>("input_9_groups_0"), val = tensor<int32, []>(1)];
|
| 47 |
-
tensor<
|
|
|
|
|
|
|
| 48 |
tensor<int32, [1]> var_135 = const()[name = tensor<string, []>("op_135"), val = tensor<int32, [1]>([3])];
|
| 49 |
tensor<int32, [1]> var_136 = const()[name = tensor<string, []>("op_136"), val = tensor<int32, [1]>([3])];
|
| 50 |
tensor<string, []> input_11_pad_type_0 = const()[name = tensor<string, []>("input_11_pad_type_0"), val = tensor<string, []>("custom")];
|
| 51 |
tensor<int32, [2]> input_11_pad_0 = const()[name = tensor<string, []>("input_11_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 52 |
tensor<bool, []> input_11_ceil_mode_0 = const()[name = tensor<string, []>("input_11_ceil_mode_0"), val = tensor<bool, []>(false)];
|
| 53 |
-
tensor<
|
| 54 |
-
tensor<
|
| 55 |
-
tensor<
|
|
|
|
|
|
|
| 56 |
tensor<string, []> input_17_pad_type_0 = const()[name = tensor<string, []>("input_17_pad_type_0"), val = tensor<string, []>("valid")];
|
| 57 |
tensor<int32, [1]> input_17_strides_0 = const()[name = tensor<string, []>("input_17_strides_0"), val = tensor<int32, [1]>([1])];
|
| 58 |
tensor<int32, [2]> input_17_pad_0 = const()[name = tensor<string, []>("input_17_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 59 |
tensor<int32, [1]> input_17_dilations_0 = const()[name = tensor<string, []>("input_17_dilations_0"), val = tensor<int32, [1]>([1])];
|
| 60 |
tensor<int32, []> input_17_groups_0 = const()[name = tensor<string, []>("input_17_groups_0"), val = tensor<int32, []>(1)];
|
| 61 |
-
tensor<
|
|
|
|
|
|
|
| 62 |
tensor<int32, [1]> var_151 = const()[name = tensor<string, []>("op_151"), val = tensor<int32, [1]>([3])];
|
| 63 |
tensor<int32, [1]> var_152 = const()[name = tensor<string, []>("op_152"), val = tensor<int32, [1]>([3])];
|
| 64 |
tensor<string, []> input_19_pad_type_0 = const()[name = tensor<string, []>("input_19_pad_type_0"), val = tensor<string, []>("custom")];
|
| 65 |
tensor<int32, [2]> input_19_pad_0 = const()[name = tensor<string, []>("input_19_pad_0"), val = tensor<int32, [2]>([0, 0])];
|
| 66 |
tensor<bool, []> input_19_ceil_mode_0 = const()[name = tensor<string, []>("input_19_ceil_mode_0"), val = tensor<bool, []>(false)];
|
| 67 |
-
tensor<
|
| 68 |
-
tensor<
|
| 69 |
-
tensor<
|
|
|
|
|
|
|
| 70 |
tensor<int32, [3]> var_163 = const()[name = tensor<string, []>("op_163"), val = tensor<int32, [3]>([0, 2, 1])];
|
| 71 |
tensor<int32, []> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, []>(128)];
|
| 72 |
tensor<int32, []> var_173 = const()[name = tensor<string, []>("op_173"), val = tensor<int32, []>(8)];
|
| 73 |
-
tensor<
|
| 74 |
-
tensor<int32, [3]>
|
|
|
|
| 75 |
tensor<int32, []> gather_0_batch_dims_0 = const()[name = tensor<string, []>("gather_0_batch_dims_0"), val = tensor<int32, []>(0)];
|
| 76 |
tensor<bool, []> gather_0_validate_indices_0 = const()[name = tensor<string, []>("gather_0_validate_indices_0"), val = tensor<bool, []>(false)];
|
| 77 |
-
tensor<
|
| 78 |
-
tensor<
|
| 79 |
-
tensor<
|
|
|
|
|
|
|
| 80 |
tensor<int32, []> concat_0_axis_0 = const()[name = tensor<string, []>("concat_0_axis_0"), val = tensor<int32, []>(0)];
|
| 81 |
tensor<bool, []> concat_0_interleave_0 = const()[name = tensor<string, []>("concat_0_interleave_0"), val = tensor<bool, []>(false)];
|
| 82 |
-
tensor<int32, [
|
| 83 |
-
tensor<
|
| 84 |
-
tensor<
|
|
|
|
| 85 |
tensor<int32, [3]> input_23_batch_first_transpose_perm_0 = const()[name = tensor<string, []>("input_23_batch_first_transpose_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
|
| 86 |
tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(4)];
|
| 87 |
tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
|
| 88 |
-
tensor<
|
| 89 |
tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(4)];
|
| 90 |
tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
|
| 91 |
-
tensor<
|
| 92 |
-
tensor<fp32, [512]> add_0 = const()[name = tensor<string, []>("add_0"), val = tensor<fp32, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(452800)))];
|
| 93 |
-
tensor<fp32, [512]> add_1 = const()[name = tensor<string, []>("add_1"), val = tensor<fp32, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(454912)))];
|
| 94 |
-
tensor<fp32, [512, 60]> concat_6 = const()[name = tensor<string, []>("concat_6"), val = tensor<fp32, [512, 60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(457024)))];
|
| 95 |
-
tensor<fp32, [512, 128]> concat_7 = const()[name = tensor<string, []>("concat_7"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(579968)))];
|
| 96 |
-
tensor<fp32, [512, 60]> concat_8 = const()[name = tensor<string, []>("concat_8"), val = tensor<fp32, [512, 60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(842176)))];
|
| 97 |
-
tensor<fp32, [512, 128]> concat_9 = const()[name = tensor<string, []>("concat_9"), val = tensor<fp32, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(965120)))];
|
| 98 |
tensor<int32, [2]> split_10_split_sizes_0 = const()[name = tensor<string, []>("split_10_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 99 |
tensor<int32, []> split_10_axis_0 = const()[name = tensor<string, []>("split_10_axis_0"), val = tensor<int32, []>(0)];
|
| 100 |
-
tensor<
|
| 101 |
tensor<int32, []> concat_10_axis_0 = const()[name = tensor<string, []>("concat_10_axis_0"), val = tensor<int32, []>(2)];
|
| 102 |
tensor<bool, []> concat_10_interleave_0 = const()[name = tensor<string, []>("concat_10_interleave_0"), val = tensor<bool, []>(false)];
|
| 103 |
-
tensor<
|
| 104 |
tensor<int32, [1]> input_25_lstm_layer_0_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 105 |
-
tensor<
|
| 106 |
tensor<int32, [2]> split_11_split_sizes_0 = const()[name = tensor<string, []>("split_11_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 107 |
tensor<int32, []> split_11_axis_0 = const()[name = tensor<string, []>("split_11_axis_0"), val = tensor<int32, []>(0)];
|
| 108 |
-
tensor<
|
| 109 |
tensor<int32, []> concat_11_axis_0 = const()[name = tensor<string, []>("concat_11_axis_0"), val = tensor<int32, []>(2)];
|
| 110 |
tensor<bool, []> concat_11_interleave_0 = const()[name = tensor<string, []>("concat_11_interleave_0"), val = tensor<bool, []>(false)];
|
| 111 |
-
tensor<
|
| 112 |
tensor<int32, [1]> input_25_lstm_layer_0_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 113 |
-
tensor<
|
| 114 |
tensor<string, []> input_25_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_direction_0"), val = tensor<string, []>("bidirectional")];
|
| 115 |
tensor<bool, []> input_25_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
|
| 116 |
tensor<string, []> input_25_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
| 117 |
tensor<string, []> input_25_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
|
| 118 |
tensor<string, []> input_25_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
|
| 119 |
-
tensor<
|
| 120 |
-
tensor<
|
| 121 |
-
tensor<
|
| 122 |
-
tensor<
|
| 123 |
-
tensor<
|
| 124 |
-
tensor<
|
| 125 |
-
tensor<
|
| 126 |
-
tensor<
|
| 127 |
tensor<int32, [2]> split_20_split_sizes_0 = const()[name = tensor<string, []>("split_20_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 128 |
tensor<int32, []> split_20_axis_0 = const()[name = tensor<string, []>("split_20_axis_0"), val = tensor<int32, []>(0)];
|
| 129 |
-
tensor<
|
| 130 |
tensor<int32, []> concat_20_axis_0 = const()[name = tensor<string, []>("concat_20_axis_0"), val = tensor<int32, []>(2)];
|
| 131 |
tensor<bool, []> concat_20_interleave_0 = const()[name = tensor<string, []>("concat_20_interleave_0"), val = tensor<bool, []>(false)];
|
| 132 |
-
tensor<
|
| 133 |
tensor<int32, [1]> input_25_lstm_layer_1_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 134 |
-
tensor<
|
| 135 |
tensor<int32, [2]> split_21_split_sizes_0 = const()[name = tensor<string, []>("split_21_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 136 |
tensor<int32, []> split_21_axis_0 = const()[name = tensor<string, []>("split_21_axis_0"), val = tensor<int32, []>(0)];
|
| 137 |
-
tensor<
|
| 138 |
tensor<int32, []> concat_21_axis_0 = const()[name = tensor<string, []>("concat_21_axis_0"), val = tensor<int32, []>(2)];
|
| 139 |
tensor<bool, []> concat_21_interleave_0 = const()[name = tensor<string, []>("concat_21_interleave_0"), val = tensor<bool, []>(false)];
|
| 140 |
-
tensor<
|
| 141 |
tensor<int32, [1]> input_25_lstm_layer_1_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 142 |
-
tensor<
|
| 143 |
tensor<string, []> input_25_lstm_layer_1_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_direction_0"), val = tensor<string, []>("bidirectional")];
|
| 144 |
tensor<bool, []> input_25_lstm_layer_1_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_output_sequence_0"), val = tensor<bool, []>(true)];
|
| 145 |
tensor<string, []> input_25_lstm_layer_1_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
| 146 |
tensor<string, []> input_25_lstm_layer_1_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_cell_activation_0"), val = tensor<string, []>("tanh")];
|
| 147 |
tensor<string, []> input_25_lstm_layer_1_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_activation_0"), val = tensor<string, []>("tanh")];
|
| 148 |
-
tensor<
|
| 149 |
-
tensor<
|
| 150 |
-
tensor<
|
| 151 |
-
tensor<
|
| 152 |
-
tensor<
|
| 153 |
-
tensor<
|
| 154 |
-
tensor<
|
| 155 |
tensor<int32, [2]> split_30_split_sizes_0 = const()[name = tensor<string, []>("split_30_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 156 |
tensor<int32, []> split_30_axis_0 = const()[name = tensor<string, []>("split_30_axis_0"), val = tensor<int32, []>(0)];
|
| 157 |
-
tensor<
|
| 158 |
tensor<int32, []> concat_30_axis_0 = const()[name = tensor<string, []>("concat_30_axis_0"), val = tensor<int32, []>(2)];
|
| 159 |
tensor<bool, []> concat_30_interleave_0 = const()[name = tensor<string, []>("concat_30_interleave_0"), val = tensor<bool, []>(false)];
|
| 160 |
-
tensor<
|
| 161 |
tensor<int32, [1]> input_25_lstm_layer_2_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 162 |
-
tensor<
|
| 163 |
tensor<int32, [2]> split_31_split_sizes_0 = const()[name = tensor<string, []>("split_31_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 164 |
tensor<int32, []> split_31_axis_0 = const()[name = tensor<string, []>("split_31_axis_0"), val = tensor<int32, []>(0)];
|
| 165 |
-
tensor<
|
| 166 |
tensor<int32, []> concat_31_axis_0 = const()[name = tensor<string, []>("concat_31_axis_0"), val = tensor<int32, []>(2)];
|
| 167 |
tensor<bool, []> concat_31_interleave_0 = const()[name = tensor<string, []>("concat_31_interleave_0"), val = tensor<bool, []>(false)];
|
| 168 |
-
tensor<
|
| 169 |
tensor<int32, [1]> input_25_lstm_layer_2_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 170 |
-
tensor<
|
| 171 |
tensor<string, []> input_25_lstm_layer_2_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_direction_0"), val = tensor<string, []>("bidirectional")];
|
| 172 |
tensor<bool, []> input_25_lstm_layer_2_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_output_sequence_0"), val = tensor<bool, []>(true)];
|
| 173 |
tensor<string, []> input_25_lstm_layer_2_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
| 174 |
tensor<string, []> input_25_lstm_layer_2_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_cell_activation_0"), val = tensor<string, []>("tanh")];
|
| 175 |
tensor<string, []> input_25_lstm_layer_2_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_activation_0"), val = tensor<string, []>("tanh")];
|
| 176 |
-
tensor<
|
| 177 |
-
tensor<
|
| 178 |
-
tensor<
|
| 179 |
-
tensor<
|
| 180 |
-
tensor<
|
| 181 |
-
tensor<
|
| 182 |
-
tensor<
|
| 183 |
tensor<int32, [2]> split_40_split_sizes_0 = const()[name = tensor<string, []>("split_40_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 184 |
tensor<int32, []> split_40_axis_0 = const()[name = tensor<string, []>("split_40_axis_0"), val = tensor<int32, []>(0)];
|
| 185 |
-
tensor<
|
| 186 |
tensor<int32, []> concat_40_axis_0 = const()[name = tensor<string, []>("concat_40_axis_0"), val = tensor<int32, []>(2)];
|
| 187 |
tensor<bool, []> concat_40_interleave_0 = const()[name = tensor<string, []>("concat_40_interleave_0"), val = tensor<bool, []>(false)];
|
| 188 |
-
tensor<
|
| 189 |
tensor<int32, [1]> input_25_batch_first_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_batch_first_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 190 |
-
tensor<
|
| 191 |
tensor<int32, [2]> split_41_split_sizes_0 = const()[name = tensor<string, []>("split_41_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
|
| 192 |
tensor<int32, []> split_41_axis_0 = const()[name = tensor<string, []>("split_41_axis_0"), val = tensor<int32, []>(0)];
|
| 193 |
-
tensor<
|
| 194 |
tensor<int32, []> concat_41_axis_0 = const()[name = tensor<string, []>("concat_41_axis_0"), val = tensor<int32, []>(2)];
|
| 195 |
tensor<bool, []> concat_41_interleave_0 = const()[name = tensor<string, []>("concat_41_interleave_0"), val = tensor<bool, []>(false)];
|
| 196 |
-
tensor<
|
| 197 |
tensor<int32, [1]> input_25_batch_first_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_batch_first_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
|
| 198 |
-
tensor<
|
| 199 |
tensor<string, []> input_25_batch_first_direction_0 = const()[name = tensor<string, []>("input_25_batch_first_direction_0"), val = tensor<string, []>("bidirectional")];
|
| 200 |
tensor<bool, []> input_25_batch_first_output_sequence_0 = const()[name = tensor<string, []>("input_25_batch_first_output_sequence_0"), val = tensor<bool, []>(true)];
|
| 201 |
tensor<string, []> input_25_batch_first_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
|
| 202 |
tensor<string, []> input_25_batch_first_cell_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_cell_activation_0"), val = tensor<string, []>("tanh")];
|
| 203 |
tensor<string, []> input_25_batch_first_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_activation_0"), val = tensor<string, []>("tanh")];
|
| 204 |
-
tensor<
|
| 205 |
tensor<int32, [3]> input_25_perm_0 = const()[name = tensor<string, []>("input_25_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
|
| 206 |
-
tensor<
|
| 207 |
-
tensor<
|
|
|
|
|
|
|
| 208 |
tensor<fp32, []> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<fp32, []>(0x1.47ae14p-7)];
|
| 209 |
-
tensor<
|
| 210 |
-
tensor<
|
|
|
|
|
|
|
| 211 |
tensor<fp32, []> var_225 = const()[name = tensor<string, []>("op_225"), val = tensor<fp32, []>(0x1.47ae14p-7)];
|
| 212 |
-
tensor<
|
| 213 |
-
tensor<
|
|
|
|
|
|
|
|
|
|
| 214 |
tensor<int32, []> var_231 = const()[name = tensor<string, []>("op_231"), val = tensor<int32, []>(-1)];
|
| 215 |
-
tensor<fp32, [?, 589, 7]>
|
|
|
|
| 216 |
tensor<fp32, []> var_232_epsilon_0 = const()[name = tensor<string, []>("op_232_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
|
| 217 |
tensor<fp32, [?, 589, 7]> log_probs = log(epsilon = var_232_epsilon_0, x = var_232_softmax)[name = tensor<string, []>("op_232")];
|
| 218 |
} -> (log_probs);
|
|
|
|
| 2 |
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3500.14.1"}, {"coremlc-version", "3500.32.1"}, {"coremltools-component-torch", "2.8.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
|
| 3 |
{
|
| 4 |
func main<ios17>(tensor<fp32, [?, 1, 160000]> audio) [FlexibleShapeInformation = tuple<tuple<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>, tuple<tensor<string, []>, dict<tensor<string, []>, dict<tensor<string, []>, tensor<int32, [?]>>>>>((("DefaultShapes", {{"audio", [32, 1, 160000]}}), ("EnumeratedShapes", {{"audio_1_1_10_1_160000_", {{"audio", [10, 1, 160000]}}}, {"audio_1_1_11_1_160000_", {{"audio", [11, 1, 160000]}}}, {"audio_1_1_12_1_160000_", {{"audio", [12, 1, 160000]}}}, {"audio_1_1_13_1_160000_", {{"audio", [13, 1, 160000]}}}, {"audio_1_1_14_1_160000_", {{"audio", [14, 1, 160000]}}}, {"audio_1_1_15_1_160000_", {{"audio", [15, 1, 160000]}}}, {"audio_1_1_16_1_160000_", {{"audio", [16, 1, 160000]}}}, {"audio_1_1_17_1_160000_", {{"audio", [17, 1, 160000]}}}, {"audio_1_1_18_1_160000_", {{"audio", [18, 1, 160000]}}}, {"audio_1_1_19_1_160000_", {{"audio", [19, 1, 160000]}}}, {"audio_1_1_1_1_160000_", {{"audio", [1, 1, 160000]}}}, {"audio_1_1_20_1_160000_", {{"audio", [20, 1, 160000]}}}, {"audio_1_1_21_1_160000_", {{"audio", [21, 1, 160000]}}}, {"audio_1_1_22_1_160000_", {{"audio", [22, 1, 160000]}}}, {"audio_1_1_23_1_160000_", {{"audio", [23, 1, 160000]}}}, {"audio_1_1_24_1_160000_", {{"audio", [24, 1, 160000]}}}, {"audio_1_1_25_1_160000_", {{"audio", [25, 1, 160000]}}}, {"audio_1_1_26_1_160000_", {{"audio", [26, 1, 160000]}}}, {"audio_1_1_27_1_160000_", {{"audio", [27, 1, 160000]}}}, {"audio_1_1_28_1_160000_", {{"audio", [28, 1, 160000]}}}, {"audio_1_1_29_1_160000_", {{"audio", [29, 1, 160000]}}}, {"audio_1_1_2_1_160000_", {{"audio", [2, 1, 160000]}}}, {"audio_1_1_30_1_160000_", {{"audio", [30, 1, 160000]}}}, {"audio_1_1_31_1_160000_", {{"audio", [31, 1, 160000]}}}, {"audio_1_1_32_1_160000_", {{"audio", [32, 1, 160000]}}}, {"audio_1_1_3_1_160000_", {{"audio", [3, 1, 160000]}}}, {"audio_1_1_4_1_160000_", {{"audio", [4, 1, 160000]}}}, {"audio_1_1_5_1_160000_", {{"audio", [5, 1, 160000]}}}, {"audio_1_1_6_1_160000_", {{"audio", [6, 1, 160000]}}}, {"audio_1_1_7_1_160000_", {{"audio", [7, 1, 160000]}}}, {"audio_1_1_8_1_160000_", {{"audio", [8, 1, 160000]}}}, {"audio_1_1_9_1_160000_", {{"audio", [9, 1, 160000]}}}})))] {
|
| 5 |
tensor<fp32, []> var_9 = const()[name = tensor<string, []>("op_9"), val = tensor<fp32, []>(0x1.47ae14p-7)];
|
| 6 |
+
tensor<string, []> audio_to_fp16_dtype_0 = const()[name = tensor<string, []>("audio_to_fp16_dtype_0"), val = tensor<string, []>("fp16")];
tensor<fp16, [1]> sincnet_wav_norm1d_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_wav_norm1d_weight_to_fp16"), val = tensor<fp16, [1]>([0x1.44p-7])];
tensor<fp16, [1]> sincnet_wav_norm1d_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_wav_norm1d_bias_to_fp16"), val = tensor<fp16, [1]>([0x1.734p-5])];
tensor<fp16, []> var_24_to_fp16 = const()[name = tensor<string, []>("op_24_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [?, 1, 160000]> audio_to_fp16 = cast(dtype = audio_to_fp16_dtype_0, x = audio)[name = tensor<string, []>("cast_19")];
tensor<fp16, [?, 1, 160000]> waveform_cast_fp16 = instance_norm(beta = sincnet_wav_norm1d_bias_to_fp16, epsilon = var_24_to_fp16, gamma = sincnet_wav_norm1d_weight_to_fp16, x = audio_to_fp16)[name = tensor<string, []>("waveform_cast_fp16")];
tensor<string, []> outputs_pad_type_0 = const()[name = tensor<string, []>("outputs_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> outputs_strides_0 = const()[name = tensor<string, []>("outputs_strides_0"), val = tensor<int32, [1]>([10])];
tensor<int32, [2]> outputs_pad_0 = const()[name = tensor<string, []>("outputs_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> outputs_dilations_0 = const()[name = tensor<string, []>("outputs_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> outputs_groups_0 = const()[name = tensor<string, []>("outputs_groups_0"), val = tensor<int32, []>(1)];
tensor<fp16, [80, 1, 251]> filters_to_fp16 = const()[name = tensor<string, []>("filters_to_fp16"), val = tensor<fp16, [80, 1, 251]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
tensor<fp16, [?, 80, 15975]> outputs_cast_fp16 = conv(dilations = outputs_dilations_0, groups = outputs_groups_0, pad = outputs_pad_0, pad_type = outputs_pad_type_0, strides = outputs_strides_0, weight = filters_to_fp16, x = waveform_cast_fp16)[name = tensor<string, []>("outputs_cast_fp16")];
tensor<fp16, [?, 80, 15975]> input_1_cast_fp16 = abs(x = outputs_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
tensor<int32, [1]> var_119 = const()[name = tensor<string, []>("op_119"), val = tensor<int32, [1]>([3])];
tensor<int32, [1]> var_120 = const()[name = tensor<string, []>("op_120"), val = tensor<int32, [1]>([3])];
tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<bool, []> input_3_ceil_mode_0 = const()[name = tensor<string, []>("input_3_ceil_mode_0"), val = tensor<bool, []>(false)];
tensor<fp16, [?, 80, 5325]> input_3_cast_fp16 = max_pool(ceil_mode = input_3_ceil_mode_0, kernel_sizes = var_119, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_120, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
tensor<fp16, [80]> sincnet_norm1d_0_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_0_weight_to_fp16"), val = tensor<fp16, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40320)))];
tensor<fp16, [80]> sincnet_norm1d_0_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_0_bias_to_fp16"), val = tensor<fp16, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40576)))];
tensor<fp16, [?, 80, 5325]> input_5_cast_fp16 = instance_norm(beta = sincnet_norm1d_0_bias_to_fp16, epsilon = var_24_to_fp16, gamma = sincnet_norm1d_0_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
tensor<fp16, [?, 80, 5325]> input_7_cast_fp16 = leaky_relu(alpha = var_9, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
tensor<string, []> input_9_pad_type_0 = const()[name = tensor<string, []>("input_9_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> input_9_strides_0 = const()[name = tensor<string, []>("input_9_strides_0"), val = tensor<int32, [1]>([1])];
tensor<int32, [2]> input_9_pad_0 = const()[name = tensor<string, []>("input_9_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> input_9_dilations_0 = const()[name = tensor<string, []>("input_9_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> input_9_groups_0 = const()[name = tensor<string, []>("input_9_groups_0"), val = tensor<int32, []>(1)];
tensor<fp16, [60, 80, 5]> sincnet_conv1d_1_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_conv1d_1_weight_to_fp16"), val = tensor<fp16, [60, 80, 5]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40832)))];
tensor<fp16, [60]> sincnet_conv1d_1_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_conv1d_1_bias_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88896)))];
tensor<fp16, [?, 60, 5321]> input_9_cast_fp16 = conv(bias = sincnet_conv1d_1_bias_to_fp16, dilations = input_9_dilations_0, groups = input_9_groups_0, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = input_9_strides_0, weight = sincnet_conv1d_1_weight_to_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
tensor<int32, [1]> var_135 = const()[name = tensor<string, []>("op_135"), val = tensor<int32, [1]>([3])];
tensor<int32, [1]> var_136 = const()[name = tensor<string, []>("op_136"), val = tensor<int32, [1]>([3])];
tensor<string, []> input_11_pad_type_0 = const()[name = tensor<string, []>("input_11_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [2]> input_11_pad_0 = const()[name = tensor<string, []>("input_11_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<bool, []> input_11_ceil_mode_0 = const()[name = tensor<string, []>("input_11_ceil_mode_0"), val = tensor<bool, []>(false)];
tensor<fp16, [?, 60, 1773]> input_11_cast_fp16 = max_pool(ceil_mode = input_11_ceil_mode_0, kernel_sizes = var_135, pad = input_11_pad_0, pad_type = input_11_pad_type_0, strides = var_136, x = input_9_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
tensor<fp16, [60]> sincnet_norm1d_1_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_1_weight_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89088)))];
tensor<fp16, [60]> sincnet_norm1d_1_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_1_bias_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89280)))];
tensor<fp16, [?, 60, 1773]> input_13_cast_fp16 = instance_norm(beta = sincnet_norm1d_1_bias_to_fp16, epsilon = var_24_to_fp16, gamma = sincnet_norm1d_1_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
tensor<fp16, [?, 60, 1773]> input_15_cast_fp16 = leaky_relu(alpha = var_9, x = input_13_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
tensor<string, []> input_17_pad_type_0 = const()[name = tensor<string, []>("input_17_pad_type_0"), val = tensor<string, []>("valid")];
tensor<int32, [1]> input_17_strides_0 = const()[name = tensor<string, []>("input_17_strides_0"), val = tensor<int32, [1]>([1])];
tensor<int32, [2]> input_17_pad_0 = const()[name = tensor<string, []>("input_17_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [1]> input_17_dilations_0 = const()[name = tensor<string, []>("input_17_dilations_0"), val = tensor<int32, [1]>([1])];
tensor<int32, []> input_17_groups_0 = const()[name = tensor<string, []>("input_17_groups_0"), val = tensor<int32, []>(1)];
tensor<fp16, [60, 60, 5]> sincnet_conv1d_2_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_conv1d_2_weight_to_fp16"), val = tensor<fp16, [60, 60, 5]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(89472)))];
tensor<fp16, [60]> sincnet_conv1d_2_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_conv1d_2_bias_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125568)))];
tensor<fp16, [?, 60, 1769]> input_17_cast_fp16 = conv(bias = sincnet_conv1d_2_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = sincnet_conv1d_2_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
tensor<int32, [1]> var_151 = const()[name = tensor<string, []>("op_151"), val = tensor<int32, [1]>([3])];
tensor<int32, [1]> var_152 = const()[name = tensor<string, []>("op_152"), val = tensor<int32, [1]>([3])];
tensor<string, []> input_19_pad_type_0 = const()[name = tensor<string, []>("input_19_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [2]> input_19_pad_0 = const()[name = tensor<string, []>("input_19_pad_0"), val = tensor<int32, [2]>([0, 0])];
tensor<bool, []> input_19_ceil_mode_0 = const()[name = tensor<string, []>("input_19_ceil_mode_0"), val = tensor<bool, []>(false)];
tensor<fp16, [?, 60, 589]> input_19_cast_fp16 = max_pool(ceil_mode = input_19_ceil_mode_0, kernel_sizes = var_151, pad = input_19_pad_0, pad_type = input_19_pad_type_0, strides = var_152, x = input_17_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
tensor<fp16, [60]> sincnet_norm1d_2_weight_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_2_weight_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125760)))];
tensor<fp16, [60]> sincnet_norm1d_2_bias_to_fp16 = const()[name = tensor<string, []>("sincnet_norm1d_2_bias_to_fp16"), val = tensor<fp16, [60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125952)))];
tensor<fp16, [?, 60, 589]> input_21_cast_fp16 = instance_norm(beta = sincnet_norm1d_2_bias_to_fp16, epsilon = var_24_to_fp16, gamma = sincnet_norm1d_2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
tensor<fp16, [?, 60, 589]> x_cast_fp16 = leaky_relu(alpha = var_9, x = input_21_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
tensor<int32, [3]> var_163 = const()[name = tensor<string, []>("op_163"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, []> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, []>(128)];
tensor<int32, []> var_173 = const()[name = tensor<string, []>("op_173"), val = tensor<int32, []>(8)];
tensor<fp16, [?, 589, 60]> input_23_cast_fp16 = transpose(perm = var_163, x = x_cast_fp16)[name = tensor<string, []>("transpose_6")];
tensor<int32, [3]> var_207_shape_cast_fp16 = shape(x = input_23_cast_fp16)[name = tensor<string, []>("op_207_shape_cast_fp16")];
tensor<int32, []> gather_0_axis_0 = const()[name = tensor<string, []>("gather_0_axis_0"), val = tensor<int32, []>(0)];
tensor<int32, []> gather_0_batch_dims_0 = const()[name = tensor<string, []>("gather_0_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<bool, []> gather_0_validate_indices_0 = const()[name = tensor<string, []>("gather_0_validate_indices_0"), val = tensor<bool, []>(false)];
tensor<string, []> var_207_shape_cast_fp16_to_int16_dtype_0 = const()[name = tensor<string, []>("op_207_shape_cast_fp16_to_int16_dtype_0"), val = tensor<string, []>("int16")];
tensor<uint16, []> gather_0_indices_0_to_uint16 = const()[name = tensor<string, []>("gather_0_indices_0_to_uint16"), val = tensor<uint16, []>(0)];
tensor<int16, [3]> var_207_shape_cast_fp16_to_int16 = cast(dtype = var_207_shape_cast_fp16_to_int16_dtype_0, x = var_207_shape_cast_fp16)[name = tensor<string, []>("cast_18")];
tensor<int16, []> gather_0_cast_uint16 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = gather_0_indices_0_to_uint16, validate_indices = gather_0_validate_indices_0, x = var_207_shape_cast_fp16_to_int16)[name = tensor<string, []>("gather_0_cast_uint16")];
tensor<string, []> gather_0_cast_uint16_to_int32_dtype_0 = const()[name = tensor<string, []>("gather_0_cast_uint16_to_int32_dtype_0"), val = tensor<string, []>("int32")];
tensor<int32, []> concat_0_axis_0 = const()[name = tensor<string, []>("concat_0_axis_0"), val = tensor<int32, []>(0)];
tensor<bool, []> concat_0_interleave_0 = const()[name = tensor<string, []>("concat_0_interleave_0"), val = tensor<bool, []>(false)];
tensor<int32, []> gather_0_cast_uint16_to_int32 = cast(dtype = gather_0_cast_uint16_to_int32_dtype_0, x = gather_0_cast_uint16)[name = tensor<string, []>("cast_17")];
tensor<int32, [3]> concat_0 = concat(axis = concat_0_axis_0, interleave = concat_0_interleave_0, values = (var_173, gather_0_cast_uint16_to_int32, var_172))[name = tensor<string, []>("concat_0")];
tensor<fp16, []> hx_1_value_0_to_fp16 = const()[name = tensor<string, []>("hx_1_value_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
tensor<fp16, [8, ?, 128]> hx_1_cast_fp16 = fill(shape = concat_0, value = hx_1_value_0_to_fp16)[name = tensor<string, []>("hx_1_cast_fp16")];
tensor<int32, [3]> input_23_batch_first_transpose_perm_0 = const()[name = tensor<string, []>("input_23_batch_first_transpose_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(4)];
tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [2, ?, 128]> split_0_cast_fp16_0, tensor<fp16, [2, ?, 128]> split_0_cast_fp16_1, tensor<fp16, [2, ?, 128]> split_0_cast_fp16_2, tensor<fp16, [2, ?, 128]> split_0_cast_fp16_3 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = hx_1_cast_fp16)[name = tensor<string, []>("split_0_cast_fp16")];
tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(4)];
tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [2, ?, 128]> split_1_cast_fp16_0, tensor<fp16, [2, ?, 128]> split_1_cast_fp16_1, tensor<fp16, [2, ?, 128]> split_1_cast_fp16_2, tensor<fp16, [2, ?, 128]> split_1_cast_fp16_3 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = hx_1_cast_fp16)[name = tensor<string, []>("split_1_cast_fp16")];
tensor<int32, [2]> split_10_split_sizes_0 = const()[name = tensor<string, []>("split_10_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_10_axis_0 = const()[name = tensor<string, []>("split_10_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_10_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_10_cast_fp16_1 = split(axis = split_10_axis_0, split_sizes = split_10_split_sizes_0, x = split_0_cast_fp16_0)[name = tensor<string, []>("split_10_cast_fp16")];
tensor<int32, []> concat_10_axis_0 = const()[name = tensor<string, []>("concat_10_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_10_interleave_0 = const()[name = tensor<string, []>("concat_10_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_10_cast_fp16 = concat(axis = concat_10_axis_0, interleave = concat_10_interleave_0, values = (split_10_cast_fp16_0, split_10_cast_fp16_1))[name = tensor<string, []>("concat_10_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_0_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_0_lstm_h0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_0_lstm_h0_reshaped_axes_0, x = concat_10_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_0_lstm_h0_reshaped_cast_fp16")];
tensor<int32, [2]> split_11_split_sizes_0 = const()[name = tensor<string, []>("split_11_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_11_axis_0 = const()[name = tensor<string, []>("split_11_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_11_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_11_cast_fp16_1 = split(axis = split_11_axis_0, split_sizes = split_11_split_sizes_0, x = split_1_cast_fp16_0)[name = tensor<string, []>("split_11_cast_fp16")];
tensor<int32, []> concat_11_axis_0 = const()[name = tensor<string, []>("concat_11_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_11_interleave_0 = const()[name = tensor<string, []>("concat_11_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_11_cast_fp16 = concat(axis = concat_11_axis_0, interleave = concat_11_interleave_0, values = (split_11_cast_fp16_0, split_11_cast_fp16_1))[name = tensor<string, []>("concat_11_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_0_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_0_lstm_c0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_0_lstm_c0_reshaped_axes_0, x = concat_11_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_0_lstm_c0_reshaped_cast_fp16")];
tensor<string, []> input_25_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_direction_0"), val = tensor<string, []>("bidirectional")];
tensor<bool, []> input_25_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
tensor<string, []> input_25_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
tensor<string, []> input_25_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
tensor<string, []> input_25_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
tensor<fp16, [512, 60]> concat_6_to_fp16 = const()[name = tensor<string, []>("concat_6_to_fp16"), val = tensor<fp16, [512, 60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126144)))];
tensor<fp16, [512, 128]> concat_7_to_fp16 = const()[name = tensor<string, []>("concat_7_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(187648)))];
tensor<fp16, [512]> add_0_to_fp16 = const()[name = tensor<string, []>("add_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(318784)))];
tensor<fp16, [512, 60]> concat_8_to_fp16 = const()[name = tensor<string, []>("concat_8_to_fp16"), val = tensor<fp16, [512, 60]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(319872)))];
tensor<fp16, [512, 128]> concat_9_to_fp16 = const()[name = tensor<string, []>("concat_9_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(381376)))];
tensor<fp16, [512]> add_1_to_fp16 = const()[name = tensor<string, []>("add_1_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(512512)))];
tensor<fp16, [589, ?, 60]> input_23_batch_first_transpose_cast_fp16 = transpose(perm = input_23_batch_first_transpose_perm_0, x = input_23_cast_fp16)[name = tensor<string, []>("transpose_5")];
tensor<fp16, [589, ?, 256]> input_25_lstm_layer_0_cast_fp16_0, tensor<fp16, [?, 256]> input_25_lstm_layer_0_cast_fp16_1, tensor<fp16, [?, 256]> input_25_lstm_layer_0_cast_fp16_2 = lstm(activation = input_25_lstm_layer_0_activation_0, bias = add_0_to_fp16, bias_back = add_1_to_fp16, cell_activation = input_25_lstm_layer_0_cell_activation_0, direction = input_25_lstm_layer_0_direction_0, initial_c = input_25_lstm_layer_0_lstm_c0_reshaped_cast_fp16, initial_h = input_25_lstm_layer_0_lstm_h0_reshaped_cast_fp16, output_sequence = input_25_lstm_layer_0_output_sequence_0, recurrent_activation = input_25_lstm_layer_0_recurrent_activation_0, weight_hh = concat_7_to_fp16, weight_hh_back = concat_9_to_fp16, weight_ih = concat_6_to_fp16, weight_ih_back = concat_8_to_fp16, x = input_23_batch_first_transpose_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_0_cast_fp16")];
tensor<int32, [2]> split_20_split_sizes_0 = const()[name = tensor<string, []>("split_20_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_20_axis_0 = const()[name = tensor<string, []>("split_20_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_20_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_20_cast_fp16_1 = split(axis = split_20_axis_0, split_sizes = split_20_split_sizes_0, x = split_0_cast_fp16_1)[name = tensor<string, []>("split_20_cast_fp16")];
tensor<int32, []> concat_20_axis_0 = const()[name = tensor<string, []>("concat_20_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_20_interleave_0 = const()[name = tensor<string, []>("concat_20_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_20_cast_fp16 = concat(axis = concat_20_axis_0, interleave = concat_20_interleave_0, values = (split_20_cast_fp16_0, split_20_cast_fp16_1))[name = tensor<string, []>("concat_20_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_1_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_1_lstm_h0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_1_lstm_h0_reshaped_axes_0, x = concat_20_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_1_lstm_h0_reshaped_cast_fp16")];
tensor<int32, [2]> split_21_split_sizes_0 = const()[name = tensor<string, []>("split_21_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_21_axis_0 = const()[name = tensor<string, []>("split_21_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_21_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_21_cast_fp16_1 = split(axis = split_21_axis_0, split_sizes = split_21_split_sizes_0, x = split_1_cast_fp16_1)[name = tensor<string, []>("split_21_cast_fp16")];
tensor<int32, []> concat_21_axis_0 = const()[name = tensor<string, []>("concat_21_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_21_interleave_0 = const()[name = tensor<string, []>("concat_21_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_21_cast_fp16 = concat(axis = concat_21_axis_0, interleave = concat_21_interleave_0, values = (split_21_cast_fp16_0, split_21_cast_fp16_1))[name = tensor<string, []>("concat_21_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_1_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_1_lstm_c0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_1_lstm_c0_reshaped_axes_0, x = concat_21_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_1_lstm_c0_reshaped_cast_fp16")];
tensor<string, []> input_25_lstm_layer_1_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_direction_0"), val = tensor<string, []>("bidirectional")];
tensor<bool, []> input_25_lstm_layer_1_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_output_sequence_0"), val = tensor<bool, []>(true)];
tensor<string, []> input_25_lstm_layer_1_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
tensor<string, []> input_25_lstm_layer_1_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_cell_activation_0"), val = tensor<string, []>("tanh")];
tensor<string, []> input_25_lstm_layer_1_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_1_activation_0"), val = tensor<string, []>("tanh")];
tensor<fp16, [512, 256]> concat_16_to_fp16 = const()[name = tensor<string, []>("concat_16_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(513600)))];
tensor<fp16, [512, 128]> concat_17_to_fp16 = const()[name = tensor<string, []>("concat_17_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(775808)))];
tensor<fp16, [512]> add_2_to_fp16 = const()[name = tensor<string, []>("add_2_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(906944)))];
tensor<fp16, [512, 256]> concat_18_to_fp16 = const()[name = tensor<string, []>("concat_18_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(908032)))];
tensor<fp16, [512, 128]> concat_19_to_fp16 = const()[name = tensor<string, []>("concat_19_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1170240)))];
tensor<fp16, [512]> add_3_to_fp16 = const()[name = tensor<string, []>("add_3_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1301376)))];
tensor<fp16, [589, ?, 256]> input_25_lstm_layer_1_cast_fp16_0, tensor<fp16, [?, 256]> input_25_lstm_layer_1_cast_fp16_1, tensor<fp16, [?, 256]> input_25_lstm_layer_1_cast_fp16_2 = lstm(activation = input_25_lstm_layer_1_activation_0, bias = add_2_to_fp16, bias_back = add_3_to_fp16, cell_activation = input_25_lstm_layer_1_cell_activation_0, direction = input_25_lstm_layer_1_direction_0, initial_c = input_25_lstm_layer_1_lstm_c0_reshaped_cast_fp16, initial_h = input_25_lstm_layer_1_lstm_h0_reshaped_cast_fp16, output_sequence = input_25_lstm_layer_1_output_sequence_0, recurrent_activation = input_25_lstm_layer_1_recurrent_activation_0, weight_hh = concat_17_to_fp16, weight_hh_back = concat_19_to_fp16, weight_ih = concat_16_to_fp16, weight_ih_back = concat_18_to_fp16, x = input_25_lstm_layer_0_cast_fp16_0)[name = tensor<string, []>("input_25_lstm_layer_1_cast_fp16")];
tensor<int32, [2]> split_30_split_sizes_0 = const()[name = tensor<string, []>("split_30_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_30_axis_0 = const()[name = tensor<string, []>("split_30_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_30_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_30_cast_fp16_1 = split(axis = split_30_axis_0, split_sizes = split_30_split_sizes_0, x = split_0_cast_fp16_2)[name = tensor<string, []>("split_30_cast_fp16")];
tensor<int32, []> concat_30_axis_0 = const()[name = tensor<string, []>("concat_30_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_30_interleave_0 = const()[name = tensor<string, []>("concat_30_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_30_cast_fp16 = concat(axis = concat_30_axis_0, interleave = concat_30_interleave_0, values = (split_30_cast_fp16_0, split_30_cast_fp16_1))[name = tensor<string, []>("concat_30_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_2_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_2_lstm_h0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_2_lstm_h0_reshaped_axes_0, x = concat_30_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_2_lstm_h0_reshaped_cast_fp16")];
tensor<int32, [2]> split_31_split_sizes_0 = const()[name = tensor<string, []>("split_31_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_31_axis_0 = const()[name = tensor<string, []>("split_31_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_31_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_31_cast_fp16_1 = split(axis = split_31_axis_0, split_sizes = split_31_split_sizes_0, x = split_1_cast_fp16_2)[name = tensor<string, []>("split_31_cast_fp16")];
tensor<int32, []> concat_31_axis_0 = const()[name = tensor<string, []>("concat_31_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_31_interleave_0 = const()[name = tensor<string, []>("concat_31_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_31_cast_fp16 = concat(axis = concat_31_axis_0, interleave = concat_31_interleave_0, values = (split_31_cast_fp16_0, split_31_cast_fp16_1))[name = tensor<string, []>("concat_31_cast_fp16")];
tensor<int32, [1]> input_25_lstm_layer_2_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_lstm_layer_2_lstm_c0_reshaped_cast_fp16 = squeeze(axes = input_25_lstm_layer_2_lstm_c0_reshaped_axes_0, x = concat_31_cast_fp16)[name = tensor<string, []>("input_25_lstm_layer_2_lstm_c0_reshaped_cast_fp16")];
tensor<string, []> input_25_lstm_layer_2_direction_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_direction_0"), val = tensor<string, []>("bidirectional")];
tensor<bool, []> input_25_lstm_layer_2_output_sequence_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_output_sequence_0"), val = tensor<bool, []>(true)];
tensor<string, []> input_25_lstm_layer_2_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
tensor<string, []> input_25_lstm_layer_2_cell_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_cell_activation_0"), val = tensor<string, []>("tanh")];
tensor<string, []> input_25_lstm_layer_2_activation_0 = const()[name = tensor<string, []>("input_25_lstm_layer_2_activation_0"), val = tensor<string, []>("tanh")];
tensor<fp16, [512, 256]> concat_26_to_fp16 = const()[name = tensor<string, []>("concat_26_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1302464)))];
tensor<fp16, [512, 128]> concat_27_to_fp16 = const()[name = tensor<string, []>("concat_27_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1564672)))];
tensor<fp16, [512]> add_4_to_fp16 = const()[name = tensor<string, []>("add_4_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1695808)))];
tensor<fp16, [512, 256]> concat_28_to_fp16 = const()[name = tensor<string, []>("concat_28_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1696896)))];
tensor<fp16, [512, 128]> concat_29_to_fp16 = const()[name = tensor<string, []>("concat_29_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1959104)))];
tensor<fp16, [512]> add_5_to_fp16 = const()[name = tensor<string, []>("add_5_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2090240)))];
tensor<fp16, [589, ?, 256]> input_25_lstm_layer_2_cast_fp16_0, tensor<fp16, [?, 256]> input_25_lstm_layer_2_cast_fp16_1, tensor<fp16, [?, 256]> input_25_lstm_layer_2_cast_fp16_2 = lstm(activation = input_25_lstm_layer_2_activation_0, bias = add_4_to_fp16, bias_back = add_5_to_fp16, cell_activation = input_25_lstm_layer_2_cell_activation_0, direction = input_25_lstm_layer_2_direction_0, initial_c = input_25_lstm_layer_2_lstm_c0_reshaped_cast_fp16, initial_h = input_25_lstm_layer_2_lstm_h0_reshaped_cast_fp16, output_sequence = input_25_lstm_layer_2_output_sequence_0, recurrent_activation = input_25_lstm_layer_2_recurrent_activation_0, weight_hh = concat_27_to_fp16, weight_hh_back = concat_29_to_fp16, weight_ih = concat_26_to_fp16, weight_ih_back = concat_28_to_fp16, x = input_25_lstm_layer_1_cast_fp16_0)[name = tensor<string, []>("input_25_lstm_layer_2_cast_fp16")];
tensor<int32, [2]> split_40_split_sizes_0 = const()[name = tensor<string, []>("split_40_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_40_axis_0 = const()[name = tensor<string, []>("split_40_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_40_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_40_cast_fp16_1 = split(axis = split_40_axis_0, split_sizes = split_40_split_sizes_0, x = split_0_cast_fp16_3)[name = tensor<string, []>("split_40_cast_fp16")];
tensor<int32, []> concat_40_axis_0 = const()[name = tensor<string, []>("concat_40_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_40_interleave_0 = const()[name = tensor<string, []>("concat_40_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_40_cast_fp16 = concat(axis = concat_40_axis_0, interleave = concat_40_interleave_0, values = (split_40_cast_fp16_0, split_40_cast_fp16_1))[name = tensor<string, []>("concat_40_cast_fp16")];
tensor<int32, [1]> input_25_batch_first_lstm_h0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_batch_first_lstm_h0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_batch_first_lstm_h0_reshaped_cast_fp16 = squeeze(axes = input_25_batch_first_lstm_h0_reshaped_axes_0, x = concat_40_cast_fp16)[name = tensor<string, []>("input_25_batch_first_lstm_h0_reshaped_cast_fp16")];
tensor<int32, [2]> split_41_split_sizes_0 = const()[name = tensor<string, []>("split_41_split_sizes_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, []> split_41_axis_0 = const()[name = tensor<string, []>("split_41_axis_0"), val = tensor<int32, []>(0)];
tensor<fp16, [1, ?, 128]> split_41_cast_fp16_0, tensor<fp16, [1, ?, 128]> split_41_cast_fp16_1 = split(axis = split_41_axis_0, split_sizes = split_41_split_sizes_0, x = split_1_cast_fp16_3)[name = tensor<string, []>("split_41_cast_fp16")];
tensor<int32, []> concat_41_axis_0 = const()[name = tensor<string, []>("concat_41_axis_0"), val = tensor<int32, []>(2)];
tensor<bool, []> concat_41_interleave_0 = const()[name = tensor<string, []>("concat_41_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, ?, 256]> concat_41_cast_fp16 = concat(axis = concat_41_axis_0, interleave = concat_41_interleave_0, values = (split_41_cast_fp16_0, split_41_cast_fp16_1))[name = tensor<string, []>("concat_41_cast_fp16")];
tensor<int32, [1]> input_25_batch_first_lstm_c0_reshaped_axes_0 = const()[name = tensor<string, []>("input_25_batch_first_lstm_c0_reshaped_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [?, 256]> input_25_batch_first_lstm_c0_reshaped_cast_fp16 = squeeze(axes = input_25_batch_first_lstm_c0_reshaped_axes_0, x = concat_41_cast_fp16)[name = tensor<string, []>("input_25_batch_first_lstm_c0_reshaped_cast_fp16")];
tensor<string, []> input_25_batch_first_direction_0 = const()[name = tensor<string, []>("input_25_batch_first_direction_0"), val = tensor<string, []>("bidirectional")];
tensor<bool, []> input_25_batch_first_output_sequence_0 = const()[name = tensor<string, []>("input_25_batch_first_output_sequence_0"), val = tensor<bool, []>(true)];
tensor<string, []> input_25_batch_first_recurrent_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
tensor<string, []> input_25_batch_first_cell_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_cell_activation_0"), val = tensor<string, []>("tanh")];
tensor<string, []> input_25_batch_first_activation_0 = const()[name = tensor<string, []>("input_25_batch_first_activation_0"), val = tensor<string, []>("tanh")];
tensor<fp16, [512, 256]> concat_36_to_fp16 = const()[name = tensor<string, []>("concat_36_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2091328)))];
tensor<fp16, [512, 128]> concat_37_to_fp16 = const()[name = tensor<string, []>("concat_37_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2353536)))];
tensor<fp16, [512]> add_6_to_fp16 = const()[name = tensor<string, []>("add_6_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2484672)))];
tensor<fp16, [512, 256]> concat_38_to_fp16 = const()[name = tensor<string, []>("concat_38_to_fp16"), val = tensor<fp16, [512, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2485760)))];
tensor<fp16, [512, 128]> concat_39_to_fp16 = const()[name = tensor<string, []>("concat_39_to_fp16"), val = tensor<fp16, [512, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2747968)))];
tensor<fp16, [512]> add_7_to_fp16 = const()[name = tensor<string, []>("add_7_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2879104)))];
tensor<fp16, [589, ?, 256]> input_25_batch_first_cast_fp16_0, tensor<fp16, [?, 256]> input_25_batch_first_cast_fp16_1, tensor<fp16, [?, 256]> input_25_batch_first_cast_fp16_2 = lstm(activation = input_25_batch_first_activation_0, bias = add_6_to_fp16, bias_back = add_7_to_fp16, cell_activation = input_25_batch_first_cell_activation_0, direction = input_25_batch_first_direction_0, initial_c = input_25_batch_first_lstm_c0_reshaped_cast_fp16, initial_h = input_25_batch_first_lstm_h0_reshaped_cast_fp16, output_sequence = input_25_batch_first_output_sequence_0, recurrent_activation = input_25_batch_first_recurrent_activation_0, weight_hh = concat_37_to_fp16, weight_hh_back = concat_39_to_fp16, weight_ih = concat_36_to_fp16, weight_ih_back = concat_38_to_fp16, x = input_25_lstm_layer_2_cast_fp16_0)[name = tensor<string, []>("input_25_batch_first_cast_fp16")];
tensor<int32, [3]> input_25_perm_0 = const()[name = tensor<string, []>("input_25_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
tensor<fp16, [128, 256]> linear_0_weight_to_fp16 = const()[name = tensor<string, []>("linear_0_weight_to_fp16"), val = tensor<fp16, [128, 256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2880192)))];
tensor<fp16, [128]> linear_0_bias_to_fp16 = const()[name = tensor<string, []>("linear_0_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2945792)))];
tensor<fp16, [?, 589, 256]> input_25_cast_fp16 = transpose(perm = input_25_perm_0, x = input_25_batch_first_cast_fp16_0)[name = tensor<string, []>("transpose_4")];
tensor<fp16, [?, 589, 128]> linear_0_cast_fp16 = linear(bias = linear_0_bias_to_fp16, weight = linear_0_weight_to_fp16, x = input_25_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
tensor<fp32, []> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<fp32, []>(0x1.47ae14p-7)];
tensor<fp16, [?, 589, 128]> input_29_cast_fp16 = leaky_relu(alpha = var_220, x = linear_0_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
tensor<fp16, [128, 128]> linear_1_weight_to_fp16 = const()[name = tensor<string, []>("linear_1_weight_to_fp16"), val = tensor<fp16, [128, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2946112)))];
tensor<fp16, [128]> linear_1_bias_to_fp16 = const()[name = tensor<string, []>("linear_1_bias_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2978944)))];
tensor<fp16, [?, 589, 128]> linear_1_cast_fp16 = linear(bias = linear_1_bias_to_fp16, weight = linear_1_weight_to_fp16, x = input_29_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
tensor<fp32, []> var_225 = const()[name = tensor<string, []>("op_225"), val = tensor<fp32, []>(0x1.47ae14p-7)];
tensor<fp16, [?, 589, 128]> input_33_cast_fp16 = leaky_relu(alpha = var_225, x = linear_1_cast_fp16)[name = tensor<string, []>("input_33_cast_fp16")];
tensor<fp16, [7, 128]> classifier_weight_to_fp16 = const()[name = tensor<string, []>("classifier_weight_to_fp16"), val = tensor<fp16, [7, 128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2979264)))];
tensor<fp16, [7]> classifier_bias_to_fp16 = const()[name = tensor<string, []>("classifier_bias_to_fp16"), val = tensor<fp16, [7]>([-0x1.01p+0, 0x1.67cp-2, 0x1.3d8p-1, 0x1.c8cp-2, -0x1.444p-2, -0x1.59p-1, -0x1.8fcp-2])];
tensor<fp16, [?, 589, 7]> linear_2_cast_fp16 = linear(bias = classifier_bias_to_fp16, weight = classifier_weight_to_fp16, x = input_33_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
tensor<string, []> linear_2_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("linear_2_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
tensor<int32, []> var_231 = const()[name = tensor<string, []>("op_231"), val = tensor<int32, []>(-1)];
tensor<fp32, [?, 589, 7]> linear_2_cast_fp16_to_fp32 = cast(dtype = linear_2_cast_fp16_to_fp32_dtype_0, x = linear_2_cast_fp16)[name = tensor<string, []>("cast_16")];
tensor<fp32, [?, 589, 7]> var_232_softmax = softmax(axis = var_231, x = linear_2_cast_fp16_to_fp32)[name = tensor<string, []>("op_232_softmax")];
tensor<fp32, []> var_232_epsilon_0 = const()[name = tensor<string, []>("op_232_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
tensor<fp32, [?, 589, 7]> log_probs = log(epsilon = var_232_epsilon_0, x = var_232_softmax)[name = tensor<string, []>("op_232")];
} -> (log_probs);
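The program above is the rebuilt Segmentation.mlmodelc: the SincNet convolutions, the four bidirectional LSTM layers, and the linear classifier now run in fp16, only the final softmax/log stays in fp32, the input accepts enumerated batch sizes 1-32 of ten-second 16 kHz chunks (160000 samples), and the output log_probs has shape (batch, 589, 7). As a rough illustration only, not part of this commit, the compiled model could be exercised from Python with coremltools as sketched below; the local path and the use of CompiledMLModel are assumptions, while the input/output names come from the MIL above.

import numpy as np
import coremltools as ct

# Load the compiled model directory (assumed local path; needs the Core ML runtime, i.e. macOS).
model = ct.models.CompiledMLModel("Segmentation.mlmodelc")

# One 10 s chunk of 16 kHz mono audio; any batch size from 1 to 32 matches the enumerated shapes in model.mil.
audio = np.random.randn(1, 1, 160000).astype(np.float32)

out = model.predict({"audio": audio})
log_probs = out["log_probs"]  # (1, 589, 7): 589 frames x 7 output classes
print(log_probs.shape)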
Segmentation.mlmodelc/weights/weight.bin
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
version https://git-lfs.github.com/spec/v1
+oid sha256:0026d3483c74bc989fdd1649c5765ca5395235a6d140a698a2d87b95cddf56ae
+size 2981120
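The pointer above records only the new LFS object's SHA-256 and byte size. As a small illustrative check, not part of the commit and with an assumed local path, a downloaded weight.bin can be verified against the pointer like this:

import hashlib
from pathlib import Path

# Assumed local path to the fetched LFS object.
path = Path("Segmentation.mlmodelc/weights/weight.bin")

# Compare the file's hash and size with the values in the LFS pointer.
digest = hashlib.sha256(path.read_bytes()).hexdigest()
print(digest == "0026d3483c74bc989fdd1649c5765ca5395235a6d140a698a2d87b95cddf56ae")
print(path.stat().st_size == 2981120)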