program(1.0) [buildInfo = dict, tensor>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3405.2.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
{
    func main(tensor c_in, tensor h_in, tensor target_lengths, tensor targets) [FlexibleShapeInformation = tuple, dict, tensor>>, tuple, dict, list, ?>>>>((("DefaultShapes", {{"targets", [1, 1]}}), ("RangeDims", {{"targets", [[1, 1], [1, 1000]]}})))] {
            tensor input_axis_0 = const()[name = tensor("input_axis_0"), val = tensor(0)];
            tensor embed_weight_to_fp16 = const()[name = tensor("embed_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))];
            tensor input_cast_fp16 = gather(axis = input_axis_0, indices = targets, x = embed_weight_to_fp16)[name = tensor("input_cast_fp16")];
            tensor input_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("input_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor split_0_num_splits_0 = const()[name = tensor("split_0_num_splits_0"), val = tensor(2)];
            tensor split_0_axis_0 = const()[name = tensor("split_0_axis_0"), val = tensor(0)];
            tensor h_in_to_fp16_dtype_0 = const()[name = tensor("h_in_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor h_in_to_fp16 = cast(dtype = h_in_to_fp16_dtype_0, x = h_in)[name = tensor("cast_12")];
            tensor split_0_cast_fp16_0, tensor split_0_cast_fp16_1 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = h_in_to_fp16)[name = tensor("split_0_cast_fp16")];
            tensor split_1_num_splits_0 = const()[name = tensor("split_1_num_splits_0"), val = tensor(2)];
            tensor split_1_axis_0 = const()[name = tensor("split_1_axis_0"), val = tensor(0)];
            tensor c_in_to_fp16_dtype_0 = const()[name = tensor("c_in_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor c_in_to_fp16 = cast(dtype = c_in_to_fp16_dtype_0, x = c_in)[name = tensor("cast_11")];
            tensor split_1_cast_fp16_0, tensor split_1_cast_fp16_1 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = c_in_to_fp16)[name = tensor("split_1_cast_fp16")];
            tensor concat_0 = const()[name = tensor("concat_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10487168)))];
            tensor concat_1 = const()[name = tensor("concat_1"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10497472)))];
            tensor concat_2 = const()[name = tensor("concat_2"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17051136)))];
            tensor var_25_lstm_layer_0_lstm_h0_squeeze_axes_0 = const()[name = tensor("op_25_lstm_layer_0_lstm_h0_squeeze_axes_0"), val = tensor([0])];
            tensor var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_0)[name = tensor("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16")];
            tensor var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor var_25_lstm_layer_0_lstm_c0_squeeze_axes_0 = const()[name = tensor("op_25_lstm_layer_0_lstm_c0_squeeze_axes_0"), val = tensor([0])];
            tensor var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_layer_0_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_0)[name = tensor("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16")];
            tensor var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor var_25_lstm_layer_0_direction_0 = const()[name = tensor("op_25_lstm_layer_0_direction_0"), val = tensor("forward")];
            tensor var_25_lstm_layer_0_output_sequence_0 = const()[name = tensor("op_25_lstm_layer_0_output_sequence_0"), val = tensor(true)];
            tensor var_25_lstm_layer_0_recurrent_activation_0 = const()[name = tensor("op_25_lstm_layer_0_recurrent_activation_0"), val = tensor("sigmoid")];
            tensor var_25_lstm_layer_0_cell_activation_0 = const()[name = tensor("op_25_lstm_layer_0_cell_activation_0"), val = tensor("tanh")];
            tensor var_25_lstm_layer_0_activation_0 = const()[name = tensor("op_25_lstm_layer_0_activation_0"), val = tensor("tanh")];
            tensor var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16)[name = tensor("cast_9")];
            tensor var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16)[name = tensor("cast_10")];
            tensor input_cast_fp16_to_fp32 = cast(dtype = input_cast_fp16_to_fp32_dtype_0, x = input_cast_fp16)[name = tensor("cast_13")];
            tensor var_25_lstm_layer_0_0, tensor var_25_lstm_layer_0_1, tensor var_25_lstm_layer_0_2 = lstm(activation = var_25_lstm_layer_0_activation_0, bias = concat_0, cell_activation = var_25_lstm_layer_0_cell_activation_0, direction = var_25_lstm_layer_0_direction_0, initial_c = var_25_lstm_layer_0_lstm_c0_squeeze_cast_fp16_to_fp32, initial_h = var_25_lstm_layer_0_lstm_h0_squeeze_cast_fp16_to_fp32, output_sequence = var_25_lstm_layer_0_output_sequence_0, recurrent_activation = var_25_lstm_layer_0_recurrent_activation_0, weight_hh = concat_2, weight_ih = concat_1, x = input_cast_fp16_to_fp32)[name = tensor("op_25_lstm_layer_0")];
            tensor concat_3 = const()[name = tensor("concat_3"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23604800)))];
            tensor concat_4 = const()[name = tensor("concat_4"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23615104)))];
            tensor concat_5 = const()[name = tensor("concat_5"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30168768)))];
            tensor var_25_lstm_h0_squeeze_axes_0 = const()[name = tensor("op_25_lstm_h0_squeeze_axes_0"), val = tensor([0])];
            tensor var_25_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_1)[name = tensor("op_25_lstm_h0_squeeze_cast_fp16")];
            tensor var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor var_25_lstm_c0_squeeze_axes_0 = const()[name = tensor("op_25_lstm_c0_squeeze_axes_0"), val = tensor([0])];
            tensor var_25_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_25_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_1)[name = tensor("op_25_lstm_c0_squeeze_cast_fp16")];
            tensor var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor var_25_direction_0 = const()[name = tensor("op_25_direction_0"), val = tensor("forward")];
            tensor var_25_output_sequence_0 = const()[name = tensor("op_25_output_sequence_0"), val = tensor(true)];
            tensor var_25_recurrent_activation_0 = const()[name = tensor("op_25_recurrent_activation_0"), val = tensor("sigmoid")];
            tensor var_25_cell_activation_0 = const()[name = tensor("op_25_cell_activation_0"), val = tensor("tanh")];
            tensor var_25_activation_0 = const()[name = tensor("op_25_activation_0"), val = tensor("tanh")];
            tensor var_25_lstm_c0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_c0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_c0_squeeze_cast_fp16)[name = tensor("cast_7")];
            tensor var_25_lstm_h0_squeeze_cast_fp16_to_fp32 = cast(dtype = var_25_lstm_h0_squeeze_cast_fp16_to_fp32_dtype_0, x = var_25_lstm_h0_squeeze_cast_fp16)[name = tensor("cast_8")];
            tensor decoder_output, tensor var_25_1, tensor var_25_2 = lstm(activation = var_25_activation_0, bias = concat_3, cell_activation = var_25_cell_activation_0, direction = var_25_direction_0, initial_c = var_25_lstm_c0_squeeze_cast_fp16_to_fp32, initial_h = var_25_lstm_h0_squeeze_cast_fp16_to_fp32, output_sequence = var_25_output_sequence_0, recurrent_activation = var_25_recurrent_activation_0, weight_hh = concat_5, weight_ih = concat_4, x = var_25_lstm_layer_0_0)[name = tensor("op_25")];
            tensor var_26_axis_0 = const()[name = tensor("op_26_axis_0"), val = tensor(0)];
            tensor var_25_lstm_layer_0_1_to_fp16_dtype_0 = const()[name = tensor("op_25_lstm_layer_0_1_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor var_25_1_to_fp16_dtype_0 = const()[name = tensor("op_25_1_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor var_25_1_to_fp16 = cast(dtype = var_25_1_to_fp16_dtype_0, x = var_25_1)[name = tensor("cast_5")];
            tensor var_25_lstm_layer_0_1_to_fp16 = cast(dtype = var_25_lstm_layer_0_1_to_fp16_dtype_0, x = var_25_lstm_layer_0_1)[name = tensor("cast_6")];
            tensor var_26_cast_fp16 = stack(axis = var_26_axis_0, values = (var_25_lstm_layer_0_1_to_fp16, var_25_1_to_fp16))[name = tensor("op_26_cast_fp16")];
            tensor var_26_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_26_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor var_27_axis_0 = const()[name = tensor("op_27_axis_0"), val = tensor(0)];
            tensor var_25_lstm_layer_0_2_to_fp16_dtype_0 = const()[name = tensor("op_25_lstm_layer_0_2_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor var_25_2_to_fp16_dtype_0 = const()[name = tensor("op_25_2_to_fp16_dtype_0"), val = tensor("fp16")];
            tensor var_25_2_to_fp16 = cast(dtype = var_25_2_to_fp16_dtype_0, x = var_25_2)[name = tensor("cast_2")];
            tensor var_25_lstm_layer_0_2_to_fp16 = cast(dtype = var_25_lstm_layer_0_2_to_fp16_dtype_0, x = var_25_lstm_layer_0_2)[name = tensor("cast_3")];
            tensor var_27_cast_fp16 = stack(axis = var_27_axis_0, values = (var_25_lstm_layer_0_2_to_fp16, var_25_2_to_fp16))[name = tensor("op_27_cast_fp16")];
            tensor var_27_cast_fp16_to_fp32_dtype_0 = const()[name = tensor("op_27_cast_fp16_to_fp32_dtype_0"), val = tensor("fp32")];
            tensor c_out = cast(dtype = var_27_cast_fp16_to_fp32_dtype_0, x = var_27_cast_fp16)[name = tensor("cast_1")];
            tensor h_out = cast(dtype = var_26_cast_fp16_to_fp32_dtype_0, x = var_26_cast_fp16)[name = tensor("cast_4")];
            tensor target_lengths_tmp = identity(x = target_lengths)[name = tensor("target_lengths_tmp")];
    } -> (decoder_output, h_out, c_out);
}