diff --git "a/openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil" "b/openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil" --- "a/openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil" +++ "b/openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil" @@ -1,5 +1,5 @@ program(1.0) -[buildInfo = dict, tensor>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0b2"}})] +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})] { func main(tensor cache_length, tensor decoder_key_padding_mask, tensor encoder_output_embeds, tensor input_ids, tensor key_cache, tensor kv_cache_update_mask, tensor value_cache) { tensor var_24_axis_0 = const()[name = tensor("op_24_axis_0"), val = tensor(0)]; @@ -22,7 +22,6 @@ program(1.0) tensor var_54_axis_0 = const()[name = tensor("op_54_axis_0"), val = tensor(1)]; tensor var_54_cast_fp16_0, tensor var_54_cast_fp16_1, tensor var_54_cast_fp16_2, tensor var_54_cast_fp16_3 = split(axis = var_54_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor("op_54_cast_fp16")]; tensor var_64 = const()[name = tensor("op_64"), val = tensor(3)]; - tensor var_71 = const()[name = tensor("op_71"), val = tensor(1)]; tensor out_1_axes_0 = const()[name = tensor("out_1_axes_0"), val = tensor([1])]; tensor var_90_to_fp16 = const()[name = tensor("op_90_to_fp16"), val = tensor(0x1.5p-17)]; tensor out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_90_to_fp16, x = inputs_1_cast_fp16)[name = tensor("out_1_cast_fp16")]; @@ -32,26 +31,29 @@ program(1.0) tensor obj_1_beta_0_to_fp16 = const()[name = tensor("obj_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40178304)))]; tensor obj_1_epsilon_0_to_fp16 = const()[name = tensor("obj_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor("obj_1_cast_fp16")]; - tensor var_106 = const()[name = tensor("op_106"), val = tensor([1, 1])]; - tensor var_108 = const()[name = tensor("op_108"), val = tensor([1, 1])]; - tensor query_1_pad_type_0 = const()[name = tensor("query_1_pad_type_0"), val = tensor("custom")]; + tensor query_1_pad_type_0 = const()[name = tensor("query_1_pad_type_0"), val = tensor("valid")]; + tensor query_1_strides_0 = const()[name = tensor("query_1_strides_0"), val = tensor([1, 1])]; tensor query_1_pad_0 = const()[name = tensor("query_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_1_dilations_0 = const()[name = tensor("query_1_dilations_0"), val = tensor([1, 1])]; + tensor query_1_groups_0 = const()[name = tensor("query_1_groups_0"), val = tensor(1)]; tensor layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40179136)))]; tensor layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40474112)))]; - tensor query_1_cast_fp16 = conv(bias = 
layers_0_self_attn_q_proj_bias_to_fp16, dilations = var_108, groups = var_71, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = var_106, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("query_1_cast_fp16")]; - tensor var_112 = const()[name = tensor("op_112"), val = tensor([1, 1])]; - tensor var_114 = const()[name = tensor("op_114"), val = tensor([1, 1])]; - tensor current_key_1_pad_type_0 = const()[name = tensor("current_key_1_pad_type_0"), val = tensor("custom")]; + tensor query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("query_1_cast_fp16")]; + tensor current_key_1_pad_type_0 = const()[name = tensor("current_key_1_pad_type_0"), val = tensor("valid")]; + tensor current_key_1_strides_0 = const()[name = tensor("current_key_1_strides_0"), val = tensor([1, 1])]; tensor current_key_1_pad_0 = const()[name = tensor("current_key_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_key_1_dilations_0 = const()[name = tensor("current_key_1_dilations_0"), val = tensor([1, 1])]; + tensor current_key_1_groups_0 = const()[name = tensor("current_key_1_groups_0"), val = tensor(1)]; tensor layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40474944)))]; - tensor current_key_1_cast_fp16 = conv(dilations = var_114, groups = var_71, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = var_112, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("current_key_1_cast_fp16")]; - tensor var_119 = const()[name = tensor("op_119"), val = tensor([1, 1])]; - tensor var_121 = const()[name = tensor("op_121"), val = tensor([1, 1])]; - tensor current_value_1_pad_type_0 = const()[name = tensor("current_value_1_pad_type_0"), val = tensor("custom")]; + tensor current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("current_key_1_cast_fp16")]; + tensor current_value_1_pad_type_0 = const()[name = tensor("current_value_1_pad_type_0"), val = tensor("valid")]; + tensor current_value_1_strides_0 = const()[name = tensor("current_value_1_strides_0"), val = tensor([1, 1])]; tensor current_value_1_pad_0 = const()[name = tensor("current_value_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_value_1_dilations_0 = const()[name = tensor("current_value_1_dilations_0"), val = tensor([1, 1])]; + tensor current_value_1_groups_0 = const()[name = tensor("current_value_1_groups_0"), val = tensor(1)]; tensor layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40769920)))]; tensor layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41064896)))]; - tensor current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, 
dilations = var_121, groups = var_71, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = var_119, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("current_value_1_cast_fp16")]; + tensor current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor("current_value_1_cast_fp16")]; tensor var_125_axes_0 = const()[name = tensor("op_125_axes_0"), val = tensor([1])]; tensor var_125_cast_fp16 = expand_dims(axes = var_125_axes_0, x = kv_cache_update_mask)[name = tensor("op_125_cast_fp16")]; tensor var_126_axes_0 = const()[name = tensor("op_126_axes_0"), val = tensor([2])]; @@ -86,13 +88,14 @@ program(1.0) tensor attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_152_cast_fp16, y = var_150_cast_fp16)[name = tensor("attn_1_cast_fp16")]; tensor var_155 = const()[name = tensor("op_155"), val = tensor([1, 384, 1, -1])]; tensor input_1_cast_fp16 = reshape(shape = var_155, x = attn_1_cast_fp16)[name = tensor("input_1_cast_fp16")]; - tensor var_159 = const()[name = tensor("op_159"), val = tensor([1, 1])]; - tensor var_161 = const()[name = tensor("op_161"), val = tensor([1, 1])]; - tensor obj_7_pad_type_0 = const()[name = tensor("obj_7_pad_type_0"), val = tensor("custom")]; + tensor obj_7_pad_type_0 = const()[name = tensor("obj_7_pad_type_0"), val = tensor("valid")]; + tensor obj_7_strides_0 = const()[name = tensor("obj_7_strides_0"), val = tensor([1, 1])]; tensor obj_7_pad_0 = const()[name = tensor("obj_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_7_dilations_0 = const()[name = tensor("obj_7_dilations_0"), val = tensor([1, 1])]; + tensor obj_7_groups_0 = const()[name = tensor("obj_7_groups_0"), val = tensor(1)]; tensor layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41065728)))]; tensor layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41360704)))]; - tensor obj_7_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = var_161, groups = var_71, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = var_159, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor("obj_7_cast_fp16")]; + tensor obj_7_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_7_dilations_0, groups = obj_7_groups_0, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = obj_7_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor("obj_7_cast_fp16")]; tensor inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_7_cast_fp16)[name = tensor("inputs_3_cast_fp16")]; tensor out_3_axes_0 = const()[name = tensor("out_3_axes_0"), val = tensor([1])]; tensor var_177_to_fp16 = const()[name = tensor("op_177_to_fp16"), val = tensor(0x1.5p-17)]; @@ -101,26 +104,29 @@ program(1.0) tensor obj_9_beta_0_to_fp16 = const()[name = tensor("obj_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(41362368)))]; tensor obj_9_epsilon_0_to_fp16 = const()[name = tensor("obj_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor("obj_9_cast_fp16")]; - tensor var_193 = const()[name = tensor("op_193"), val = tensor([1, 1])]; - tensor var_195 = const()[name = tensor("op_195"), val = tensor([1, 1])]; - tensor query_3_pad_type_0 = const()[name = tensor("query_3_pad_type_0"), val = tensor("custom")]; + tensor query_3_pad_type_0 = const()[name = tensor("query_3_pad_type_0"), val = tensor("valid")]; + tensor query_3_strides_0 = const()[name = tensor("query_3_strides_0"), val = tensor([1, 1])]; tensor query_3_pad_0 = const()[name = tensor("query_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_3_dilations_0 = const()[name = tensor("query_3_dilations_0"), val = tensor([1, 1])]; + tensor query_3_groups_0 = const()[name = tensor("query_3_groups_0"), val = tensor(1)]; tensor layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41363200)))]; tensor layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41658176)))]; - tensor query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = var_195, groups = var_71, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = var_193, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor("query_3_cast_fp16")]; - tensor var_199 = const()[name = tensor("op_199"), val = tensor([1, 1])]; - tensor var_201 = const()[name = tensor("op_201"), val = tensor([1, 1])]; - tensor key_3_pad_type_0 = const()[name = tensor("key_3_pad_type_0"), val = tensor("custom")]; + tensor query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor("query_3_cast_fp16")]; + tensor key_3_pad_type_0 = const()[name = tensor("key_3_pad_type_0"), val = tensor("valid")]; + tensor key_3_strides_0 = const()[name = tensor("key_3_strides_0"), val = tensor([1, 1])]; tensor key_3_pad_0 = const()[name = tensor("key_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor key_3_dilations_0 = const()[name = tensor("key_3_dilations_0"), val = tensor([1, 1])]; + tensor key_3_groups_0 = const()[name = tensor("key_3_groups_0"), val = tensor(1)]; tensor layers_0_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_0_encoder_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41659008)))]; - tensor key_3_cast_fp16 = conv(dilations = var_201, groups = var_71, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = var_199, weight = layers_0_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_3_cast_fp16")]; - tensor var_206 = const()[name = tensor("op_206"), val = tensor([1, 1])]; - tensor var_208 = const()[name = tensor("op_208"), val = tensor([1, 1])]; - tensor 
value_3_pad_type_0 = const()[name = tensor("value_3_pad_type_0"), val = tensor("custom")]; + tensor key_3_cast_fp16 = conv(dilations = key_3_dilations_0, groups = key_3_groups_0, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = key_3_strides_0, weight = layers_0_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_3_cast_fp16")]; + tensor value_3_pad_type_0 = const()[name = tensor("value_3_pad_type_0"), val = tensor("valid")]; + tensor value_3_strides_0 = const()[name = tensor("value_3_strides_0"), val = tensor([1, 1])]; tensor value_3_pad_0 = const()[name = tensor("value_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor value_3_dilations_0 = const()[name = tensor("value_3_dilations_0"), val = tensor([1, 1])]; + tensor value_3_groups_0 = const()[name = tensor("value_3_groups_0"), val = tensor(1)]; tensor layers_0_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_0_encoder_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41953984)))]; tensor layers_0_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_0_encoder_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42248960)))]; - tensor value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = var_208, groups = var_71, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = var_206, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_3_cast_fp16")]; + tensor value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = value_3_dilations_0, groups = value_3_groups_0, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = value_3_strides_0, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_3_cast_fp16")]; tensor var_212 = const()[name = tensor("op_212"), val = tensor([1, 6, 64, -1])]; tensor mh_q_3_cast_fp16 = reshape(shape = var_212, x = query_3_cast_fp16)[name = tensor("mh_q_3_cast_fp16")]; tensor var_214_to_fp16 = const()[name = tensor("op_214_to_fp16"), val = tensor(0x1p-3)]; @@ -138,13 +144,14 @@ program(1.0) tensor attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_222_cast_fp16, y = obj_13_cast_fp16)[name = tensor("attn_3_cast_fp16")]; tensor var_225 = const()[name = tensor("op_225"), val = tensor([1, 384, 1, -1])]; tensor input_3_cast_fp16 = reshape(shape = var_225, x = attn_3_cast_fp16)[name = tensor("input_3_cast_fp16")]; - tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; - tensor var_231 = const()[name = tensor("op_231"), val = tensor([1, 1])]; - tensor obj_11_pad_type_0 = const()[name = tensor("obj_11_pad_type_0"), val = tensor("custom")]; + tensor obj_11_pad_type_0 = const()[name = tensor("obj_11_pad_type_0"), val = tensor("valid")]; + tensor obj_11_strides_0 = const()[name = tensor("obj_11_strides_0"), val = tensor([1, 1])]; tensor obj_11_pad_0 = const()[name = tensor("obj_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_11_dilations_0 = const()[name = tensor("obj_11_dilations_0"), val = tensor([1, 1])]; + tensor obj_11_groups_0 = const()[name = tensor("obj_11_groups_0"), val = tensor(1)]; tensor layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(42249792)))]; tensor layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42544768)))]; - tensor obj_11_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = var_231, groups = var_71, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = var_229, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = tensor("obj_11_cast_fp16")]; + tensor obj_11_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = tensor("obj_11_cast_fp16")]; tensor inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_11_cast_fp16)[name = tensor("inputs_5_cast_fp16")]; tensor out_5_axes_0 = const()[name = tensor("out_5_axes_0"), val = tensor([1])]; tensor var_243_to_fp16 = const()[name = tensor("op_243_to_fp16"), val = tensor(0x1.5p-17)]; @@ -153,25 +160,26 @@ program(1.0) tensor input_5_beta_0_to_fp16 = const()[name = tensor("input_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42546432)))]; tensor input_5_epsilon_0_to_fp16 = const()[name = tensor("input_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor("input_5_cast_fp16")]; - tensor var_255 = const()[name = tensor("op_255"), val = tensor([1, 1])]; - tensor var_257 = const()[name = tensor("op_257"), val = tensor([1, 1])]; - tensor input_7_pad_type_0 = const()[name = tensor("input_7_pad_type_0"), val = tensor("custom")]; + tensor input_7_pad_type_0 = const()[name = tensor("input_7_pad_type_0"), val = tensor("valid")]; + tensor input_7_strides_0 = const()[name = tensor("input_7_strides_0"), val = tensor([1, 1])]; tensor input_7_pad_0 = const()[name = tensor("input_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_7_dilations_0 = const()[name = tensor("input_7_dilations_0"), val = tensor([1, 1])]; + tensor input_7_groups_0 = const()[name = tensor("input_7_groups_0"), val = tensor(1)]; tensor layers_0_fc1_weight_to_fp16 = const()[name = tensor("layers_0_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42547264)))]; tensor layers_0_fc1_bias_to_fp16 = const()[name = tensor("layers_0_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43726976)))]; - tensor input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = var_257, groups = var_71, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = var_255, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = tensor("input_7_cast_fp16")]; + tensor input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = tensor("input_7_cast_fp16")]; tensor input_9_mode_0 = const()[name = tensor("input_9_mode_0"), val = tensor("EXACT")]; tensor 
input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = tensor("input_9_cast_fp16")]; - tensor var_263 = const()[name = tensor("op_263"), val = tensor([1, 1])]; - tensor var_265 = const()[name = tensor("op_265"), val = tensor([1, 1])]; - tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("valid")]; + tensor hidden_states_3_strides_0 = const()[name = tensor("hidden_states_3_strides_0"), val = tensor([1, 1])]; tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor hidden_states_3_dilations_0 = const()[name = tensor("hidden_states_3_dilations_0"), val = tensor([1, 1])]; + tensor hidden_states_3_groups_0 = const()[name = tensor("hidden_states_3_groups_0"), val = tensor(1)]; tensor layers_0_fc2_weight_to_fp16 = const()[name = tensor("layers_0_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43730112)))]; tensor layers_0_fc2_bias_to_fp16 = const()[name = tensor("layers_0_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44909824)))]; - tensor hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = var_265, groups = var_71, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_263, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = tensor("hidden_states_3_cast_fp16")]; + tensor hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = tensor("hidden_states_3_cast_fp16")]; tensor inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = tensor("inputs_7_cast_fp16")]; tensor var_278 = const()[name = tensor("op_278"), val = tensor(3)]; - tensor var_285 = const()[name = tensor("op_285"), val = tensor(1)]; tensor out_7_axes_0 = const()[name = tensor("out_7_axes_0"), val = tensor([1])]; tensor var_304_to_fp16 = const()[name = tensor("op_304_to_fp16"), val = tensor(0x1.5p-17)]; tensor out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_304_to_fp16, x = inputs_7_cast_fp16)[name = tensor("out_7_cast_fp16")]; @@ -179,26 +187,29 @@ program(1.0) tensor obj_15_beta_0_to_fp16 = const()[name = tensor("obj_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44911488)))]; tensor obj_15_epsilon_0_to_fp16 = const()[name = tensor("obj_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_15_cast_fp16 = batch_norm(beta = obj_15_beta_0_to_fp16, epsilon = obj_15_epsilon_0_to_fp16, gamma = obj_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor("obj_15_cast_fp16")]; - tensor var_320 = const()[name = tensor("op_320"), val = tensor([1, 1])]; - tensor var_322 = const()[name = tensor("op_322"), val = tensor([1, 1])]; - tensor query_5_pad_type_0 = const()[name = tensor("query_5_pad_type_0"), val = tensor("custom")]; + tensor query_5_pad_type_0 = const()[name = tensor("query_5_pad_type_0"), val = tensor("valid")]; + tensor query_5_strides_0 = const()[name = 
tensor("query_5_strides_0"), val = tensor([1, 1])]; tensor query_5_pad_0 = const()[name = tensor("query_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_5_dilations_0 = const()[name = tensor("query_5_dilations_0"), val = tensor([1, 1])]; + tensor query_5_groups_0 = const()[name = tensor("query_5_groups_0"), val = tensor(1)]; tensor layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44912320)))]; tensor layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45207296)))]; - tensor query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = var_322, groups = var_285, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = var_320, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("query_5_cast_fp16")]; - tensor var_326 = const()[name = tensor("op_326"), val = tensor([1, 1])]; - tensor var_328 = const()[name = tensor("op_328"), val = tensor([1, 1])]; - tensor current_key_3_pad_type_0 = const()[name = tensor("current_key_3_pad_type_0"), val = tensor("custom")]; + tensor query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("query_5_cast_fp16")]; + tensor current_key_3_pad_type_0 = const()[name = tensor("current_key_3_pad_type_0"), val = tensor("valid")]; + tensor current_key_3_strides_0 = const()[name = tensor("current_key_3_strides_0"), val = tensor([1, 1])]; tensor current_key_3_pad_0 = const()[name = tensor("current_key_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_key_3_dilations_0 = const()[name = tensor("current_key_3_dilations_0"), val = tensor([1, 1])]; + tensor current_key_3_groups_0 = const()[name = tensor("current_key_3_groups_0"), val = tensor(1)]; tensor layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45208128)))]; - tensor current_key_3_cast_fp16 = conv(dilations = var_328, groups = var_285, pad = current_key_3_pad_0, pad_type = current_key_3_pad_type_0, strides = var_326, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("current_key_3_cast_fp16")]; - tensor var_333 = const()[name = tensor("op_333"), val = tensor([1, 1])]; - tensor var_335 = const()[name = tensor("op_335"), val = tensor([1, 1])]; - tensor current_value_3_pad_type_0 = const()[name = tensor("current_value_3_pad_type_0"), val = tensor("custom")]; + tensor current_key_3_cast_fp16 = conv(dilations = current_key_3_dilations_0, groups = current_key_3_groups_0, pad = current_key_3_pad_0, pad_type = current_key_3_pad_type_0, strides = current_key_3_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("current_key_3_cast_fp16")]; + tensor current_value_3_pad_type_0 = const()[name = tensor("current_value_3_pad_type_0"), val = tensor("valid")]; + tensor current_value_3_strides_0 = const()[name = tensor("current_value_3_strides_0"), val = tensor([1, 1])]; tensor current_value_3_pad_0 = 
const()[name = tensor("current_value_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_value_3_dilations_0 = const()[name = tensor("current_value_3_dilations_0"), val = tensor([1, 1])]; + tensor current_value_3_groups_0 = const()[name = tensor("current_value_3_groups_0"), val = tensor(1)]; tensor layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45503104)))]; tensor layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45798080)))]; - tensor current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = var_335, groups = var_285, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = var_333, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("current_value_3_cast_fp16")]; + tensor current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_3_dilations_0, groups = current_value_3_groups_0, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = current_value_3_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor("current_value_3_cast_fp16")]; tensor var_342_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_126_cast_fp16)[name = tensor("op_342_cast_fp16")]; tensor var_344_cast_fp16 = mul(x = var_47_cast_fp16_1, y = var_129_cast_fp16)[name = tensor("op_344_cast_fp16")]; tensor key_5_cast_fp16 = add(x = var_342_cast_fp16, y = var_344_cast_fp16)[name = tensor("key_5_cast_fp16")]; @@ -223,13 +234,14 @@ program(1.0) tensor attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_366_cast_fp16, y = var_364_cast_fp16)[name = tensor("attn_5_cast_fp16")]; tensor var_369 = const()[name = tensor("op_369"), val = tensor([1, 384, 1, -1])]; tensor input_11_cast_fp16 = reshape(shape = var_369, x = attn_5_cast_fp16)[name = tensor("input_11_cast_fp16")]; - tensor var_373 = const()[name = tensor("op_373"), val = tensor([1, 1])]; - tensor var_375 = const()[name = tensor("op_375"), val = tensor([1, 1])]; - tensor obj_21_pad_type_0 = const()[name = tensor("obj_21_pad_type_0"), val = tensor("custom")]; + tensor obj_21_pad_type_0 = const()[name = tensor("obj_21_pad_type_0"), val = tensor("valid")]; + tensor obj_21_strides_0 = const()[name = tensor("obj_21_strides_0"), val = tensor([1, 1])]; tensor obj_21_pad_0 = const()[name = tensor("obj_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_21_dilations_0 = const()[name = tensor("obj_21_dilations_0"), val = tensor([1, 1])]; + tensor obj_21_groups_0 = const()[name = tensor("obj_21_groups_0"), val = tensor(1)]; tensor layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45798912)))]; tensor layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46093888)))]; - tensor obj_21_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = var_375, groups = var_285, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = var_373, weight = 
layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = tensor("obj_21_cast_fp16")]; + tensor obj_21_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = tensor("obj_21_cast_fp16")]; tensor inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_21_cast_fp16)[name = tensor("inputs_9_cast_fp16")]; tensor out_9_axes_0 = const()[name = tensor("out_9_axes_0"), val = tensor([1])]; tensor var_391_to_fp16 = const()[name = tensor("op_391_to_fp16"), val = tensor(0x1.5p-17)]; @@ -238,26 +250,29 @@ program(1.0) tensor obj_23_beta_0_to_fp16 = const()[name = tensor("obj_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46095552)))]; tensor obj_23_epsilon_0_to_fp16 = const()[name = tensor("obj_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_23_cast_fp16 = batch_norm(beta = obj_23_beta_0_to_fp16, epsilon = obj_23_epsilon_0_to_fp16, gamma = obj_23_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor("obj_23_cast_fp16")]; - tensor var_407 = const()[name = tensor("op_407"), val = tensor([1, 1])]; - tensor var_409 = const()[name = tensor("op_409"), val = tensor([1, 1])]; - tensor query_7_pad_type_0 = const()[name = tensor("query_7_pad_type_0"), val = tensor("custom")]; + tensor query_7_pad_type_0 = const()[name = tensor("query_7_pad_type_0"), val = tensor("valid")]; + tensor query_7_strides_0 = const()[name = tensor("query_7_strides_0"), val = tensor([1, 1])]; tensor query_7_pad_0 = const()[name = tensor("query_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_7_dilations_0 = const()[name = tensor("query_7_dilations_0"), val = tensor([1, 1])]; + tensor query_7_groups_0 = const()[name = tensor("query_7_groups_0"), val = tensor(1)]; tensor layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46096384)))]; tensor layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46391360)))]; - tensor query_7_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = var_409, groups = var_285, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = var_407, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_23_cast_fp16)[name = tensor("query_7_cast_fp16")]; - tensor var_413 = const()[name = tensor("op_413"), val = tensor([1, 1])]; - tensor var_415 = const()[name = tensor("op_415"), val = tensor([1, 1])]; - tensor key_7_pad_type_0 = const()[name = tensor("key_7_pad_type_0"), val = tensor("custom")]; + tensor query_7_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_7_dilations_0, groups = query_7_groups_0, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = query_7_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_23_cast_fp16)[name = tensor("query_7_cast_fp16")]; + tensor key_7_pad_type_0 = const()[name = tensor("key_7_pad_type_0"), val = tensor("valid")]; + tensor key_7_strides_0 = const()[name = tensor("key_7_strides_0"), val = tensor([1, 1])]; 
tensor key_7_pad_0 = const()[name = tensor("key_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor key_7_dilations_0 = const()[name = tensor("key_7_dilations_0"), val = tensor([1, 1])]; + tensor key_7_groups_0 = const()[name = tensor("key_7_groups_0"), val = tensor(1)]; tensor layers_1_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_1_encoder_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46392192)))]; - tensor key_7_cast_fp16 = conv(dilations = var_415, groups = var_285, pad = key_7_pad_0, pad_type = key_7_pad_type_0, strides = var_413, weight = layers_1_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_7_cast_fp16")]; - tensor var_420 = const()[name = tensor("op_420"), val = tensor([1, 1])]; - tensor var_422 = const()[name = tensor("op_422"), val = tensor([1, 1])]; - tensor value_7_pad_type_0 = const()[name = tensor("value_7_pad_type_0"), val = tensor("custom")]; + tensor key_7_cast_fp16 = conv(dilations = key_7_dilations_0, groups = key_7_groups_0, pad = key_7_pad_0, pad_type = key_7_pad_type_0, strides = key_7_strides_0, weight = layers_1_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_7_cast_fp16")]; + tensor value_7_pad_type_0 = const()[name = tensor("value_7_pad_type_0"), val = tensor("valid")]; + tensor value_7_strides_0 = const()[name = tensor("value_7_strides_0"), val = tensor([1, 1])]; tensor value_7_pad_0 = const()[name = tensor("value_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor value_7_dilations_0 = const()[name = tensor("value_7_dilations_0"), val = tensor([1, 1])]; + tensor value_7_groups_0 = const()[name = tensor("value_7_groups_0"), val = tensor(1)]; tensor layers_1_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_1_encoder_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46687168)))]; tensor layers_1_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_1_encoder_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46982144)))]; - tensor value_7_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = var_422, groups = var_285, pad = value_7_pad_0, pad_type = value_7_pad_type_0, strides = var_420, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_7_cast_fp16")]; + tensor value_7_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = value_7_dilations_0, groups = value_7_groups_0, pad = value_7_pad_0, pad_type = value_7_pad_type_0, strides = value_7_strides_0, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_7_cast_fp16")]; tensor var_426 = const()[name = tensor("op_426"), val = tensor([1, 6, 64, -1])]; tensor mh_q_7_cast_fp16 = reshape(shape = var_426, x = query_7_cast_fp16)[name = tensor("mh_q_7_cast_fp16")]; tensor var_428_to_fp16 = const()[name = tensor("op_428_to_fp16"), val = tensor(0x1p-3)]; @@ -275,13 +290,14 @@ program(1.0) tensor attn_7_cast_fp16 = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_436_cast_fp16, y = obj_27_cast_fp16)[name = tensor("attn_7_cast_fp16")]; tensor var_439 = const()[name = tensor("op_439"), val = tensor([1, 384, 1, -1])]; tensor input_13_cast_fp16 = reshape(shape = var_439, x = attn_7_cast_fp16)[name = tensor("input_13_cast_fp16")]; - 
tensor var_443 = const()[name = tensor("op_443"), val = tensor([1, 1])]; - tensor var_445 = const()[name = tensor("op_445"), val = tensor([1, 1])]; - tensor obj_25_pad_type_0 = const()[name = tensor("obj_25_pad_type_0"), val = tensor("custom")]; + tensor obj_25_pad_type_0 = const()[name = tensor("obj_25_pad_type_0"), val = tensor("valid")]; + tensor obj_25_strides_0 = const()[name = tensor("obj_25_strides_0"), val = tensor([1, 1])]; tensor obj_25_pad_0 = const()[name = tensor("obj_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_25_dilations_0 = const()[name = tensor("obj_25_dilations_0"), val = tensor([1, 1])]; + tensor obj_25_groups_0 = const()[name = tensor("obj_25_groups_0"), val = tensor(1)]; tensor layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(46982976)))]; tensor layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47277952)))]; - tensor obj_25_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = var_445, groups = var_285, pad = obj_25_pad_0, pad_type = obj_25_pad_type_0, strides = var_443, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = tensor("obj_25_cast_fp16")]; + tensor obj_25_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_25_dilations_0, groups = obj_25_groups_0, pad = obj_25_pad_0, pad_type = obj_25_pad_type_0, strides = obj_25_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = tensor("obj_25_cast_fp16")]; tensor inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_25_cast_fp16)[name = tensor("inputs_11_cast_fp16")]; tensor out_11_axes_0 = const()[name = tensor("out_11_axes_0"), val = tensor([1])]; tensor var_460_to_fp16 = const()[name = tensor("op_460_to_fp16"), val = tensor(0x1.5p-17)]; @@ -290,25 +306,26 @@ program(1.0) tensor input_15_beta_0_to_fp16 = const()[name = tensor("input_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47279616)))]; tensor input_15_epsilon_0_to_fp16 = const()[name = tensor("input_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor("input_15_cast_fp16")]; - tensor var_472 = const()[name = tensor("op_472"), val = tensor([1, 1])]; - tensor var_474 = const()[name = tensor("op_474"), val = tensor([1, 1])]; - tensor input_17_pad_type_0 = const()[name = tensor("input_17_pad_type_0"), val = tensor("custom")]; + tensor input_17_pad_type_0 = const()[name = tensor("input_17_pad_type_0"), val = tensor("valid")]; + tensor input_17_strides_0 = const()[name = tensor("input_17_strides_0"), val = tensor([1, 1])]; tensor input_17_pad_0 = const()[name = tensor("input_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_17_dilations_0 = const()[name = tensor("input_17_dilations_0"), val = tensor([1, 1])]; + tensor input_17_groups_0 = const()[name = tensor("input_17_groups_0"), val = tensor(1)]; tensor layers_1_fc1_weight_to_fp16 = const()[name = tensor("layers_1_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(47280448)))]; tensor layers_1_fc1_bias_to_fp16 = const()[name = tensor("layers_1_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(48460160)))]; - tensor input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = var_474, groups = var_285, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = var_472, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor("input_17_cast_fp16")]; + tensor input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor("input_17_cast_fp16")]; tensor input_19_mode_0 = const()[name = tensor("input_19_mode_0"), val = tensor("EXACT")]; tensor input_19_cast_fp16 = gelu(mode = input_19_mode_0, x = input_17_cast_fp16)[name = tensor("input_19_cast_fp16")]; - tensor var_480 = const()[name = tensor("op_480"), val = tensor([1, 1])]; - tensor var_482 = const()[name = tensor("op_482"), val = tensor([1, 1])]; - tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("valid")]; + tensor hidden_states_5_strides_0 = const()[name = tensor("hidden_states_5_strides_0"), val = tensor([1, 1])]; tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor hidden_states_5_dilations_0 = const()[name = tensor("hidden_states_5_dilations_0"), val = tensor([1, 1])]; + tensor hidden_states_5_groups_0 = const()[name = tensor("hidden_states_5_groups_0"), val = tensor(1)]; tensor layers_1_fc2_weight_to_fp16 = const()[name = tensor("layers_1_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(48463296)))]; tensor layers_1_fc2_bias_to_fp16 = const()[name = tensor("layers_1_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49643008)))]; - tensor hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = var_482, groups = var_285, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_480, weight = layers_1_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor("hidden_states_5_cast_fp16")]; + tensor hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = tensor("hidden_states_5_cast_fp16")]; tensor inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor("inputs_13_cast_fp16")]; tensor var_496 = const()[name = tensor("op_496"), val = tensor(3)]; - tensor var_503 = const()[name = tensor("op_503"), val = tensor(1)]; tensor out_13_axes_0 = const()[name = tensor("out_13_axes_0"), val = tensor([1])]; tensor var_522_to_fp16 = const()[name = tensor("op_522_to_fp16"), val = tensor(0x1.5p-17)]; tensor out_13_cast_fp16 = layer_norm(axes = out_13_axes_0, epsilon = var_522_to_fp16, x = inputs_13_cast_fp16)[name = tensor("out_13_cast_fp16")]; @@ -316,26 +333,29 @@ 
program(1.0) tensor obj_29_beta_0_to_fp16 = const()[name = tensor("obj_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49644672)))]; tensor obj_29_epsilon_0_to_fp16 = const()[name = tensor("obj_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_29_cast_fp16 = batch_norm(beta = obj_29_beta_0_to_fp16, epsilon = obj_29_epsilon_0_to_fp16, gamma = obj_29_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_13_cast_fp16)[name = tensor("obj_29_cast_fp16")]; - tensor var_538 = const()[name = tensor("op_538"), val = tensor([1, 1])]; - tensor var_540 = const()[name = tensor("op_540"), val = tensor([1, 1])]; - tensor query_9_pad_type_0 = const()[name = tensor("query_9_pad_type_0"), val = tensor("custom")]; + tensor query_9_pad_type_0 = const()[name = tensor("query_9_pad_type_0"), val = tensor("valid")]; + tensor query_9_strides_0 = const()[name = tensor("query_9_strides_0"), val = tensor([1, 1])]; tensor query_9_pad_0 = const()[name = tensor("query_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_9_dilations_0 = const()[name = tensor("query_9_dilations_0"), val = tensor([1, 1])]; + tensor query_9_groups_0 = const()[name = tensor("query_9_groups_0"), val = tensor(1)]; tensor layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49645504)))]; tensor layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49940480)))]; - tensor query_9_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = var_540, groups = var_503, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = var_538, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("query_9_cast_fp16")]; - tensor var_544 = const()[name = tensor("op_544"), val = tensor([1, 1])]; - tensor var_546 = const()[name = tensor("op_546"), val = tensor([1, 1])]; - tensor current_key_5_pad_type_0 = const()[name = tensor("current_key_5_pad_type_0"), val = tensor("custom")]; + tensor query_9_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = query_9_dilations_0, groups = query_9_groups_0, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = query_9_strides_0, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("query_9_cast_fp16")]; + tensor current_key_5_pad_type_0 = const()[name = tensor("current_key_5_pad_type_0"), val = tensor("valid")]; + tensor current_key_5_strides_0 = const()[name = tensor("current_key_5_strides_0"), val = tensor([1, 1])]; tensor current_key_5_pad_0 = const()[name = tensor("current_key_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_key_5_dilations_0 = const()[name = tensor("current_key_5_dilations_0"), val = tensor([1, 1])]; + tensor current_key_5_groups_0 = const()[name = tensor("current_key_5_groups_0"), val = tensor(1)]; tensor layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49941312)))]; - tensor current_key_5_cast_fp16 = conv(dilations = var_546, groups = var_503, pad = current_key_5_pad_0, pad_type = current_key_5_pad_type_0, strides = var_544, weight = 
layers_2_self_attn_k_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("current_key_5_cast_fp16")]; - tensor var_551 = const()[name = tensor("op_551"), val = tensor([1, 1])]; - tensor var_553 = const()[name = tensor("op_553"), val = tensor([1, 1])]; - tensor current_value_5_pad_type_0 = const()[name = tensor("current_value_5_pad_type_0"), val = tensor("custom")]; + tensor current_key_5_cast_fp16 = conv(dilations = current_key_5_dilations_0, groups = current_key_5_groups_0, pad = current_key_5_pad_0, pad_type = current_key_5_pad_type_0, strides = current_key_5_strides_0, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("current_key_5_cast_fp16")]; + tensor current_value_5_pad_type_0 = const()[name = tensor("current_value_5_pad_type_0"), val = tensor("valid")]; + tensor current_value_5_strides_0 = const()[name = tensor("current_value_5_strides_0"), val = tensor([1, 1])]; tensor current_value_5_pad_0 = const()[name = tensor("current_value_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_value_5_dilations_0 = const()[name = tensor("current_value_5_dilations_0"), val = tensor([1, 1])]; + tensor current_value_5_groups_0 = const()[name = tensor("current_value_5_groups_0"), val = tensor(1)]; tensor layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50236288)))]; tensor layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50531264)))]; - tensor current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = var_553, groups = var_503, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = var_551, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("current_value_5_cast_fp16")]; + tensor current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = current_value_5_dilations_0, groups = current_value_5_groups_0, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = current_value_5_strides_0, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_29_cast_fp16)[name = tensor("current_value_5_cast_fp16")]; tensor var_560_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_126_cast_fp16)[name = tensor("op_560_cast_fp16")]; tensor var_562_cast_fp16 = mul(x = var_47_cast_fp16_2, y = var_129_cast_fp16)[name = tensor("op_562_cast_fp16")]; tensor key_9_cast_fp16 = add(x = var_560_cast_fp16, y = var_562_cast_fp16)[name = tensor("key_9_cast_fp16")]; @@ -360,13 +380,14 @@ program(1.0) tensor attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_584_cast_fp16, y = var_582_cast_fp16)[name = tensor("attn_9_cast_fp16")]; tensor var_587 = const()[name = tensor("op_587"), val = tensor([1, 384, 1, -1])]; tensor input_21_cast_fp16 = reshape(shape = var_587, x = attn_9_cast_fp16)[name = tensor("input_21_cast_fp16")]; - tensor var_591 = const()[name = tensor("op_591"), val = tensor([1, 1])]; - tensor var_593 = const()[name = tensor("op_593"), val = tensor([1, 1])]; - tensor obj_35_pad_type_0 = const()[name = tensor("obj_35_pad_type_0"), val = tensor("custom")]; + tensor obj_35_pad_type_0 = const()[name = tensor("obj_35_pad_type_0"), val = tensor("valid")]; + tensor obj_35_strides_0 = const()[name 
= tensor("obj_35_strides_0"), val = tensor([1, 1])]; tensor obj_35_pad_0 = const()[name = tensor("obj_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_35_dilations_0 = const()[name = tensor("obj_35_dilations_0"), val = tensor([1, 1])]; + tensor obj_35_groups_0 = const()[name = tensor("obj_35_groups_0"), val = tensor(1)]; tensor layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50532096)))]; tensor layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50827072)))]; - tensor obj_35_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = var_593, groups = var_503, pad = obj_35_pad_0, pad_type = obj_35_pad_type_0, strides = var_591, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_21_cast_fp16)[name = tensor("obj_35_cast_fp16")]; + tensor obj_35_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = obj_35_dilations_0, groups = obj_35_groups_0, pad = obj_35_pad_0, pad_type = obj_35_pad_type_0, strides = obj_35_strides_0, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_21_cast_fp16)[name = tensor("obj_35_cast_fp16")]; tensor inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_35_cast_fp16)[name = tensor("inputs_15_cast_fp16")]; tensor out_15_axes_0 = const()[name = tensor("out_15_axes_0"), val = tensor([1])]; tensor var_609_to_fp16 = const()[name = tensor("op_609_to_fp16"), val = tensor(0x1.5p-17)]; @@ -375,26 +396,29 @@ program(1.0) tensor obj_37_beta_0_to_fp16 = const()[name = tensor("obj_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50828736)))]; tensor obj_37_epsilon_0_to_fp16 = const()[name = tensor("obj_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_37_cast_fp16 = batch_norm(beta = obj_37_beta_0_to_fp16, epsilon = obj_37_epsilon_0_to_fp16, gamma = obj_37_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_15_cast_fp16)[name = tensor("obj_37_cast_fp16")]; - tensor var_625 = const()[name = tensor("op_625"), val = tensor([1, 1])]; - tensor var_627 = const()[name = tensor("op_627"), val = tensor([1, 1])]; - tensor query_11_pad_type_0 = const()[name = tensor("query_11_pad_type_0"), val = tensor("custom")]; + tensor query_11_pad_type_0 = const()[name = tensor("query_11_pad_type_0"), val = tensor("valid")]; + tensor query_11_strides_0 = const()[name = tensor("query_11_strides_0"), val = tensor([1, 1])]; tensor query_11_pad_0 = const()[name = tensor("query_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_11_dilations_0 = const()[name = tensor("query_11_dilations_0"), val = tensor([1, 1])]; + tensor query_11_groups_0 = const()[name = tensor("query_11_groups_0"), val = tensor(1)]; tensor layers_2_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_2_encoder_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50829568)))]; tensor layers_2_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_2_encoder_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51124544)))]; - tensor query_11_cast_fp16 = conv(bias = layers_2_encoder_attn_q_proj_bias_to_fp16, dilations = var_627, 
groups = var_503, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = var_625, weight = layers_2_encoder_attn_q_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor("query_11_cast_fp16")]; - tensor var_631 = const()[name = tensor("op_631"), val = tensor([1, 1])]; - tensor var_633 = const()[name = tensor("op_633"), val = tensor([1, 1])]; - tensor key_11_pad_type_0 = const()[name = tensor("key_11_pad_type_0"), val = tensor("custom")]; + tensor query_11_cast_fp16 = conv(bias = layers_2_encoder_attn_q_proj_bias_to_fp16, dilations = query_11_dilations_0, groups = query_11_groups_0, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = query_11_strides_0, weight = layers_2_encoder_attn_q_proj_weight_to_fp16, x = obj_37_cast_fp16)[name = tensor("query_11_cast_fp16")]; + tensor key_11_pad_type_0 = const()[name = tensor("key_11_pad_type_0"), val = tensor("valid")]; + tensor key_11_strides_0 = const()[name = tensor("key_11_strides_0"), val = tensor([1, 1])]; tensor key_11_pad_0 = const()[name = tensor("key_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor key_11_dilations_0 = const()[name = tensor("key_11_dilations_0"), val = tensor([1, 1])]; + tensor key_11_groups_0 = const()[name = tensor("key_11_groups_0"), val = tensor(1)]; tensor layers_2_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_2_encoder_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51125376)))]; - tensor key_11_cast_fp16 = conv(dilations = var_633, groups = var_503, pad = key_11_pad_0, pad_type = key_11_pad_type_0, strides = var_631, weight = layers_2_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_11_cast_fp16")]; - tensor var_638 = const()[name = tensor("op_638"), val = tensor([1, 1])]; - tensor var_640 = const()[name = tensor("op_640"), val = tensor([1, 1])]; - tensor value_11_pad_type_0 = const()[name = tensor("value_11_pad_type_0"), val = tensor("custom")]; + tensor key_11_cast_fp16 = conv(dilations = key_11_dilations_0, groups = key_11_groups_0, pad = key_11_pad_0, pad_type = key_11_pad_type_0, strides = key_11_strides_0, weight = layers_2_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_11_cast_fp16")]; + tensor value_11_pad_type_0 = const()[name = tensor("value_11_pad_type_0"), val = tensor("valid")]; + tensor value_11_strides_0 = const()[name = tensor("value_11_strides_0"), val = tensor([1, 1])]; tensor value_11_pad_0 = const()[name = tensor("value_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor value_11_dilations_0 = const()[name = tensor("value_11_dilations_0"), val = tensor([1, 1])]; + tensor value_11_groups_0 = const()[name = tensor("value_11_groups_0"), val = tensor(1)]; tensor layers_2_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_2_encoder_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51420352)))]; tensor layers_2_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_2_encoder_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51715328)))]; - tensor value_11_cast_fp16 = conv(bias = layers_2_encoder_attn_v_proj_bias_to_fp16, dilations = var_640, groups = var_503, pad = value_11_pad_0, pad_type = value_11_pad_type_0, strides = var_638, weight = layers_2_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_11_cast_fp16")]; + tensor 
value_11_cast_fp16 = conv(bias = layers_2_encoder_attn_v_proj_bias_to_fp16, dilations = value_11_dilations_0, groups = value_11_groups_0, pad = value_11_pad_0, pad_type = value_11_pad_type_0, strides = value_11_strides_0, weight = layers_2_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_11_cast_fp16")]; tensor var_644 = const()[name = tensor("op_644"), val = tensor([1, 6, 64, -1])]; tensor mh_q_11_cast_fp16 = reshape(shape = var_644, x = query_11_cast_fp16)[name = tensor("mh_q_11_cast_fp16")]; tensor var_646_to_fp16 = const()[name = tensor("op_646_to_fp16"), val = tensor(0x1p-3)]; @@ -412,13 +436,14 @@ program(1.0) tensor attn_11_cast_fp16 = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_654_cast_fp16, y = obj_41_cast_fp16)[name = tensor("attn_11_cast_fp16")]; tensor var_657 = const()[name = tensor("op_657"), val = tensor([1, 384, 1, -1])]; tensor input_23_cast_fp16 = reshape(shape = var_657, x = attn_11_cast_fp16)[name = tensor("input_23_cast_fp16")]; - tensor var_661 = const()[name = tensor("op_661"), val = tensor([1, 1])]; - tensor var_663 = const()[name = tensor("op_663"), val = tensor([1, 1])]; - tensor obj_39_pad_type_0 = const()[name = tensor("obj_39_pad_type_0"), val = tensor("custom")]; + tensor obj_39_pad_type_0 = const()[name = tensor("obj_39_pad_type_0"), val = tensor("valid")]; + tensor obj_39_strides_0 = const()[name = tensor("obj_39_strides_0"), val = tensor([1, 1])]; tensor obj_39_pad_0 = const()[name = tensor("obj_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_39_dilations_0 = const()[name = tensor("obj_39_dilations_0"), val = tensor([1, 1])]; + tensor obj_39_groups_0 = const()[name = tensor("obj_39_groups_0"), val = tensor(1)]; tensor layers_2_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_2_encoder_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51716160)))]; tensor layers_2_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_2_encoder_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52011136)))]; - tensor obj_39_cast_fp16 = conv(bias = layers_2_encoder_attn_o_proj_bias_to_fp16, dilations = var_663, groups = var_503, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = var_661, weight = layers_2_encoder_attn_o_proj_weight_to_fp16, x = input_23_cast_fp16)[name = tensor("obj_39_cast_fp16")]; + tensor obj_39_cast_fp16 = conv(bias = layers_2_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_2_encoder_attn_o_proj_weight_to_fp16, x = input_23_cast_fp16)[name = tensor("obj_39_cast_fp16")]; tensor inputs_17_cast_fp16 = add(x = inputs_15_cast_fp16, y = obj_39_cast_fp16)[name = tensor("inputs_17_cast_fp16")]; tensor out_17_axes_0 = const()[name = tensor("out_17_axes_0"), val = tensor([1])]; tensor var_678_to_fp16 = const()[name = tensor("op_678_to_fp16"), val = tensor(0x1.5p-17)]; @@ -427,25 +452,26 @@ program(1.0) tensor input_25_beta_0_to_fp16 = const()[name = tensor("input_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52012800)))]; tensor input_25_epsilon_0_to_fp16 = const()[name = tensor("input_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor input_25_cast_fp16 = batch_norm(beta = input_25_beta_0_to_fp16, epsilon = 
input_25_epsilon_0_to_fp16, gamma = input_25_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_17_cast_fp16)[name = tensor("input_25_cast_fp16")]; - tensor var_690 = const()[name = tensor("op_690"), val = tensor([1, 1])]; - tensor var_692 = const()[name = tensor("op_692"), val = tensor([1, 1])]; - tensor input_27_pad_type_0 = const()[name = tensor("input_27_pad_type_0"), val = tensor("custom")]; + tensor input_27_pad_type_0 = const()[name = tensor("input_27_pad_type_0"), val = tensor("valid")]; + tensor input_27_strides_0 = const()[name = tensor("input_27_strides_0"), val = tensor([1, 1])]; tensor input_27_pad_0 = const()[name = tensor("input_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_27_dilations_0 = const()[name = tensor("input_27_dilations_0"), val = tensor([1, 1])]; + tensor input_27_groups_0 = const()[name = tensor("input_27_groups_0"), val = tensor(1)]; tensor layers_2_fc1_weight_to_fp16 = const()[name = tensor("layers_2_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52013632)))]; tensor layers_2_fc1_bias_to_fp16 = const()[name = tensor("layers_2_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53193344)))]; - tensor input_27_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = var_692, groups = var_503, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = var_690, weight = layers_2_fc1_weight_to_fp16, x = input_25_cast_fp16)[name = tensor("input_27_cast_fp16")]; + tensor input_27_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = layers_2_fc1_weight_to_fp16, x = input_25_cast_fp16)[name = tensor("input_27_cast_fp16")]; tensor input_29_mode_0 = const()[name = tensor("input_29_mode_0"), val = tensor("EXACT")]; tensor input_29_cast_fp16 = gelu(mode = input_29_mode_0, x = input_27_cast_fp16)[name = tensor("input_29_cast_fp16")]; - tensor var_698 = const()[name = tensor("op_698"), val = tensor([1, 1])]; - tensor var_700 = const()[name = tensor("op_700"), val = tensor([1, 1])]; - tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("valid")]; + tensor hidden_states_7_strides_0 = const()[name = tensor("hidden_states_7_strides_0"), val = tensor([1, 1])]; tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor hidden_states_7_dilations_0 = const()[name = tensor("hidden_states_7_dilations_0"), val = tensor([1, 1])]; + tensor hidden_states_7_groups_0 = const()[name = tensor("hidden_states_7_groups_0"), val = tensor(1)]; tensor layers_2_fc2_weight_to_fp16 = const()[name = tensor("layers_2_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53196480)))]; tensor layers_2_fc2_bias_to_fp16 = const()[name = tensor("layers_2_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54376192)))]; - tensor hidden_states_7_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = var_700, groups = var_503, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_698, weight = layers_2_fc2_weight_to_fp16, 
x = input_29_cast_fp16)[name = tensor("hidden_states_7_cast_fp16")]; + tensor hidden_states_7_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = hidden_states_7_dilations_0, groups = hidden_states_7_groups_0, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = hidden_states_7_strides_0, weight = layers_2_fc2_weight_to_fp16, x = input_29_cast_fp16)[name = tensor("hidden_states_7_cast_fp16")]; tensor inputs_19_cast_fp16 = add(x = inputs_17_cast_fp16, y = hidden_states_7_cast_fp16)[name = tensor("inputs_19_cast_fp16")]; tensor var_714 = const()[name = tensor("op_714"), val = tensor(3)]; - tensor var_721 = const()[name = tensor("op_721"), val = tensor(1)]; tensor out_19_axes_0 = const()[name = tensor("out_19_axes_0"), val = tensor([1])]; tensor var_740_to_fp16 = const()[name = tensor("op_740_to_fp16"), val = tensor(0x1.5p-17)]; tensor out_19_cast_fp16 = layer_norm(axes = out_19_axes_0, epsilon = var_740_to_fp16, x = inputs_19_cast_fp16)[name = tensor("out_19_cast_fp16")]; @@ -453,26 +479,29 @@ program(1.0) tensor obj_43_beta_0_to_fp16 = const()[name = tensor("obj_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54377856)))]; tensor obj_43_epsilon_0_to_fp16 = const()[name = tensor("obj_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_43_cast_fp16 = batch_norm(beta = obj_43_beta_0_to_fp16, epsilon = obj_43_epsilon_0_to_fp16, gamma = obj_43_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_19_cast_fp16)[name = tensor("obj_43_cast_fp16")]; - tensor var_756 = const()[name = tensor("op_756"), val = tensor([1, 1])]; - tensor var_758 = const()[name = tensor("op_758"), val = tensor([1, 1])]; - tensor query_13_pad_type_0 = const()[name = tensor("query_13_pad_type_0"), val = tensor("custom")]; + tensor query_13_pad_type_0 = const()[name = tensor("query_13_pad_type_0"), val = tensor("valid")]; + tensor query_13_strides_0 = const()[name = tensor("query_13_strides_0"), val = tensor([1, 1])]; tensor query_13_pad_0 = const()[name = tensor("query_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_13_dilations_0 = const()[name = tensor("query_13_dilations_0"), val = tensor([1, 1])]; + tensor query_13_groups_0 = const()[name = tensor("query_13_groups_0"), val = tensor(1)]; tensor layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54378688)))]; tensor layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54673664)))]; - tensor query_13_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = var_758, groups = var_721, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = var_756, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("query_13_cast_fp16")]; - tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 1])]; - tensor var_764 = const()[name = tensor("op_764"), val = tensor([1, 1])]; - tensor current_key_pad_type_0 = const()[name = tensor("current_key_pad_type_0"), val = tensor("custom")]; + tensor query_13_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = query_13_dilations_0, groups = query_13_groups_0, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = 
query_13_strides_0, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("query_13_cast_fp16")]; + tensor current_key_pad_type_0 = const()[name = tensor("current_key_pad_type_0"), val = tensor("valid")]; + tensor current_key_strides_0 = const()[name = tensor("current_key_strides_0"), val = tensor([1, 1])]; tensor current_key_pad_0 = const()[name = tensor("current_key_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_key_dilations_0 = const()[name = tensor("current_key_dilations_0"), val = tensor([1, 1])]; + tensor current_key_groups_0 = const()[name = tensor("current_key_groups_0"), val = tensor(1)]; tensor layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54674496)))]; - tensor current_key_cast_fp16 = conv(dilations = var_764, groups = var_721, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = var_762, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("current_key_cast_fp16")]; - tensor var_769 = const()[name = tensor("op_769"), val = tensor([1, 1])]; - tensor var_771 = const()[name = tensor("op_771"), val = tensor([1, 1])]; - tensor current_value_pad_type_0 = const()[name = tensor("current_value_pad_type_0"), val = tensor("custom")]; + tensor current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("current_key_cast_fp16")]; + tensor current_value_pad_type_0 = const()[name = tensor("current_value_pad_type_0"), val = tensor("valid")]; + tensor current_value_strides_0 = const()[name = tensor("current_value_strides_0"), val = tensor([1, 1])]; tensor current_value_pad_0 = const()[name = tensor("current_value_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor current_value_dilations_0 = const()[name = tensor("current_value_dilations_0"), val = tensor([1, 1])]; + tensor current_value_groups_0 = const()[name = tensor("current_value_groups_0"), val = tensor(1)]; tensor layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54969472)))]; tensor layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55264448)))]; - tensor current_value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = var_771, groups = var_721, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = var_769, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("current_value_cast_fp16")]; + tensor current_value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = tensor("current_value_cast_fp16")]; tensor var_778_cast_fp16 = mul(x = current_key_cast_fp16, y = var_126_cast_fp16)[name = tensor("op_778_cast_fp16")]; tensor var_780_cast_fp16 = mul(x = var_47_cast_fp16_3, y = 
var_129_cast_fp16)[name = tensor("op_780_cast_fp16")]; tensor key_13_cast_fp16 = add(x = var_778_cast_fp16, y = var_780_cast_fp16)[name = tensor("key_13_cast_fp16")]; @@ -497,13 +526,14 @@ program(1.0) tensor attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_802_cast_fp16, y = var_800_cast_fp16)[name = tensor("attn_13_cast_fp16")]; tensor var_805 = const()[name = tensor("op_805"), val = tensor([1, 384, 1, -1])]; tensor input_31_cast_fp16 = reshape(shape = var_805, x = attn_13_cast_fp16)[name = tensor("input_31_cast_fp16")]; - tensor var_809 = const()[name = tensor("op_809"), val = tensor([1, 1])]; - tensor var_811 = const()[name = tensor("op_811"), val = tensor([1, 1])]; - tensor obj_49_pad_type_0 = const()[name = tensor("obj_49_pad_type_0"), val = tensor("custom")]; + tensor obj_49_pad_type_0 = const()[name = tensor("obj_49_pad_type_0"), val = tensor("valid")]; + tensor obj_49_strides_0 = const()[name = tensor("obj_49_strides_0"), val = tensor([1, 1])]; tensor obj_49_pad_0 = const()[name = tensor("obj_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_49_dilations_0 = const()[name = tensor("obj_49_dilations_0"), val = tensor([1, 1])]; + tensor obj_49_groups_0 = const()[name = tensor("obj_49_groups_0"), val = tensor(1)]; tensor layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55265280)))]; tensor layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55560256)))]; - tensor obj_49_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = var_811, groups = var_721, pad = obj_49_pad_0, pad_type = obj_49_pad_type_0, strides = var_809, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_31_cast_fp16)[name = tensor("obj_49_cast_fp16")]; + tensor obj_49_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = obj_49_dilations_0, groups = obj_49_groups_0, pad = obj_49_pad_0, pad_type = obj_49_pad_type_0, strides = obj_49_strides_0, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_31_cast_fp16)[name = tensor("obj_49_cast_fp16")]; tensor inputs_21_cast_fp16 = add(x = inputs_19_cast_fp16, y = obj_49_cast_fp16)[name = tensor("inputs_21_cast_fp16")]; tensor out_21_axes_0 = const()[name = tensor("out_21_axes_0"), val = tensor([1])]; tensor var_827_to_fp16 = const()[name = tensor("op_827_to_fp16"), val = tensor(0x1.5p-17)]; @@ -512,26 +542,29 @@ program(1.0) tensor obj_51_beta_0_to_fp16 = const()[name = tensor("obj_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55561920)))]; tensor obj_51_epsilon_0_to_fp16 = const()[name = tensor("obj_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor obj_51_cast_fp16 = batch_norm(beta = obj_51_beta_0_to_fp16, epsilon = obj_51_epsilon_0_to_fp16, gamma = obj_51_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_21_cast_fp16)[name = tensor("obj_51_cast_fp16")]; - tensor var_843 = const()[name = tensor("op_843"), val = tensor([1, 1])]; - tensor var_845 = const()[name = tensor("op_845"), val = tensor([1, 1])]; - tensor query_pad_type_0 = const()[name = tensor("query_pad_type_0"), val = tensor("custom")]; + tensor query_pad_type_0 = const()[name = 
tensor("query_pad_type_0"), val = tensor("valid")]; + tensor query_strides_0 = const()[name = tensor("query_strides_0"), val = tensor([1, 1])]; tensor query_pad_0 = const()[name = tensor("query_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor query_dilations_0 = const()[name = tensor("query_dilations_0"), val = tensor([1, 1])]; + tensor query_groups_0 = const()[name = tensor("query_groups_0"), val = tensor(1)]; tensor layers_3_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor("layers_3_encoder_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55562752)))]; tensor layers_3_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor("layers_3_encoder_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55857728)))]; - tensor query_cast_fp16 = conv(bias = layers_3_encoder_attn_q_proj_bias_to_fp16, dilations = var_845, groups = var_721, pad = query_pad_0, pad_type = query_pad_type_0, strides = var_843, weight = layers_3_encoder_attn_q_proj_weight_to_fp16, x = obj_51_cast_fp16)[name = tensor("query_cast_fp16")]; - tensor var_849 = const()[name = tensor("op_849"), val = tensor([1, 1])]; - tensor var_851 = const()[name = tensor("op_851"), val = tensor([1, 1])]; - tensor key_pad_type_0 = const()[name = tensor("key_pad_type_0"), val = tensor("custom")]; + tensor query_cast_fp16 = conv(bias = layers_3_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_3_encoder_attn_q_proj_weight_to_fp16, x = obj_51_cast_fp16)[name = tensor("query_cast_fp16")]; + tensor key_pad_type_0 = const()[name = tensor("key_pad_type_0"), val = tensor("valid")]; + tensor key_strides_0 = const()[name = tensor("key_strides_0"), val = tensor([1, 1])]; tensor key_pad_0 = const()[name = tensor("key_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor key_dilations_0 = const()[name = tensor("key_dilations_0"), val = tensor([1, 1])]; + tensor key_groups_0 = const()[name = tensor("key_groups_0"), val = tensor(1)]; tensor layers_3_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor("layers_3_encoder_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55858560)))]; - tensor key_cast_fp16 = conv(dilations = var_851, groups = var_721, pad = key_pad_0, pad_type = key_pad_type_0, strides = var_849, weight = layers_3_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_cast_fp16")]; - tensor var_856 = const()[name = tensor("op_856"), val = tensor([1, 1])]; - tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; - tensor value_pad_type_0 = const()[name = tensor("value_pad_type_0"), val = tensor("custom")]; + tensor key_cast_fp16 = conv(dilations = key_dilations_0, groups = key_groups_0, pad = key_pad_0, pad_type = key_pad_type_0, strides = key_strides_0, weight = layers_3_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("key_cast_fp16")]; + tensor value_pad_type_0 = const()[name = tensor("value_pad_type_0"), val = tensor("valid")]; + tensor value_strides_0 = const()[name = tensor("value_strides_0"), val = tensor([1, 1])]; tensor value_pad_0 = const()[name = tensor("value_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor value_dilations_0 = const()[name = tensor("value_dilations_0"), val = tensor([1, 1])]; + tensor value_groups_0 = const()[name = 
tensor("value_groups_0"), val = tensor(1)]; tensor layers_3_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor("layers_3_encoder_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56153536)))]; tensor layers_3_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor("layers_3_encoder_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56448512)))]; - tensor value_cast_fp16 = conv(bias = layers_3_encoder_attn_v_proj_bias_to_fp16, dilations = var_858, groups = var_721, pad = value_pad_0, pad_type = value_pad_type_0, strides = var_856, weight = layers_3_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_cast_fp16")]; + tensor value_cast_fp16 = conv(bias = layers_3_encoder_attn_v_proj_bias_to_fp16, dilations = value_dilations_0, groups = value_groups_0, pad = value_pad_0, pad_type = value_pad_type_0, strides = value_strides_0, weight = layers_3_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor("value_cast_fp16")]; tensor var_862 = const()[name = tensor("op_862"), val = tensor([1, 6, 64, -1])]; tensor mh_q_cast_fp16 = reshape(shape = var_862, x = query_cast_fp16)[name = tensor("mh_q_cast_fp16")]; tensor var_864_to_fp16 = const()[name = tensor("op_864_to_fp16"), val = tensor(0x1p-3)]; @@ -549,13 +582,14 @@ program(1.0) tensor attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_872_cast_fp16, y = obj_55_cast_fp16)[name = tensor("attn_cast_fp16")]; tensor var_875 = const()[name = tensor("op_875"), val = tensor([1, 384, 1, -1])]; tensor input_33_cast_fp16 = reshape(shape = var_875, x = attn_cast_fp16)[name = tensor("input_33_cast_fp16")]; - tensor var_879 = const()[name = tensor("op_879"), val = tensor([1, 1])]; - tensor var_881 = const()[name = tensor("op_881"), val = tensor([1, 1])]; - tensor obj_53_pad_type_0 = const()[name = tensor("obj_53_pad_type_0"), val = tensor("custom")]; + tensor obj_53_pad_type_0 = const()[name = tensor("obj_53_pad_type_0"), val = tensor("valid")]; + tensor obj_53_strides_0 = const()[name = tensor("obj_53_strides_0"), val = tensor([1, 1])]; tensor obj_53_pad_0 = const()[name = tensor("obj_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor obj_53_dilations_0 = const()[name = tensor("obj_53_dilations_0"), val = tensor([1, 1])]; + tensor obj_53_groups_0 = const()[name = tensor("obj_53_groups_0"), val = tensor(1)]; tensor layers_3_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor("layers_3_encoder_attn_o_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56449344)))]; tensor layers_3_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor("layers_3_encoder_attn_o_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56744320)))]; - tensor obj_53_cast_fp16 = conv(bias = layers_3_encoder_attn_o_proj_bias_to_fp16, dilations = var_881, groups = var_721, pad = obj_53_pad_0, pad_type = obj_53_pad_type_0, strides = var_879, weight = layers_3_encoder_attn_o_proj_weight_to_fp16, x = input_33_cast_fp16)[name = tensor("obj_53_cast_fp16")]; + tensor obj_53_cast_fp16 = conv(bias = layers_3_encoder_attn_o_proj_bias_to_fp16, dilations = obj_53_dilations_0, groups = obj_53_groups_0, pad = obj_53_pad_0, pad_type = obj_53_pad_type_0, strides = obj_53_strides_0, weight = layers_3_encoder_attn_o_proj_weight_to_fp16, x = 
input_33_cast_fp16)[name = tensor("obj_53_cast_fp16")]; tensor inputs_23_cast_fp16 = add(x = inputs_21_cast_fp16, y = obj_53_cast_fp16)[name = tensor("inputs_23_cast_fp16")]; tensor out_23_axes_0 = const()[name = tensor("out_23_axes_0"), val = tensor([1])]; tensor var_896_to_fp16 = const()[name = tensor("op_896_to_fp16"), val = tensor(0x1.5p-17)]; @@ -564,22 +598,24 @@ program(1.0) tensor input_35_beta_0_to_fp16 = const()[name = tensor("input_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56745984)))]; tensor input_35_epsilon_0_to_fp16 = const()[name = tensor("input_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; tensor input_35_cast_fp16 = batch_norm(beta = input_35_beta_0_to_fp16, epsilon = input_35_epsilon_0_to_fp16, gamma = input_35_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_23_cast_fp16)[name = tensor("input_35_cast_fp16")]; - tensor var_908 = const()[name = tensor("op_908"), val = tensor([1, 1])]; - tensor var_910 = const()[name = tensor("op_910"), val = tensor([1, 1])]; - tensor input_37_pad_type_0 = const()[name = tensor("input_37_pad_type_0"), val = tensor("custom")]; + tensor input_37_pad_type_0 = const()[name = tensor("input_37_pad_type_0"), val = tensor("valid")]; + tensor input_37_strides_0 = const()[name = tensor("input_37_strides_0"), val = tensor([1, 1])]; tensor input_37_pad_0 = const()[name = tensor("input_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_37_dilations_0 = const()[name = tensor("input_37_dilations_0"), val = tensor([1, 1])]; + tensor input_37_groups_0 = const()[name = tensor("input_37_groups_0"), val = tensor(1)]; tensor layers_3_fc1_weight_to_fp16 = const()[name = tensor("layers_3_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56746816)))]; tensor layers_3_fc1_bias_to_fp16 = const()[name = tensor("layers_3_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57926528)))]; - tensor input_37_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = var_910, groups = var_721, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = var_908, weight = layers_3_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = tensor("input_37_cast_fp16")]; + tensor input_37_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = input_37_dilations_0, groups = input_37_groups_0, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = input_37_strides_0, weight = layers_3_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = tensor("input_37_cast_fp16")]; tensor input_mode_0 = const()[name = tensor("input_mode_0"), val = tensor("EXACT")]; tensor input_cast_fp16 = gelu(mode = input_mode_0, x = input_37_cast_fp16)[name = tensor("input_cast_fp16")]; - tensor var_916 = const()[name = tensor("op_916"), val = tensor([1, 1])]; - tensor var_918 = const()[name = tensor("op_918"), val = tensor([1, 1])]; - tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("valid")]; + tensor hidden_states_9_strides_0 = const()[name = tensor("hidden_states_9_strides_0"), val = tensor([1, 1])]; tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor hidden_states_9_dilations_0 = const()[name = 
tensor("hidden_states_9_dilations_0"), val = tensor([1, 1])]; + tensor hidden_states_9_groups_0 = const()[name = tensor("hidden_states_9_groups_0"), val = tensor(1)]; tensor layers_3_fc2_weight_to_fp16 = const()[name = tensor("layers_3_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57929664)))]; tensor layers_3_fc2_bias_to_fp16 = const()[name = tensor("layers_3_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(59109376)))]; - tensor hidden_states_9_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = var_918, groups = var_721, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_916, weight = layers_3_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor("hidden_states_9_cast_fp16")]; + tensor hidden_states_9_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = hidden_states_9_dilations_0, groups = hidden_states_9_groups_0, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = hidden_states_9_strides_0, weight = layers_3_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor("hidden_states_9_cast_fp16")]; tensor inputs_cast_fp16 = add(x = inputs_23_cast_fp16, y = hidden_states_9_cast_fp16)[name = tensor("inputs_cast_fp16")]; tensor out_axes_0 = const()[name = tensor("out_axes_0"), val = tensor([1])]; tensor var_939_to_fp16 = const()[name = tensor("op_939_to_fp16"), val = tensor(0x1.5p-17)]; @@ -675,8 +711,8 @@ program(1.0) tensor var_1107 = const()[name = tensor("op_1107"), val = tensor(1)]; tensor var_1108_interleave_0 = const()[name = tensor("op_1108_interleave_0"), val = tensor(false)]; tensor var_1108_cast_fp16 = concat(axis = var_1107, interleave = var_1108_interleave_0, values = (var_974_cast_fp16, var_992_cast_fp16, var_1010_cast_fp16, var_1028_cast_fp16, var_1046_cast_fp16, var_1064_cast_fp16, var_1082_cast_fp16, var_1100_cast_fp16))[name = tensor("op_1108_cast_fp16")]; - tensor var_1110 = const()[name = tensor("op_1110"), val = tensor([1])]; tensor var_1111 = const()[name = tensor("op_1111"), val = tensor(false)]; - tensor alignment_heads_weights = reduce_mean(axes = var_1110, keep_dims = var_1111, x = var_1108_cast_fp16)[name = tensor("obj_cast_fp16")]; + tensor obj_axes_0 = const()[name = tensor("obj_axes_0"), val = tensor([1])]; + tensor alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_1111, x = var_1108_cast_fp16)[name = tensor("obj_cast_fp16")]; } -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights); } \ No newline at end of file