diff --git "a/debug/debug-phase1.py" "b/debug/debug-phase1.py" new file mode 100644--- /dev/null +++ "b/debug/debug-phase1.py" @@ -0,0 +1,10152 @@ +# from tvm.script import ir as I +# from tvm.script import tir as T +# from tvm.script import relax as R + +@I.ir_module +class Module: + I.module_attrs({"external_mods": [metadata["runtime.Module"][0], metadata["runtime.Module"][1], metadata["runtime.Module"][2], metadata["runtime.Module"][3], metadata["runtime.Module"][4], metadata["runtime.Module"][5], metadata["runtime.Module"][6], metadata["runtime.Module"][7], metadata["runtime.Module"][8], metadata["runtime.Module"][9], metadata["runtime.Module"][10], metadata["runtime.Module"][11], metadata["runtime.Module"][12], metadata["runtime.Module"][13], metadata["runtime.Module"][14]]}) + @T.prim_func(private=True) + def NT_matmul(layer_norm356: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16"), model_decoder_layers_0_self_attn_q_proj_weight5: T.Buffer((T.int64(1280), T.int64(1280)), "float16"), NT_matmul: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16")): + T.func_attr({"tir.noalias": T.bool(True)}) + # with T.block("root"): + for i0, i1, i2, k in T.grid(T.int64(1), T.int64(1), T.int64(1280), T.int64(1280)): + with T.block("NT_matmul"): + v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) + T.reads(layer_norm356[v_i0, v_i1, v_k], model_decoder_layers_0_self_attn_q_proj_weight5[v_i2, v_k]) + T.writes(NT_matmul[v_i0, v_i1, v_i2]) + with T.init(): + NT_matmul[v_i0, v_i1, v_i2] = T.float16(0) + NT_matmul[v_i0, v_i1, v_i2] = NT_matmul[v_i0, v_i1, v_i2] + layer_norm356[v_i0, v_i1, v_k] * model_decoder_layers_0_self_attn_q_proj_weight5[v_i2, v_k] + + @T.prim_func(private=True) + def NT_matmul1(layer_norm358: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16"), model_decoder_layers_0_fc1_weight5: T.Buffer((T.int64(5120), T.int64(1280)), "float16"), NT_matmul: T.Buffer((T.int64(1), T.int64(1), T.int64(5120)), "float16")): + T.func_attr({"tir.noalias": T.bool(True)}) + # with T.block("root"): + for i0, i1, i2, k in T.grid(T.int64(1), T.int64(1), T.int64(5120), T.int64(1280)): + with T.block("NT_matmul"): + v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) + T.reads(layer_norm358[v_i0, v_i1, v_k], model_decoder_layers_0_fc1_weight5[v_i2, v_k]) + T.writes(NT_matmul[v_i0, v_i1, v_i2]) + with T.init(): + NT_matmul[v_i0, v_i1, v_i2] = T.float16(0) + NT_matmul[v_i0, v_i1, v_i2] = NT_matmul[v_i0, v_i1, v_i2] + layer_norm358[v_i0, v_i1, v_k] * model_decoder_layers_0_fc1_weight5[v_i2, v_k] + + @T.prim_func(private=True) + def NT_matmul2(gelu130: T.Buffer((T.int64(1), T.int64(1), T.int64(5120)), "float16"), model_decoder_layers_0_fc2_weight5: T.Buffer((T.int64(1280), T.int64(5120)), "float16"), NT_matmul: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16")): + T.func_attr({"tir.noalias": T.bool(True)}) + # with T.block("root"): + for i0, i1, i2, k in T.grid(T.int64(1), T.int64(1), T.int64(1280), T.int64(5120)): + with T.block("NT_matmul"): + v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) + T.reads(gelu130[v_i0, v_i1, v_k], model_decoder_layers_0_fc2_weight5[v_i2, v_k]) + T.writes(NT_matmul[v_i0, v_i1, v_i2]) + with T.init(): + NT_matmul[v_i0, v_i1, v_i2] = T.float16(0) + NT_matmul[v_i0, v_i1, v_i2] = NT_matmul[v_i0, v_i1, v_i2] + gelu130[v_i0, v_i1, v_k] * model_decoder_layers_0_fc2_weight5[v_i2, v_k] + + @T.prim_func(private=True) + def NT_matmul3(layer_norm452: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16"), 
model_decoder_embed_tokens_weight5: T.Buffer((T.int64(51866), T.int64(1280)), "float16"), NT_matmul: T.Buffer((T.int64(1), T.int64(1), T.int64(51866)), "float32")): + T.func_attr({"tir.noalias": T.bool(True)}) + # with T.block("root"): + for i0, i1, i2, k in T.grid(T.int64(1), T.int64(1), T.int64(51866), T.int64(1280)): + with T.block("NT_matmul"): + v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) + T.reads(layer_norm452[v_i0, v_i1, v_k], model_decoder_embed_tokens_weight5[v_i2, v_k]) + T.writes(NT_matmul[v_i0, v_i1, v_i2]) + with T.init(): + NT_matmul[v_i0, v_i1, v_i2] = T.float32(0) + NT_matmul[v_i0, v_i1, v_i2] = NT_matmul[v_i0, v_i1, v_i2] + T.Cast("float32", layer_norm452[v_i0, v_i1, v_k]) * T.Cast("float32", model_decoder_embed_tokens_weight5[v_i2, v_k]) + + @T.prim_func + def apply_bitmask_inplace(var_logits: T.handle, var_seq_ids: T.handle, var_bitmask: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) + logits = T.match_buffer(var_logits, (batch_size, vocab_size)) + num_seq = T.int32(is_size_var=True) + seq_ids = T.match_buffer(var_seq_ids, (num_seq,), "int32") + bitmask = T.match_buffer(var_bitmask, (batch_size, (vocab_size + 31) // 32), "int32") + # with T.block("root"): + for fused_s_v_0 in T.thread_binding((num_seq * vocab_size + 1023) // 1024, thread="blockIdx.x"): + for fused_s_v_1 in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("block"): + vs = T.axis.spatial(num_seq, (fused_s_v_0 * 1024 + fused_s_v_1) // vocab_size) + vv = T.axis.spatial(vocab_size, (fused_s_v_0 * 1024 + fused_s_v_1) % vocab_size) + T.where(fused_s_v_0 * 1024 + fused_s_v_1 < num_seq * vocab_size) + T.reads(bitmask[seq_ids[vs], vv // 32], seq_ids[vs], logits[seq_ids[vs], vv]) + T.writes(logits[seq_ids[vs], vv]) + logits[seq_ids[vs], vv] = T.if_then_else(T.bitwise_and(T.shift_right(bitmask[seq_ids[vs], vv // 32], vv % 32), 1) == 1, logits[seq_ids[vs], vv], T.float32(-3.4028234663852886e+38)) + + @T.prim_func + def apply_logit_bias_inplace(var_logits: T.handle, var_pos2seq_id: T.handle, var_token_ids: T.handle, var_logit_bias: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) + logits = T.match_buffer(var_logits, (batch_size, vocab_size)) + num_token = T.int32(is_size_var=True) + pos2seq_id = T.match_buffer(var_pos2seq_id, (num_token,), "int32") + token_ids = T.match_buffer(var_token_ids, (num_token,), "int32") + logit_bias = T.match_buffer(var_logit_bias, (num_token,)) + # with T.block("root"): + for p0 in T.thread_binding((num_token + 1023) // 1024, thread="blockIdx.x"): + for p1 in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("block"): + vp = T.axis.spatial(num_token, 
p0 * 1024 + p1) + T.where(p0 * 1024 + p1 < num_token) + T.reads(logits[pos2seq_id[vp], token_ids[vp]], pos2seq_id[vp], token_ids[vp], logit_bias[vp]) + T.writes(logits[pos2seq_id[vp], token_ids[vp]]) + logits[pos2seq_id[vp], token_ids[vp]] = logits[pos2seq_id[vp], token_ids[vp]] + logit_bias[vp] + + @T.prim_func + def apply_penalty_inplace(var_logits: T.handle, var_seq_ids: T.handle, var_pos2seq_id: T.handle, var_token_ids: T.handle, var_token_cnt: T.handle, var_penalties: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": T.bool(True), "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) + logits = T.match_buffer(var_logits, (batch_size, vocab_size)) + num_seq = T.int32(is_size_var=True) + seq_ids = T.match_buffer(var_seq_ids, (num_seq,), "int32") + num_token = T.int32(is_size_var=True) + pos2seq_id = T.match_buffer(var_pos2seq_id, (num_token,), "int32") + token_ids = T.match_buffer(var_token_ids, (num_token,), "int32") + token_cnt = T.match_buffer(var_token_cnt, (num_token,), "int32") + penalties = T.match_buffer(var_penalties, (num_seq, 3)) + # with T.block("root"): + for p0 in T.thread_binding((num_token + 1023) // 1024, thread="blockIdx.x"): + for p1 in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("block"): + vp = T.axis.spatial(num_token, p0 * 1024 + p1) + T.where(p0 * 1024 + p1 < num_token) + T.reads(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]], seq_ids[pos2seq_id[vp]], pos2seq_id[vp], token_ids[vp], penalties[pos2seq_id[vp], 0:3], token_cnt[vp]) + T.writes(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]]) + logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] = logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] - (penalties[pos2seq_id[vp], 0] + T.Cast("float32", token_cnt[vp]) * penalties[pos2seq_id[vp], 1]) + logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] = T.if_then_else(logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] > T.float32(0), logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] * penalties[pos2seq_id[vp], 2], logits[seq_ids[pos2seq_id[vp]], token_ids[vp]] / penalties[pos2seq_id[vp], 2]) + + @T.prim_func + def batch_decode_paged_kv(_0: T.int32, Q_handle: T.handle, pages_handle: T.handle, page_table_indptr_handle: T.handle, page_table_values_handle: T.handle, var_length_info: T.handle, k_rope_pos_offset_handle: T.handle, q_rope_position_handle: T.handle, output_handle: T.handle, lse_handle: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + B = T.int32(is_size_var=True) + Q = T.match_buffer(Q_handle, (B, 20, 64), "float16") + max_num_pages = T.int32(is_size_var=True) + pages = T.match_buffer(pages_handle, (max_num_pages, 2, 20, 16, 64), "float16") + page_table_indptr = T.match_buffer(page_table_indptr_handle, (B + 1,), "int32", offset_factor=1) + nnz_pages = 
T.int32(is_size_var=True) + page_table_values = T.match_buffer(page_table_values_handle, (nnz_pages,), "int32", offset_factor=1) + length_info = T.match_buffer(var_length_info, (B,), "int32", offset_factor=1) + k_rope_pos_offset = T.match_buffer(k_rope_pos_offset_handle, (B,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(q_rope_position_handle, (B,), "int32", offset_factor=1) + output = T.match_buffer(output_handle, (B, 20, 64), "float16") + lse = T.match_buffer(lse_handle, (B, 20)) + # with T.block("root"): + sm_scale: T.float32 = T.float32(0.18033688011112042) + for bx in T.thread_binding(B, thread="blockIdx.x"): + for fused_by_bz in T.thread_binding(20, thread="blockIdx.y"): + for ty in T.thread_binding(1, thread="threadIdx.y"): + for tx in T.thread_binding(16, thread="threadIdx.x"): + for tz in T.thread_binding(32, thread="threadIdx.z"): + with T.block("attn"): + T.reads(page_table_indptr[bx:bx + 2], length_info[bx], q_rope_position[bx], Q[bx, fused_by_bz // 20 + ty + fused_by_bz % 20, tx * 4 - 32:tx * 4 - 32 + 68]) + T.writes(output[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty, tx * 4:tx * 4 + 4], lse[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty]) + Q_local = T.alloc_buffer((4,), "float16", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + K_smem = T.alloc_buffer((64, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((64, 64), "float16", scope="shared") + O_allreduce = T.alloc_buffer((32, 1, 64), scope="shared") + md_allreduce = T.alloc_buffer((32, 1, 2), scope="shared") + S_reduce_local = T.alloc_buffer((1,), scope="local") + t0 = T.alloc_buffer((1,), scope="local") + S_local = T.alloc_buffer((2,), scope="local") + QK_local = T.alloc_buffer((4,), scope="local") + V_local = T.alloc_buffer((4,), "float16", scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_prev = T.alloc_buffer((1,), scope="local") + other_m = T.alloc_buffer((1,), scope="local") + other_d = T.alloc_buffer((1,), scope="local") + exp_mprev = T.alloc_buffer((1,), scope="local") + exp_otherm = T.alloc_buffer((1,), scope="local") + other_o = T.alloc_buffer((4,), scope="local") + st_m = T.alloc_buffer((1,), scope="local") + st_d = T.alloc_buffer((1,), scope="local") + O_local = T.alloc_buffer((4,), scope="local") + by: T.int32 = fused_by_bz % 20 + bz: T.int32 = fused_by_bz // 20 + batch_idx: T.int32 = bx + cur_page_indptr_begin: T.int32 = page_table_indptr[batch_idx] + cur_page_indptr_end: T.int32 = page_table_indptr[batch_idx + 1] + kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[batch_idx], 0) + st_m[0] = T.float32(-50000) + st_d[0] = T.float32(1) + for vec in T.vectorized(4): + O_local[vec] = T.float32(0) + for vec in T.vectorized(4): + Q_local[vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", Q[bx, by + bz + ty, tx * 4 + vec]) + T.sin(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, Q[bx, by + bz + ty, tx * 4 + vec + 32] * T.float16(-1), Q[bx, by + bz + ty, tx * 4 + vec - 32]))), Q[bx, by + bz + ty, tx * 4 + vec]) + for iterator in range((kv_chunk_len[0] + 63) // 64): + tile_start_s: T.int32 = (tz + ty) * 2 + tile_start_g: T.int32 = 
(iterator * 32 + tz + ty) * 2 + for j in range(2): + with T.block("KV_load"): + T.reads() + T.writes() + row_g: T.int32 = tile_start_g + j + if row_g < kv_chunk_len[0]: + seq_offset: T.int32 = row_g + page_no: T.int32 = page_table_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + for vec in T.vectorized(4): + K_smem[tile_start_s + j, tx * 4 + vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, tx * 4 + vec]) + T.sin(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, pages[page_no, 0, by, page_offset, tx * 4 + vec + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, tx * 4 + vec - 32]))), pages[page_no, 0, by, page_offset, tx * 4 + vec]) + V_smem[tile_start_s + j, tx * 4 + vec] = pages[page_no, 1, by, page_offset, tx * 4 + vec] + else: + for vec in T.vectorized(4): + K_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) + V_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) + T.tvm_storage_sync("shared") + m_prev[0] = st_m[0] + for j in range(2): + for vec in T.vectorized(4): + QK_local[vec] = T.Cast("float32", Q_local[vec]) * T.Cast("float32", K_smem[tz * 2 + j, tx * 4 + vec]) * attn_score_scaling_factor * sm_scale + S_reduce_local[0] = T.float32(0) + for vec in T.unroll(4): + S_reduce_local[0] = S_reduce_local[0] + QK_local[vec] + with T.block("block_cross_thread"): + T.reads(S_reduce_local[0]) + T.writes(t0[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), S_reduce_local[0], T.bool(True), t0[0], tx) + S_local[j] = T.float32(-50000) + if (iterator * 32 + tz) * 2 + j < kv_chunk_len[0]: + S_local[j] = t0[0] + st_m[0] = T.max(st_m[0], S_local[j]) + o_scale: T.float32 = T.exp2(m_prev[0] - st_m[0]) + st_d[0] = st_d[0] * o_scale + for j in range(2): + S_local[j] = T.exp2(S_local[j] - st_m[0]) + st_d[0] = st_d[0] + S_local[j] + for j in T.vectorized(4): + O_local[j] = O_local[j] * o_scale + for j in range(2): + for vec in T.vectorized(4): + V_local[vec] = V_smem[tz * 2 + j, tx * 4 + vec] + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] + T.Cast("float32", V_local[vec]) * S_local[j] + for vec in T.vectorized(4): + O_allreduce[tz, ty, tx * 4 + vec] = O_local[vec] + md_allreduce[tz, ty, 0] = st_m[0] + md_allreduce[tz, ty, 1] = st_d[0] + T.tvm_storage_sync("shared") + st_m[0] = T.float32(-50000) + st_d[0] = T.float32(1) + for vec in T.vectorized(4): + O_local[vec] = T.float32(0) + for j in range(32): + m_prev[0] = st_m[0] + d_prev[0] = st_d[0] + other_m[0] = md_allreduce[j, ty, 0] + other_d[0] = md_allreduce[j, ty, 1] + for vec in T.vectorized(4): + other_o[vec] = O_allreduce[j, ty, tx * 4 + vec] + st_m[0] = T.max(st_m[0], other_m[0]) + st_d[0] = d_prev[0] * T.exp2(m_prev[0] - st_m[0]) + other_d[0] * T.exp2(other_m[0] - st_m[0]) + exp_mprev[0] = T.exp2(m_prev[0] - st_m[0]) + exp_otherm[0] = T.exp2(other_m[0] - st_m[0]) + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] * exp_mprev[0] + other_o[vec] * exp_otherm[0] + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] / st_d[0] + for vec in T.vectorized(4): + output[batch_idx, by + bz + ty, tx * 4 + vec] = 
T.Cast("float16", O_local[vec]) + lse[batch_idx, by + bz + ty] = st_m[0] + T.log2(st_d[0]) + + @T.prim_func + def batch_decode_paged_kv_sliding_window(_0: T.int32, Q_handle: T.handle, pages_handle: T.handle, page_table_indptr_handle: T.handle, page_table_values_handle: T.handle, var_length_info: T.handle, k_rope_pos_offset_handle: T.handle, q_rope_position_handle: T.handle, output_handle: T.handle, lse_handle: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + B = T.int32(is_size_var=True) + Q = T.match_buffer(Q_handle, (B, 20, 64), "float16") + max_num_pages = T.int32(is_size_var=True) + pages = T.match_buffer(pages_handle, (max_num_pages, 2, 20, 16, 64), "float16") + page_table_indptr = T.match_buffer(page_table_indptr_handle, (B + 1,), "int32", offset_factor=1) + nnz_pages = T.int32(is_size_var=True) + page_table_values = T.match_buffer(page_table_values_handle, (nnz_pages,), "int32", offset_factor=1) + length_info = T.match_buffer(var_length_info, (3, B), "int32", offset_factor=1) + k_rope_pos_offset = T.match_buffer(k_rope_pos_offset_handle, (B,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(q_rope_position_handle, (B,), "int32", offset_factor=1) + output = T.match_buffer(output_handle, (B, 20, 64), "float16") + lse = T.match_buffer(lse_handle, (B, 20)) + # with T.block("root"): + sm_scale: T.float32 = T.float32(0.18033688011112042) + for bx in T.thread_binding(B, thread="blockIdx.x"): + for fused_by_bz in T.thread_binding(20, thread="blockIdx.y"): + for ty in T.thread_binding(1, thread="threadIdx.y"): + for tx in T.thread_binding(16, thread="threadIdx.x"): + for tz in T.thread_binding(32, thread="threadIdx.z"): + with T.block("attn"): + T.reads(page_table_indptr[bx:bx + 2], length_info[0:3, bx], q_rope_position[bx], Q[bx, fused_by_bz // 20 + ty + fused_by_bz % 20, tx * 4 - 32:tx * 4 - 32 + 68]) + T.writes(output[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty, tx * 4:tx * 4 + 4], lse[bx, fused_by_bz % 20 + fused_by_bz // 20 + ty]) + Q_local = T.alloc_buffer((4,), "float16", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + K_smem = T.alloc_buffer((64, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((64, 64), "float16", scope="shared") + O_allreduce = T.alloc_buffer((32, 1, 64), scope="shared") + md_allreduce = T.alloc_buffer((32, 1, 2), scope="shared") + S_reduce_local = T.alloc_buffer((1,), scope="local") + t0 = T.alloc_buffer((1,), scope="local") + S_local = T.alloc_buffer((2,), scope="local") + QK_local = T.alloc_buffer((4,), scope="local") + V_local = T.alloc_buffer((4,), "float16", scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_prev = T.alloc_buffer((1,), scope="local") + other_m = T.alloc_buffer((1,), scope="local") + other_d = T.alloc_buffer((1,), scope="local") + exp_mprev = T.alloc_buffer((1,), scope="local") + exp_otherm = T.alloc_buffer((1,), scope="local") + other_o = T.alloc_buffer((4,), scope="local") + st_m = T.alloc_buffer((1,), scope="local") + st_d = T.alloc_buffer((1,), scope="local") + O_local = T.alloc_buffer((4,), scope="local") + by: T.int32 = fused_by_bz % 
20 + bz: T.int32 = fused_by_bz // 20 + batch_idx: T.int32 = bx + cur_page_indptr_begin: T.int32 = page_table_indptr[batch_idx] + cur_page_indptr_end: T.int32 = page_table_indptr[batch_idx + 1] + kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[0, batch_idx] - length_info[1, batch_idx] + length_info[2, batch_idx], 0) + st_m[0] = T.float32(-50000) + st_d[0] = T.float32(1) + for vec in T.vectorized(4): + O_local[vec] = T.float32(0) + for vec in T.vectorized(4): + Q_local[vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", Q[bx, by + bz + ty, tx * 4 + vec]) + T.sin(T.Cast("float32", q_rope_position[batch_idx]) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, Q[bx, by + bz + ty, tx * 4 + vec + 32] * T.float16(-1), Q[bx, by + bz + ty, tx * 4 + vec - 32]))), Q[bx, by + bz + ty, tx * 4 + vec]) + for iterator in range((kv_chunk_len[0] + 63) // 64): + tile_start_s: T.int32 = (tz + ty) * 2 + tile_start_g: T.int32 = (iterator * 32 + tz + ty) * 2 + for j in range(2): + with T.block("KV_load"): + T.reads() + T.writes() + row_g: T.int32 = tile_start_g + j + if row_g < kv_chunk_len[0]: + seq_offset: T.int32 = T.if_then_else(row_g < length_info[2, batch_idx], row_g, row_g - length_info[2, batch_idx] + length_info[1, batch_idx]) + page_no: T.int32 = page_table_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + for vec in T.vectorized(4): + K_smem[tile_start_s + j, tx * 4 + vec] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, tx * 4 + vec]) + T.sin(T.Cast("float32", k_rope_pos_offset[batch_idx] + row_g) * rope_scale / T.pow(rope_theta, T.Cast("float32", (tx * 4 + vec) * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(tx * 4 + vec < 32, pages[page_no, 0, by, page_offset, tx * 4 + vec + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, tx * 4 + vec - 32]))), pages[page_no, 0, by, page_offset, tx * 4 + vec]) + V_smem[tile_start_s + j, tx * 4 + vec] = pages[page_no, 1, by, page_offset, tx * 4 + vec] + else: + for vec in T.vectorized(4): + K_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) + V_smem[tile_start_s + j, tx * 4 + vec] = T.float16(0) + T.tvm_storage_sync("shared") + m_prev[0] = st_m[0] + for j in range(2): + for vec in T.vectorized(4): + QK_local[vec] = T.Cast("float32", Q_local[vec]) * T.Cast("float32", K_smem[tz * 2 + j, tx * 4 + vec]) * attn_score_scaling_factor * sm_scale + S_reduce_local[0] = T.float32(0) + for vec in T.unroll(4): + S_reduce_local[0] = S_reduce_local[0] + QK_local[vec] + with T.block("block_cross_thread"): + T.reads(S_reduce_local[0]) + T.writes(t0[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), S_reduce_local[0], T.bool(True), t0[0], tx) + S_local[j] = T.float32(-50000) + if (iterator * 32 + tz) * 2 + j < kv_chunk_len[0]: + S_local[j] = t0[0] + st_m[0] = T.max(st_m[0], S_local[j]) + o_scale: T.float32 = T.exp2(m_prev[0] - st_m[0]) + 
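+ # o_scale below is the FlashAttention-style online-softmax rescale (a reading of the
+ # generated kernel): st_m holds the running row maximum and st_d the running softmax
+ # denominator, and whenever the maximum grows, the partial output O_local and st_d are
+ # multiplied by 2^(m_prev - m_new). Everything stays in exp2 space, which appears to be
+ # why sm_scale above is 0.18033688 ~= log2(e) / sqrt(64) rather than the plain 1 / sqrt(64).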
st_d[0] = st_d[0] * o_scale + for j in range(2): + S_local[j] = T.exp2(S_local[j] - st_m[0]) + st_d[0] = st_d[0] + S_local[j] + for j in T.vectorized(4): + O_local[j] = O_local[j] * o_scale + for j in range(2): + for vec in T.vectorized(4): + V_local[vec] = V_smem[tz * 2 + j, tx * 4 + vec] + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] + T.Cast("float32", V_local[vec]) * S_local[j] + for vec in T.vectorized(4): + O_allreduce[tz, ty, tx * 4 + vec] = O_local[vec] + md_allreduce[tz, ty, 0] = st_m[0] + md_allreduce[tz, ty, 1] = st_d[0] + T.tvm_storage_sync("shared") + st_m[0] = T.float32(-50000) + st_d[0] = T.float32(1) + for vec in T.vectorized(4): + O_local[vec] = T.float32(0) + for j in range(32): + m_prev[0] = st_m[0] + d_prev[0] = st_d[0] + other_m[0] = md_allreduce[j, ty, 0] + other_d[0] = md_allreduce[j, ty, 1] + for vec in T.vectorized(4): + other_o[vec] = O_allreduce[j, ty, tx * 4 + vec] + st_m[0] = T.max(st_m[0], other_m[0]) + st_d[0] = d_prev[0] * T.exp2(m_prev[0] - st_m[0]) + other_d[0] * T.exp2(other_m[0] - st_m[0]) + exp_mprev[0] = T.exp2(m_prev[0] - st_m[0]) + exp_otherm[0] = T.exp2(other_m[0] - st_m[0]) + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] * exp_mprev[0] + other_o[vec] * exp_otherm[0] + for vec in T.vectorized(4): + O_local[vec] = O_local[vec] / st_d[0] + for vec in T.vectorized(4): + output[batch_idx, by + bz + ty, tx * 4 + vec] = T.Cast("float16", O_local[vec]) + lse[batch_idx, by + bz + ty] = st_m[0] + T.log2(st_d[0]) + + @T.prim_func + def batch_prefill_paged_kv(_0: T.int32, var_q: T.handle, var_q_indptr: T.handle, var_pages: T.handle, var_page_indptr: T.handle, var_page_values: T.handle, var_length_info: T.handle, var_k_rope_pos_offset: T.handle, var_q_rope_position: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + total_len = T.int32(is_size_var=True) + q = T.match_buffer(var_q, (total_len, 20, 64), "float16") + batch_size = T.int32(is_size_var=True) + q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1) + max_num_pages = T.int32(is_size_var=True) + pages = T.match_buffer(var_pages, (max_num_pages, 2, 20, 16, 64), "float16") + page_indptr = T.match_buffer(var_page_indptr, (batch_size + 1,), "int32", offset_factor=1) + nnz_pages = T.int32(is_size_var=True) + page_values = T.match_buffer(var_page_values, (nnz_pages,), "int32", offset_factor=1) + length_info = T.match_buffer(var_length_info, (batch_size,), "int32", offset_factor=1) + k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(var_q_rope_position, (total_len,), "int32", offset_factor=1) + output = T.match_buffer(var_output, (total_len, 20, 64), "float16") + lse = T.match_buffer(var_lse, (total_len, 20)) + # with T.block("root"): + for lbx in T.thread_binding(16, thread="blockIdx.x"): + for lby in T.thread_binding(20, thread="blockIdx.y"): + for lty in T.thread_binding(4, thread="threadIdx.y"): + for ltx in T.thread_binding(32, thread="threadIdx.x"): + with T.block("attn"): + 
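+ # Summary of the schedule below (a reading of the generated kernel): each blockIdx.x
+ # walks 32-row query tiles, advancing tile_id by the grid size of 16 across the whole
+ # batch, while blockIdx.y selects one of the 20 heads. Per tile, Q (32x64) and one
+ # 16-row K/V slab are staged in shared memory, S = Q @ K^T is accumulated in registers,
+ # and the m_smem/d_smem online softmax folds each slab into O_local before the final
+ # output = O / d and lse = m + log2(d) stores.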
bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx]) + T.reads() + T.writes() + tile_id = T.alloc_buffer((1,), "int32", scope="local") + batch_idx = T.alloc_buffer((1,), "int32", scope="local") + batch_tiles = T.alloc_buffer((1,), "int32", scope="local") + batch_rows = T.alloc_buffer((1,), "int32", scope="local") + iterator = T.alloc_buffer((1,), "int32", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") + K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + S_smem = T.alloc_buffer((32, 16), scope="shared") + S_local = T.alloc_buffer((32, 16), scope="local") + O_local = T.alloc_buffer((32, 64), scope="local") + m_smem = T.alloc_buffer((32,), scope="shared") + m_prev_smem = T.alloc_buffer((32,), scope="shared") + d_smem = T.alloc_buffer((32,), scope="shared") + m_new = T.alloc_buffer((1,), scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_new = T.alloc_buffer((1,), scope="local") + tile_id[0] = bx + batch_idx[0] = 0 + batch_rows[0] = q_indptr[1] - q_indptr[0] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + while T.tvm_thread_invariant(batch_idx[0] < batch_size): + while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: + tile_id[0] = tile_id[0] - batch_tiles[0] + batch_idx[0] = batch_idx[0] + 1 + if batch_idx[0] < batch_size: + b_idx: T.int32 = batch_idx[0] + batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + if T.tvm_thread_invariant(batch_idx[0] < batch_size): + b_idx: T.int32 = batch_idx[0] + LH_start: T.int32 = tile_id[0] * 32 + q_indptr_val: T.int32 = q_indptr[b_idx] + cur_page_indptr_begin: T.int32 = page_indptr[b_idx] + cur_page_indptr_end: T.int32 = page_indptr[b_idx + 1] + kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[b_idx], 0) + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + m_smem[row] = T.float32(-50000) + d_smem[row] = T.float32(1) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = T.float32(0) + T.tvm_storage_sync("shared") + for li_lj_fused_0 in range(4): + for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for li_lj_fused_3 in T.vectorized(4): + with T.block("Q_load"): + i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) + j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = q_indptr_val + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * 
rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j]) + else: + Q_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for iterator_1 in range((kv_chunk_len[0] + 15) // 16): + L_kv_start: T.int32 = iterator_1 * 16 + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("K_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + seq_offset: T.int32 = cur_L + page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, pages[page_no, 0, by, page_offset, j + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, j - 32]))), pages[page_no, 0, by, page_offset, j]) + else: + K_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("V_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + seq_offset: T.int32 = cur_L + page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + V_smem[i, j] = pages[page_no, 1, by, page_offset, j] + else: + V_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) + T.writes(S_local[0:32, 0:16]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(2, 2): + with T.block("S_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) + j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) + T.reads() + T.writes(S_local[i, j]) + S_local[i, j] = T.float32(0) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): + with T.block("S_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) 
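+ # The reduce axis k below walks the 64-wide head dimension, so this block computes
+ # S[i, j] += Q[i, k] * K[j, k], i.e. S = Q @ K^T. The constant 0.18033688... again
+ # looks like log2(e) / sqrt(64), folding the softmax temperature into the score so
+ # the later T.exp2 calls need no extra scaling.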
+ k = T.axis.reduce(64, lk_0 * 8 + lk_1) + T.reads(S_local[i, j], Q_smem[i, k], K_smem[j, k]) + T.writes(S_local[i, j]) + S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k]) * T.Cast("float32", K_smem[j, k]) * attn_score_scaling_factor * T.float32(0.18033688011112042) + T.tvm_storage_sync("shared") + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(2, 2): + with T.block("S_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + T.reads(S_local[i, j]) + T.writes(S_smem[i, j]) + S_smem[i, j] = S_local[i, j] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update1"): + T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) + T.writes(m_prev[i], m_new[i], d_new[i]) + m_prev[i] = m_smem[row] + m_new[i] = m_smem[row] + row_: T.int32 = LH_start + row + for j in range(16): + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + m_new[i] = T.max(m_new[i], S_smem[row, j]) + d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + with T.block("update"): + T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i]) + T.writes(S_smem[row, 0:16]) + for j in range(16): + if row < 32: + row_: T.int32 = LH_start + row + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) + else: + S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update"): + T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) + T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) + for j in range(16): + d_new[i] = d_new[i] + S_smem[row, j] + m_smem[row] = m_new[i] + d_smem[row] = d_new[i] + m_prev_smem[row] = m_prev[i] + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) + T.writes(O_local[0:32, 0:64]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(4, 4): + with T.block("O_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) + j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): + with T.block("O_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + k = T.axis.reduce(16, lk_0 * 8 + lk_1) + T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, 
k], V_smem[k, j]) + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] + S_smem[i, k] * T.Cast("float32", V_smem[k, j]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i]) + T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i]) + for li_0 in range(1): + for li_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_2 in T.thread_binding(32, thread="threadIdx.x"): + with T.block("lse_store"): + i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2) + T.where((li_0 * 4 + li_1) * 32 + li_2 < 32) + T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i]) + T.writes(lse[q_indptr[b_idx] + (LH_start + i), by]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i]) + tile_id[0] = tile_id[0] + 16 + + @T.prim_func + def batch_prefill_paged_kv_sliding_window(_0: T.int32, var_q: T.handle, var_q_indptr: T.handle, var_pages: T.handle, var_page_indptr: T.handle, var_page_values: T.handle, var_length_info: T.handle, var_k_rope_pos_offset: T.handle, var_q_rope_position: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + total_len = T.int32(is_size_var=True) + q = T.match_buffer(var_q, (total_len, 20, 64), "float16") + batch_size = T.int32(is_size_var=True) + q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1) + max_num_pages = T.int32(is_size_var=True) + pages = T.match_buffer(var_pages, (max_num_pages, 2, 20, 16, 64), "float16") + page_indptr = T.match_buffer(var_page_indptr, (batch_size + 1,), "int32", offset_factor=1) + nnz_pages = T.int32(is_size_var=True) + page_values = T.match_buffer(var_page_values, (nnz_pages,), "int32", offset_factor=1) + length_info = T.match_buffer(var_length_info, (3, batch_size), "int32", offset_factor=1) + k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(var_q_rope_position, (total_len,), "int32", offset_factor=1) + output = T.match_buffer(var_output, (total_len, 20, 64), "float16") + lse = T.match_buffer(var_lse, (total_len, 20)) + # with T.block("root"): + for lbx in T.thread_binding(16, thread="blockIdx.x"): + for lby in T.thread_binding(20, thread="blockIdx.y"): + for lty in T.thread_binding(4, thread="threadIdx.y"): + for ltx in T.thread_binding(32, thread="threadIdx.x"): + with T.block("attn"): + bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx]) + T.reads() + T.writes() + 
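+ # Sliding-window twin of batch_prefill_paged_kv. The substantive differences are that
+ # length_info is (3, batch_size) and seq_offset is remapped: judging from the uses
+ # below, row 0 is the valid length on the last page, row 1 the start offset of the
+ # rolling window, and row 2 the number of "sink" tokens pinned at the front, so
+ # logical KV positions past the sink region wrap into the rolling window before the
+ # page-table lookup.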
tile_id = T.alloc_buffer((1,), "int32", scope="local") + batch_idx = T.alloc_buffer((1,), "int32", scope="local") + batch_tiles = T.alloc_buffer((1,), "int32", scope="local") + batch_rows = T.alloc_buffer((1,), "int32", scope="local") + iterator = T.alloc_buffer((1,), "int32", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") + K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + S_smem = T.alloc_buffer((32, 16), scope="shared") + S_local = T.alloc_buffer((32, 16), scope="local") + O_local = T.alloc_buffer((32, 64), scope="local") + m_smem = T.alloc_buffer((32,), scope="shared") + m_prev_smem = T.alloc_buffer((32,), scope="shared") + d_smem = T.alloc_buffer((32,), scope="shared") + m_new = T.alloc_buffer((1,), scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_new = T.alloc_buffer((1,), scope="local") + tile_id[0] = bx + batch_idx[0] = 0 + batch_rows[0] = q_indptr[1] - q_indptr[0] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + while T.tvm_thread_invariant(batch_idx[0] < batch_size): + while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: + tile_id[0] = tile_id[0] - batch_tiles[0] + batch_idx[0] = batch_idx[0] + 1 + if batch_idx[0] < batch_size: + b_idx: T.int32 = batch_idx[0] + batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + if T.tvm_thread_invariant(batch_idx[0] < batch_size): + b_idx: T.int32 = batch_idx[0] + LH_start: T.int32 = tile_id[0] * 32 + q_indptr_val: T.int32 = q_indptr[b_idx] + cur_page_indptr_begin: T.int32 = page_indptr[b_idx] + cur_page_indptr_end: T.int32 = page_indptr[b_idx + 1] + kv_chunk_len[0] = T.if_then_else(cur_page_indptr_begin != cur_page_indptr_end, (cur_page_indptr_end - cur_page_indptr_begin - 1) * 16 + length_info[0, b_idx] - length_info[1, b_idx] + length_info[2, b_idx], 0) + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + m_smem[row] = T.float32(-50000) + d_smem[row] = T.float32(1) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = T.float32(0) + T.tvm_storage_sync("shared") + for li_lj_fused_0 in range(4): + for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for li_lj_fused_3 in T.vectorized(4): + with T.block("Q_load"): + i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) + j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = q_indptr_val + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, 
T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j]) + else: + Q_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for iterator_1 in range((kv_chunk_len[0] + 15) // 16): + L_kv_start: T.int32 = iterator_1 * 16 + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("K_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + seq_offset: T.int32 = T.if_then_else(cur_L < length_info[2, b_idx], cur_L, cur_L - length_info[2, b_idx] + length_info[1, b_idx]) + page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", pages[page_no, 0, by, page_offset, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, pages[page_no, 0, by, page_offset, j + 32] * T.float16(-1), pages[page_no, 0, by, page_offset, j - 32]))), pages[page_no, 0, by, page_offset, j]) + else: + K_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("V_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + seq_offset: T.int32 = T.if_then_else(cur_L < length_info[2, b_idx], cur_L, cur_L - length_info[2, b_idx] + length_info[1, b_idx]) + page_no: T.int32 = page_values[cur_page_indptr_begin + seq_offset // 16] + page_offset: T.int32 = seq_offset % 16 + V_smem[i, j] = pages[page_no, 1, by, page_offset, j] + else: + V_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) + T.writes(S_local[0:32, 0:16]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(2, 2): + with T.block("S_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) + j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) + T.reads() + T.writes(S_local[i, j]) + S_local[i, j] = T.float32(0) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): + with T.block("S_gemm_update"): 
+ i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + k = T.axis.reduce(64, lk_0 * 8 + lk_1) + T.reads(S_local[i, j], Q_smem[i, k], K_smem[j, k]) + T.writes(S_local[i, j]) + S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k]) * T.Cast("float32", K_smem[j, k]) * attn_score_scaling_factor * T.float32(0.18033688011112042) + T.tvm_storage_sync("shared") + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(2, 2): + with T.block("S_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + T.reads(S_local[i, j]) + T.writes(S_smem[i, j]) + S_smem[i, j] = S_local[i, j] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update1"): + T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) + T.writes(m_prev[i], m_new[i], d_new[i]) + m_prev[i] = m_smem[row] + m_new[i] = m_smem[row] + row_: T.int32 = LH_start + row + for j in range(16): + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + m_new[i] = T.max(m_new[i], S_smem[row, j]) + d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + with T.block("update"): + T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i]) + T.writes(S_smem[row, 0:16]) + for j in range(16): + if row < 32: + row_: T.int32 = LH_start + row + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) + else: + S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update"): + T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) + T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) + for j in range(16): + d_new[i] = d_new[i] + S_smem[row, j] + m_smem[row] = m_new[i] + d_smem[row] = d_new[i] + m_prev_smem[row] = m_prev[i] + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) + T.writes(O_local[0:32, 0:64]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(4, 4): + with T.block("O_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) + j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): + with T.block("O_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = 
T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + k = T.axis.reduce(16, lk_0 * 8 + lk_1) + T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k], V_smem[k, j]) + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] + S_smem[i, k] * T.Cast("float32", V_smem[k, j]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i]) + T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i]) + for li_0 in range(1): + for li_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_2 in T.thread_binding(32, thread="threadIdx.x"): + with T.block("lse_store"): + i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2) + T.where((li_0 * 4 + li_1) * 32 + li_2 < 32) + T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i]) + T.writes(lse[q_indptr[b_idx] + (LH_start + i), by]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i]) + tile_id[0] = tile_id[0] + 16 + + @T.prim_func + def batch_prefill_ragged_kv(var_q: T.handle, var_q_indptr: T.handle, var_k: T.handle, var_v: T.handle, var_kv_indptr: T.handle, var_q_rope_position: T.handle, var_k_rope_pos_offset: T.handle, var_output: T.handle, var_lse: T.handle, causal: T.int32, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + qo_len = T.int32(is_size_var=True) + q = T.match_buffer(var_q, (qo_len, 20, 64), "float16") + batch_size = T.int32(is_size_var=True) + q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1) + kv_len = T.int32(is_size_var=True) + k = T.match_buffer(var_k, (kv_len, 20, 64), "float16") + v = T.match_buffer(var_v, (kv_len, 20, 64), "float16") + kv_indptr = T.match_buffer(var_kv_indptr, (batch_size + 1,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(var_q_rope_position, (qo_len,), "int32", offset_factor=1) + k_rope_pos_offset = T.match_buffer(var_k_rope_pos_offset, (batch_size,), "int32", offset_factor=1) + output = T.match_buffer(var_output, (qo_len, 20, 64), "float16") + lse = T.match_buffer(var_lse, (qo_len, 20)) + # with T.block("root"): + for lbx in T.thread_binding(16, thread="blockIdx.x"): + for lby in T.thread_binding(20, thread="blockIdx.y"): + for lty in T.thread_binding(4, thread="threadIdx.y"): + for ltx in T.thread_binding(32, thread="threadIdx.x"): + with T.block("attn"): + bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx]) + T.reads() + T.writes() + tile_id = T.alloc_buffer((1,), "int32", scope="local") + batch_idx = T.alloc_buffer((1,), 
"int32", scope="local") + batch_tiles = T.alloc_buffer((1,), "int32", scope="local") + batch_rows = T.alloc_buffer((1,), "int32", scope="local") + iterator = T.alloc_buffer((1,), "int32", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") + K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + S_smem = T.alloc_buffer((32, 16), scope="shared") + S_local = T.alloc_buffer((32, 16), scope="local") + O_local = T.alloc_buffer((32, 64), scope="local") + m_smem = T.alloc_buffer((32,), scope="shared") + m_prev_smem = T.alloc_buffer((32,), scope="shared") + d_smem = T.alloc_buffer((32,), scope="shared") + m_new = T.alloc_buffer((1,), scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_new = T.alloc_buffer((1,), scope="local") + tile_id[0] = bx + batch_idx[0] = 0 + batch_rows[0] = q_indptr[1] - q_indptr[0] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + while T.tvm_thread_invariant(batch_idx[0] < batch_size): + while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: + tile_id[0] = tile_id[0] - batch_tiles[0] + batch_idx[0] = batch_idx[0] + 1 + if batch_idx[0] < batch_size: + b_idx: T.int32 = batch_idx[0] + batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + if T.tvm_thread_invariant(batch_idx[0] < batch_size): + b_idx: T.int32 = batch_idx[0] + q_indptr_val: T.int32 = q_indptr[b_idx] + LH_start: T.int32 = tile_id[0] * 32 + kv_chunk_len[0] = kv_indptr[b_idx + 1] - kv_indptr[b_idx] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + m_smem[row] = T.float32(-50000) + d_smem[row] = T.float32(1) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = T.float32(0) + T.tvm_storage_sync("shared") + for li_lj_fused_0 in range(4): + for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for li_lj_fused_3 in T.vectorized(4): + with T.block("Q_load"): + i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) + j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = q_indptr_val + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", q[cur_L, cur_H_qo, j]) + T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]))), q[cur_L, cur_H_qo, j]) + else: + Q_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for iterator_1 in range((kv_chunk_len[0] + 15) // 16): + L_kv_start: T.int32 = iterator_1 * 16 + 
L_kv_base: T.int32 = kv_indptr[b_idx] + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("K_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", k[L_kv_base + cur_L, by, j]) + T.sin(T.Cast("float32", k_rope_pos_offset[b_idx] + cur_L) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(j < 32, k[L_kv_base + cur_L, by, j + 32] * T.float16(-1), k[L_kv_base + cur_L, by, j - 32]))), k[L_kv_base + cur_L, by, j]) + else: + K_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("V_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_start + i + if cur_L < kv_chunk_len[0]: + V_smem[i, j] = v[L_kv_base + cur_L, by, j] + else: + V_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) + T.writes(S_local[0:32, 0:16]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(2, 2): + with T.block("S_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) + j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) + T.reads() + T.writes(S_local[i, j]) + S_local[i, j] = T.float32(0) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): + with T.block("S_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + k_1 = T.axis.reduce(64, lk_0 * 8 + lk_1) + T.reads(S_local[i, j], Q_smem[i, k_1], K_smem[j, k_1]) + T.writes(S_local[i, j]) + S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k_1]) * T.Cast("float32", K_smem[j, k_1]) * attn_score_scaling_factor * T.float32(0.18033688011112042) + T.tvm_storage_sync("shared") + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(2, 2): + with T.block("S_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + 
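+ # NOTE (hand-added annotation): the constant 0.18033688011112042 in
+ # S_gemm_update is log2(e) / sqrt(64) = 1.4426950408889634 / 8, i.e. the
+ # 1/sqrt(d) attention scale pre-multiplied by log2(e) so the kernel can use
+ # T.exp2 / T.log2 instead of exp / log.  With causal > 0, KV position
+ # L_kv_start + j is visible to query row row_ iff
+ #   L_kv_start + j < kv_len - q_len + row_ + 1
+ # the usual causal rule when the last q_len queries align with the last
+ # q_len keys of the sequence.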
lj_1) + T.reads(S_local[i, j]) + T.writes(S_smem[i, j]) + S_smem[i, j] = S_local[i, j] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update1"): + T.reads(m_smem[row], kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) + T.writes(m_prev[i], m_new[i], d_new[i]) + m_prev[i] = m_smem[row] + m_new[i] = m_smem[row] + row_: T.int32 = LH_start + row + for j in range(16): + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + m_new[i] = T.max(m_new[i], S_smem[row, j]) + d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + with T.block("update"): + T.reads(kv_chunk_len[0], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i]) + T.writes(S_smem[row, 0:16]) + for j in range(16): + if row < 32: + row_: T.int32 = LH_start + row + if T.if_then_else(causal > 0, L_kv_start + j < kv_chunk_len[0] - (q_indptr[b_idx + 1] - q_indptr[b_idx]) + row_ + 1, L_kv_start + j < kv_chunk_len[0]): + S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) + else: + S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update"): + T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) + T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) + for j in range(16): + d_new[i] = d_new[i] + S_smem[row, j] + m_smem[row] = m_new[i] + d_smem[row] = d_new[i] + m_prev_smem[row] = m_prev[i] + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) + T.writes(O_local[0:32, 0:64]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(4, 4): + with T.block("O_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) + j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): + with T.block("O_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + k_1 = T.axis.reduce(16, lk_0 * 8 + lk_1) + T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k_1], V_smem[k_1, j]) + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] + S_smem[i, k_1] * T.Cast("float32", V_smem[k_1, j]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i]) + T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + 
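+ # NOTE (hand-added annotation): epilogue of the ragged prefill kernel --
+ # O_local is divided by the denominator d and stored only for rows that
+ # exist (the cur_L < q_indptr[b_idx + 1] guard handles the ragged tail),
+ # and the per-row log-sum-exp is saved in base 2,
+ #   lse = log2(sum_j 2**s_j) = m + log2(d)
+ # which merge_state_inplace (later in this module) uses to combine partial
+ # attention results.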
i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i]) + for li_0 in range(1): + for li_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_2 in T.thread_binding(32, thread="threadIdx.x"): + with T.block("lse_store"): + i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2) + T.where((li_0 * 4 + li_1) * 32 + li_2 < 32) + T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i]) + T.writes(lse[q_indptr[b_idx] + (LH_start + i), by]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i]) + tile_id[0] = tile_id[0] + 16 + + @T.prim_func + def batch_tree_attn(var_q: T.handle, var_q_indptr: T.handle, var_k: T.handle, var_v: T.handle, var_kv_indptr: T.handle, var_q_rope_position: T.handle, var_mn_indptr: T.handle, var_mask: T.handle, var_output: T.handle, var_lse: T.handle, rotary_mode: T.int32, rope_scale: T.float32, rope_theta: T.float32, attn_score_scaling_factor: T.float32, batch_size: T.int32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + qo_len = T.int32(is_size_var=True) + q = T.match_buffer(var_q, (qo_len, 20, 64), "float16") + q_indptr = T.match_buffer(var_q_indptr, (batch_size + 1,), "int32", offset_factor=1) + kv_len = T.int32(is_size_var=True) + k = T.match_buffer(var_k, (kv_len, 20, 64), "float16") + v = T.match_buffer(var_v, (kv_len, 20, 64), "float16") + kv_indptr = T.match_buffer(var_kv_indptr, (batch_size + 1,), "int32", offset_factor=1) + q_rope_position = T.match_buffer(var_q_rope_position, (qo_len,), "int32", offset_factor=1) + mn_indptr = T.match_buffer(var_mn_indptr, (batch_size + 1,), "int32", offset_factor=1) + tree_size = T.int32(is_size_var=True) + mask = T.match_buffer(var_mask, (tree_size,), "int32", offset_factor=1) + output = T.match_buffer(var_output, (qo_len, 20, 64), "float16") + lse = T.match_buffer(var_lse, (qo_len, 20)) + # with T.block("root"): + for lbx in T.thread_binding(16, thread="blockIdx.x"): + for lby in T.thread_binding(20, thread="blockIdx.y"): + for lty in T.thread_binding(4, thread="threadIdx.y"): + for ltx in T.thread_binding(32, thread="threadIdx.x"): + with T.block("attn"): + bx, by, ty, tx = T.axis.remap("SSSS", [lbx, lby, lty, ltx]) + T.reads() + T.writes() + tile_id = T.alloc_buffer((1,), "int32", scope="local") + batch_idx = T.alloc_buffer((1,), "int32", scope="local") + batch_tiles = T.alloc_buffer((1,), "int32", scope="local") + batch_rows = T.alloc_buffer((1,), "int32", scope="local") + iterator = T.alloc_buffer((1,), "int32", scope="local") + kv_chunk_len = T.alloc_buffer((1,), "int32", scope="local") + Q_smem = T.alloc_buffer((32, 64), "float16", scope="shared") + K_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + V_smem = T.alloc_buffer((16, 64), "float16", scope="shared") + S_smem = T.alloc_buffer((32, 16), scope="shared") + S_local = T.alloc_buffer((32, 16), scope="local") + O_local = T.alloc_buffer((32, 64), scope="local") + m_smem = T.alloc_buffer((32,), scope="shared") + m_prev_smem = T.alloc_buffer((32,), scope="shared") + d_smem = T.alloc_buffer((32,), scope="shared") + m_new = T.alloc_buffer((1,), 
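+ # NOTE (hand-added annotation): batch_tree_attn mirrors the ragged prefill
+ # kernel above -- same 32x16 tiling and online softmax -- but visibility is
+ # taken from an explicit per-request tree mask (mn_indptr / mask) instead of
+ # a causal rule; this appears to be the attention used to score
+ # speculative-decoding token trees.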
scope="local") + m_prev = T.alloc_buffer((1,), scope="local") + d_new = T.alloc_buffer((1,), scope="local") + tile_id[0] = bx + batch_idx[0] = 0 + batch_rows[0] = q_indptr[1] - q_indptr[0] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + while T.tvm_thread_invariant(batch_idx[0] < batch_size): + while tile_id[0] >= batch_tiles[0] and batch_idx[0] < batch_size: + tile_id[0] = tile_id[0] - batch_tiles[0] + batch_idx[0] = batch_idx[0] + 1 + if batch_idx[0] < batch_size: + b_idx: T.int32 = batch_idx[0] + batch_rows[0] = q_indptr[b_idx + 1] - q_indptr[b_idx] + batch_tiles[0] = (batch_rows[0] + 32 - 1) // 32 + if T.tvm_thread_invariant(batch_idx[0] < batch_size): + b_idx: T.int32 = batch_idx[0] + LH_start: T.int32 = tile_id[0] * 32 + q_indptr_val: T.int32 = q_indptr[b_idx] + kv_chunk_len[0] = kv_indptr[b_idx + 1] - kv_indptr[b_idx] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + m_smem[row] = T.float32(-50000) + d_smem[row] = T.float32(1) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = T.float32(0) + T.tvm_storage_sync("shared") + for li_lj_fused_0 in range(4): + for li_lj_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_lj_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for li_lj_fused_3 in T.vectorized(4): + with T.block("Q_load"): + i = T.axis.spatial(32, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) // 64) + j = T.axis.spatial(64, (li_lj_fused_0 * 512 + li_lj_fused_1 * 128 + li_lj_fused_2 * 4 + li_lj_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = q_indptr_val + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + Q_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * q[cur_L, cur_H_qo, j] + T.Cast("float16", T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * T.if_then_else(j < 32, q[cur_L, cur_H_qo, j + 32] * T.float16(-1), q[cur_L, cur_H_qo, j - 32]), q[cur_L, cur_H_qo, j]) + else: + Q_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + for iterator_1 in range((kv_chunk_len[0] + 15) // 16): + L_kv_start: T.int32 = iterator_1 * 16 + L_kv_base: T.int32 = kv_indptr[b_idx] + for lz_ly_fused_0 in range(2): + for lz_ly_fused_1 in T.thread_binding(4, thread="threadIdx.y"): + for lz_ly_fused_2 in T.thread_binding(32, thread="threadIdx.x"): + for lz_ly_fused_3 in T.vectorized(4): + with T.block("KV_load"): + i = T.axis.spatial(16, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) // 64) + j = T.axis.spatial(64, (lz_ly_fused_0 * 512 + lz_ly_fused_1 * 128 + lz_ly_fused_2 * 4 + lz_ly_fused_3) % 64) + T.reads() + T.writes() + cur_L: T.int32 = L_kv_base + L_kv_start + i + if L_kv_start + i < kv_chunk_len[0]: + K_smem[i, j] = T.if_then_else(rotary_mode == 1, T.Cast("float16", T.cos(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * k[cur_L, 
by, j] + T.Cast("float16", T.sin(T.Cast("float32", q_rope_position[cur_L]) * rope_scale / T.pow(rope_theta, T.Cast("float32", j * 2 % 64) / T.float32(64)))) * T.if_then_else(j < 32, k[cur_L, by, j + 32] * T.float16(-1), k[cur_L, by, j - 32]), k[cur_L, by, j]) + V_smem[i, j] = v[cur_L, by, j] + else: + K_smem[i, j] = T.float16(0) + V_smem[i, j] = T.float16(0) + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(Q_smem[0:32, 0:64], K_smem[0:16, 0:64]) + T.writes(S_local[0:32, 0:16]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(2, 2): + with T.block("S_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 8 * 2 + li_1_init) + j = T.axis.spatial(16, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 8 * 2 + lj_1_init) + T.reads() + T.writes(S_local[i, j]) + S_local[i, j] = T.float32(0) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, li_1, lj_1, lk_1 in T.grid(8, 2, 2, 8): + with T.block("S_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + k_1 = T.axis.reduce(64, lk_0 * 8 + lk_1) + T.reads(S_local[i, j], Q_smem[i, k_1], K_smem[j, k_1]) + T.writes(S_local[i, j]) + S_local[i, j] = S_local[i, j] + T.Cast("float32", Q_smem[i, k_1]) * T.Cast("float32", K_smem[j, k_1]) * attn_score_scaling_factor * T.float32(0.18033688011112042) + T.tvm_storage_sync("shared") + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(2, 2): + with T.block("S_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 8 * 2 + li_1) + j = T.axis.spatial(16, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 8 * 2 + lj_1) + T.reads(S_local[i, j]) + T.writes(S_smem[i, j]) + S_smem[i, j] = S_local[i, j] + T.tvm_storage_sync("shared") + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update1"): + T.reads(m_smem[row], kv_chunk_len[0], mask[mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start:mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start + 16], mn_indptr[b_idx], q_indptr[b_idx:b_idx + 2], m_new[i], S_smem[row, 0:16], d_smem[row], m_prev[i]) + T.writes(m_prev[i], m_new[i], d_new[i]) + m_prev[i] = m_smem[row] + m_new[i] = m_smem[row] + row_: T.int32 = LH_start + row + for j in range(16): + if L_kv_start + j < kv_chunk_len[0] and mask[mn_indptr[b_idx] + row_ * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + (L_kv_start + j)] == 1: + m_new[i] = T.max(m_new[i], S_smem[row, j]) + d_new[i] = d_smem[row] * T.exp2(m_prev[i] - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + with T.block("update"): + T.reads(kv_chunk_len[0], mask[mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start:mn_indptr[b_idx] + (LH_start + row) * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + L_kv_start + 16], mn_indptr[b_idx], q_indptr[b_idx:b_idx + 2], S_smem[row, 0:16], m_new[i]) + T.writes(S_smem[row, 0:16]) + for j in range(16): + if row < 32: + row_: T.int32 = LH_start + row + if L_kv_start + j < kv_chunk_len[0] and 
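+ # NOTE (hand-added annotation): tree-mask lookup.  For request b with
+ # q_len_b = q_indptr[b + 1] - q_indptr[b], the flattened mask entry for
+ # query row row_ and KV position p is
+ #   mask[mn_indptr[b] + row_ * q_len_b + p]
+ # and p contributes to the softmax only when that entry is 1 (and
+ # p < kv_chunk_len); all other positions are driven to ~0 through
+ # exp2(-50000 - m_new).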
mask[mn_indptr[b_idx] + row_ * (q_indptr[b_idx + 1] - q_indptr[b_idx]) + (L_kv_start + j)] == 1: + S_smem[row, j] = T.exp2(S_smem[row, j] - m_new[i]) + else: + S_smem[row, j] = T.exp2(T.float32(-50000) - m_new[i]) + for i in range(1): + row: T.int32 = i * 32 * 4 + ty * 32 + tx + if row < 32: + with T.block("update"): + T.reads(d_new[i], S_smem[row, 0:16], m_new[i], m_prev[i]) + T.writes(d_new[i], m_smem[row], d_smem[row], m_prev_smem[row]) + for j in range(16): + d_new[i] = d_new[i] + S_smem[row, j] + m_smem[row] = m_new[i] + d_smem[row] = d_new[i] + m_prev_smem[row] = m_prev[i] + T.tvm_storage_sync("shared") + with T.block(""): + T.reads(m_prev_smem[0:32], m_smem[0:32], S_smem[0:32, 0:16], V_smem[0:16, 0:64]) + T.writes(O_local[0:32, 0:64]) + for li_0_lj_0_fused_0_init in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1_init in T.thread_binding(32, thread="threadIdx.x"): + for li_1_init, lj_1_init in T.grid(4, 4): + with T.block("O_gemm_init"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) // 16 * 4 + li_1_init) + j = T.axis.spatial(64, (li_0_lj_0_fused_0_init * 32 + li_0_lj_0_fused_1_init) % 16 * 4 + lj_1_init) + T.reads() + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] * T.exp2(m_prev_smem[i] - m_smem[i]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for lk_0, lk_1, li_1, lj_1 in T.grid(2, 8, 4, 4): + with T.block("O_gemm_update"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + k_1 = T.axis.reduce(16, lk_0 * 8 + lk_1) + T.reads(O_local[i, j], m_prev_smem[i], m_smem[i], S_smem[i, k_1], V_smem[k_1, j]) + T.writes(O_local[i, j]) + O_local[i, j] = O_local[i, j] + S_smem[i, k_1] * T.Cast("float32", V_smem[k_1, j]) + for li_0_lj_0_fused_0 in T.thread_binding(4, thread="threadIdx.y"): + for li_0_lj_0_fused_1 in T.thread_binding(32, thread="threadIdx.x"): + for li_1, lj_1 in T.grid(4, 4): + with T.block("O_store"): + i = T.axis.spatial(32, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) // 16 * 4 + li_1) + j = T.axis.spatial(64, (li_0_lj_0_fused_0 * 32 + li_0_lj_0_fused_1) % 16 * 4 + lj_1) + T.reads(q_indptr[b_idx:b_idx + 2], O_local[i, j], d_smem[i]) + T.writes(output[q_indptr[b_idx] + (LH_start + i), by, j]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + output[cur_L, cur_H_qo, j] = T.Cast("float16", O_local[i, j] / d_smem[i]) + for li_0 in range(1): + for li_1 in T.thread_binding(4, thread="threadIdx.y"): + for li_2 in T.thread_binding(32, thread="threadIdx.x"): + with T.block("lse_store"): + i = T.axis.spatial(32, li_0 * 128 + li_1 * 32 + li_2) + T.where((li_0 * 4 + li_1) * 32 + li_2 < 32) + T.reads(q_indptr[b_idx:b_idx + 2], m_smem[i], d_smem[i]) + T.writes(lse[q_indptr[b_idx] + (LH_start + i), by]) + cur_L: T.int32 = q_indptr[b_idx] + (LH_start + i) + cur_H_qo: T.int32 = by + if cur_L < q_indptr[b_idx + 1]: + lse[cur_L, cur_H_qo] = m_smem[i] + T.log2(d_smem[i]) + tile_id[0] = tile_id[0] + 16 + + @T.prim_func(private=True) + def batch_verify_on_gpu_single_kernel(var_draft_probs: T.handle, var_draft_tokens: T.handle, var_model_probs: T.handle, var_token_tree_first_child: T.handle, var_token_tree_next_sibling: T.handle, var_uniform_samples: T.handle, var_token_tree_parent_ptr: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": 
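+ # NOTE (hand-added annotation): batch_verify_on_gpu_single_kernel is the
+ # accept/reject step of speculative decoding over a token tree.  One CTA per
+ # sequence walks the tree via first-child / next-sibling pointers and, for a
+ # candidate child token t with uniform sample u, accepts when
+ #   p_model(t) >= u * q_draft(t)
+ # On rejection the model distribution is renormalized onto the residual
+ # max(p - q, 0) before the next sibling is tried, which is what keeps the
+ # overall sample distributed according to the model.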
["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) + num_nodes, vocab_size = T.int32(is_size_var=True), T.int64() + draft_probs = T.match_buffer(var_draft_probs, (num_nodes, vocab_size)) + draft_tokens = T.match_buffer(var_draft_tokens, (num_nodes,), "int32") + model_probs = T.match_buffer(var_model_probs, (num_nodes, vocab_size)) + token_tree_first_child = T.match_buffer(var_token_tree_first_child, (num_nodes,), "int32") + token_tree_next_sibling = T.match_buffer(var_token_tree_next_sibling, (num_nodes,), "int32") + uniform_samples = T.match_buffer(var_uniform_samples, (num_nodes,)) + nbatch = T.int32(is_size_var=True) + token_tree_parent_ptr = T.match_buffer(var_token_tree_parent_ptr, (nbatch,), "int32") + # with T.block("root"): + child_ptr = T.alloc_buffer((1,), "int32", scope="local") + parent_ptr = T.alloc_buffer((1,), "int32", scope="local") + child_token = T.alloc_buffer((1,), "int32", scope="local") + done = T.alloc_buffer((1,), "bool", scope="local") + psum = T.alloc_buffer((1,), scope="local") + t0 = T.alloc_buffer((1,), scope="local") + model_prob_local = T.alloc_buffer((1,), scope="local") + draft_prob_local = T.alloc_buffer((1,), scope="local") + p_child = T.alloc_buffer((1,), scope="local") + q_child = T.alloc_buffer((1,), scope="local") + uniform_sample = T.alloc_buffer((1,), scope="local") + pred_shared = T.alloc_buffer((1,), "bool", scope="shared") + pred_local = T.alloc_buffer((1,), "bool", scope="local") + for _bx in T.thread_binding(nbatch, thread="blockIdx.x"): + for _tx in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("CTA"): + b, tx = T.axis.remap("SS", [_bx, _tx]) + T.reads(token_tree_parent_ptr[b], token_tree_first_child[T.min(parent_ptr[0], child_ptr[0]):T.min(parent_ptr[0], child_ptr[0]) + (T.max(parent_ptr[0], child_ptr[0]) + 1 - T.min(parent_ptr[0], child_ptr[0]))], parent_ptr[0], done[0], child_ptr[0], draft_tokens[child_ptr[0]], model_probs[parent_ptr[0], T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)):T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)) + (T.max(T.Cast("int64", child_token[0]), (vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) + T.Cast("int64", tx) - T.int64(1024)) + T.int64(1) - T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)))], child_token[0], draft_probs[child_ptr[0], T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)):T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)) + (T.max(T.Cast("int64", child_token[0]), (vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) + T.Cast("int64", tx) - T.int64(1024)) + T.int64(1) - T.min(T.Cast("int64", child_token[0]), T.Cast("int64", tx)))], uniform_samples[child_ptr[0]], p_child[0], uniform_sample[0], q_child[0], pred_shared[0], pred_local[0], model_prob_local[0], draft_prob_local[0], psum[0], t0[0], token_tree_next_sibling[child_ptr[0]]) + T.writes(parent_ptr[0], child_ptr[0], done[0], child_token[0], p_child[0], q_child[0], uniform_sample[0], pred_shared[0], pred_local[0], psum[0], model_prob_local[0], draft_prob_local[0], t0[0], model_probs[parent_ptr[0], T.Cast("int64", tx):T.Cast("int64", tx) + ((vocab_size + T.int64(1023)) // T.int64(1024) * T.int64(1024) - T.int64(1023))], token_tree_parent_ptr[b]) + parent_ptr[0] = token_tree_parent_ptr[b] + child_ptr[0] = token_tree_first_child[parent_ptr[0]] + done[0] = T.bool(False) + while 
not done[0]: + T.tvm_storage_sync("shared") + if child_ptr[0] == -1: + done[0] = T.bool(True) + T.tvm_storage_sync("shared") + else: + if tx == 0: + child_token[0] = draft_tokens[child_ptr[0]] + p_child[0] = model_probs[parent_ptr[0], child_token[0]] + q_child[0] = draft_probs[child_ptr[0], child_token[0]] + uniform_sample[0] = uniform_samples[child_ptr[0]] + pred_shared[0] = p_child[0] >= uniform_sample[0] * q_child[0] + T.tvm_storage_sync("shared") + pred_local[0] = pred_shared[0] + if pred_local[0]: + parent_ptr[0] = child_ptr[0] + child_ptr[0] = token_tree_first_child[child_ptr[0]] + else: + psum[0] = T.float32(0) + for i in range((vocab_size + T.int64(1023)) // T.int64(1024)): + if i * T.int64(1024) + T.Cast("int64", tx) < vocab_size: + model_prob_local[0] = model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] + draft_prob_local[0] = draft_probs[child_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] + model_prob_local[0] = T.max(model_prob_local[0] - draft_prob_local[0], T.float32(0)) + psum[0] = psum[0] + model_prob_local[0] + with T.block("block_cross_thread"): + T.reads(psum[0]) + T.writes(t0[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), psum[0], T.bool(True), t0[0], tx) + if t0[0] < T.float32(9.9999999999999995e-08): + parent_ptr[0] = child_ptr[0] + child_ptr[0] = token_tree_first_child[child_ptr[0]] + else: + for i in range((vocab_size + T.int64(1023)) // T.int64(1024)): + if i * T.int64(1024) + T.Cast("int64", tx) < vocab_size: + model_prob_local[0] = model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] + draft_prob_local[0] = draft_probs[child_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] + model_prob_local[0] = T.max(model_prob_local[0] - draft_prob_local[0], T.float32(0)) + model_probs[parent_ptr[0], i * T.int64(1024) + T.Cast("int64", tx)] = model_prob_local[0] / t0[0] + child_ptr[0] = token_tree_next_sibling[child_ptr[0]] + if tx == 0: + token_tree_parent_ptr[b] = parent_ptr[0] + + @T.prim_func + def chunk_lse(var_A: T.handle, var_temperature: T.handle, var_chunked_sum: T.handle, var_chunked_max: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True) + A = T.match_buffer(var_A, (batch_size, vocab_size)) + temperature = T.match_buffer(var_temperature, (batch_size,)) + num_chunks = T.int64(is_size_var=True) + chunked_sum = T.match_buffer(var_chunked_sum, (batch_size, num_chunks)) + chunked_max = T.match_buffer(var_chunked_max, (batch_size, num_chunks)) + # with T.block("root"): + A_pad = T.alloc_buffer((batch_size, num_chunks, T.int64(4096))) + temp_max = T.alloc_buffer((batch_size, num_chunks)) + temp_sum = T.alloc_buffer((batch_size, num_chunks)) + for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)): + with T.block("pad"): + v0, v1, v2 = T.axis.remap("SSS", [l0, l1, l2]) + T.reads(temperature[v0], A[v0, v1 * T.int64(4096) + v2]) + T.writes(A_pad[v0, v1, v2]) + A_pad[v0, v1, v2] = T.if_then_else(v1 * T.int64(4096) + v2 < vocab_size, T.if_then_else(temperature[v0] > T.float32(1.0000000000000001e-05), A[v0, v1 * 
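+ # NOTE (hand-added annotation): chunk_lse is pass 1 of a two-pass softmax
+ # over the vocab axis, processed in 4096-wide chunks.  Per (batch, chunk) it
+ # emits
+ #   chunked_max[c] = max(x / T)
+ #   chunked_sum[c] = log(sum(exp(x / T - chunked_max[c])))    for T > eps
+ # falling back to an argmax-style indicator count when temperature ~ 0;
+ # softmax_with_chunked_sum (later in this module) combines the pairs.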
T.int64(4096) + v2] / temperature[v0], A[v0, v1 * T.int64(4096) + v2]), T.float32(-3.4028234663852886e+38)) + for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)): + with T.block("max"): + v0, v1, v2 = T.axis.remap("SSR", [l0, l1, l2]) + T.reads(A_pad[v0, v1, v2]) + T.writes(temp_max[v0, v1]) + with T.init(): + temp_max[v0, v1] = T.float32(-3.4028234663852886e+38) + temp_max[v0, v1] = T.max(temp_max[v0, v1], A_pad[v0, v1, v2]) + for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(4096)): + with T.block("sum_exp"): + v0, v1, v2 = T.axis.remap("SSR", [l0, l1, l2]) + T.reads(temperature[v0], A_pad[v0, v1, v2], temp_max[v0, v1]) + T.writes(temp_sum[v0, v1]) + with T.init(): + temp_sum[v0, v1] = T.float32(0) + temp_sum[v0, v1] = temp_sum[v0, v1] + T.if_then_else(v1 * T.int64(4096) + v2 < vocab_size, T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(A_pad[v0, v1, v2] - temp_max[v0, v1]), T.Cast("float32", A_pad[v0, v1, v2] == temp_max[v0, v1])), T.float32(0)) + for l0, l1, l2 in T.grid(batch_size, num_chunks, T.int64(1)): + with T.block("log"): + v0, v1, v2 = T.axis.remap("SSS", [l0, l1, l2]) + T.reads(temperature[v0], temp_sum[v0, v1], temp_max[v0, v1]) + T.writes(chunked_sum[v0, v1], chunked_max[v0, v1]) + chunked_sum[v0, v1] = T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.log(temp_sum[v0, v1]), temp_sum[v0, v1]) + chunked_max[v0, v1] = temp_max[v0, v1] + + @T.prim_func + def compact_kv_copy(var_pages: T.handle, var_copy_length_indptr: T.handle, var_copy_src_dst_pos: T.handle, batch_size: T.int32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + num_pages = T.int32() + pages = T.match_buffer(var_pages, (num_pages, 2, 20, 16, 64), "float16") + copy_length_indptr = T.match_buffer(var_copy_length_indptr, (batch_size + 1,), "int32", offset_factor=1) + total_copy_length = T.int32() + copy_src_dst_pos = T.match_buffer(var_copy_src_dst_pos, (2, total_copy_length), "int32", offset_factor=1) + with T.block("root"): + T.reads() + T.writes() + for bhd_o in T.thread_binding((batch_size * 1280 + 1023) // 1024, thread="blockIdx.x"): + for bhd_i in T.thread_binding(1024, thread="threadIdx.x"): + b: T.int32 = (bhd_o * 1024 + bhd_i) // 1280 + h: T.int32 = (bhd_o * 1024 + bhd_i) // 64 % 20 + d: T.int32 = (bhd_o * 1024 + bhd_i) % 64 + if bhd_o * 1024 + bhd_i < batch_size * 20 * 64: + for i in range(copy_length_indptr[b + 1] - copy_length_indptr[b]): + src_pos: T.int32 = copy_src_dst_pos[0, copy_length_indptr[b] + i] + dst_pos: T.int32 = copy_src_dst_pos[1, copy_length_indptr[b] + i] + pages[dst_pos // 16, 0, h, dst_pos % 16, d] = pages[src_pos // 16, 0, h, src_pos % 16, d] + pages[dst_pos // 16, 1, h, dst_pos % 16, d] = pages[src_pos // 16, 1, h, src_pos % 16, d] + + @T.prim_func + def copy_single_page(var_pages: T.handle, src_page_id: T.int64, tgt_page_id: T.int64, copy_length: T.int64): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), 
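+ # NOTE (hand-added annotation): paged KV-cache layout used by the copy
+ # helpers -- pages has shape (num_pages, 2, 20, page_size, 64), i.e.
+ # (page, K-or-V, head, slot-in-page, head-dim) in fp16.  With page_size 16
+ # (as compact_kv_copy assumes) token position p lives at
+ #   pages[p // 16, :, h, p % 16, :]
+ # copy_single_page moves every head/slot of one page to another page id.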
"tir.is_scheduled": 1}) + num_pages, page_size = T.int32(), T.int64() + pages = T.match_buffer(var_pages, (num_pages, 2, 20, page_size, 64), "float16") + # with T.block("root"): + for b in T.thread_binding((copy_length * T.int64(1280) + T.int64(1023)) // T.int64(1024), thread="blockIdx.x"): + for t in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("copy"): + vh = T.axis.spatial(20, T.Cast("int32", (b * T.int64(1024) + T.Cast("int64", t)) // (copy_length * T.int64(64)))) + vp = T.axis.spatial(copy_length, (b * T.int64(1024) + T.Cast("int64", t)) % (copy_length * T.int64(64)) // T.int64(64)) + vd = T.axis.spatial(64, T.Cast("int32", (b * T.int64(1024) + T.Cast("int64", t)) % T.int64(64))) + T.reads(pages[src_page_id, 0:2, vh, vp, vd]) + T.writes(pages[tgt_page_id, 0:2, vh, vp, vd]) + pages[tgt_page_id, 0, vh, vp, vd] = pages[src_page_id, 0, vh, vp, vd] + pages[tgt_page_id, 1, vh, vp, vd] = pages[src_page_id, 1, vh, vp, vd] + + @T.prim_func + def full(var_result: T.handle, value: T.int32): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})}) + batch_size = T.int32(is_size_var=True) + result = T.match_buffer(var_result, (batch_size, 1), "int32") + # with T.block("root"): + for i in range(batch_size): + with T.block("block"): + vi = T.axis.spatial(batch_size, i) + T.reads() + T.writes(result[vi, 0]) + result[vi, 0] = value + + @T.prim_func + def fused_rope(var_qkv: T.handle, var_position_map: T.handle, var_q: T.handle, var_k: T.handle, var_v: T.handle, apply_rope: T.int32): + T.func_attr({"op_pattern": 8, "target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + seq_len = T.int64() + qkv = T.match_buffer(var_qkv, (seq_len, 60, 64), "float16") + position_map = T.match_buffer(var_position_map, (seq_len,), "int32", offset_factor=1) + q = T.match_buffer(var_q, (seq_len, 20, 64), "float16") + k = T.match_buffer(var_k, (seq_len, 20, 64), "float16") + v = T.match_buffer(var_v, (seq_len, 20, 64), "float16") + # with T.block("root"): + for iters_0, iters_1, iters_2 in T.grid(seq_len, 60, 64): + with T.block("llama_fused_rope"): + s, h, d = T.axis.remap("SSS", [iters_0, iters_1, iters_2]) + T.reads(position_map[s], qkv[s, h, d - 32:d - 32 + 65]) + T.writes(q[s, h, d], k[s, h - 20, d], v[s, h - 40, d]) + if h < 20: + q[s, h, d] = T.if_then_else(apply_rope > 0 and d < 64, T.Cast("float16", T.cos(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", qkv[s, h, d]) + T.sin(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(d < 32, qkv[s, h, d + 32] * T.float16(-1), qkv[s, h, d - 32]))), qkv[s, h, d]) + else: + if h < 40: + k[s, h - 20, d] = T.if_then_else(apply_rope > 0 and d < 64, T.Cast("float16", T.cos(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", qkv[s, h, d]) + 
T.sin(T.Cast("float32", position_map[s]) / T.pow(T.float32(1), T.Cast("float32", d * 2 % 64) / T.float32(64))) * T.Cast("float32", T.if_then_else(d < 32, qkv[s, h, d + 32] * T.float16(-1), qkv[s, h, d - 32]))), qkv[s, h, d]) + else: + v[s, h - 40, d] = qkv[s, h, d] + + @T.prim_func + def gather_probs(var_src: T.handle, var_indices: T.handle, var_dst: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + m, n = T.int32(is_size_var=True), T.int32(is_size_var=True) + src = T.match_buffer(var_src, (m, n)) + batch_size = T.int32(is_size_var=True) + indices = T.match_buffer(var_indices, (batch_size,), "int32") + dst = T.match_buffer(var_dst, (batch_size, n)) + # with T.block("root"): + for b, j in T.grid(batch_size, n): + with T.block("gather_2d"): + vb, vj = T.axis.remap("SS", [b, j]) + T.reads(src[indices[vb], vj], indices[vb]) + T.writes(dst[vb, vj]) + dst[vb, vj] = src[indices[vb], vj] + + @T.prim_func(private=True) + def get_index_from_sorted(A: T.handle, B: T.handle, C: T.handle, D: T.handle, E: T.handle, F: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})}) + batch, vocab_size = T.int64(), T.int64() + cumsum_sorted = T.match_buffer(A, (batch, vocab_size)) + indices = T.match_buffer(B, (batch, vocab_size), "int32") + renorm_prob = T.match_buffer(C, (batch, 1)) + out_batch = T.int64() + usample = T.match_buffer(D, (out_batch, 1)) + sample_indices = T.match_buffer(E, (out_batch, 1), "int32") + output_index = T.match_buffer(F, (out_batch, 1), "int32") + # with T.block("root"): + for ax0, ax1 in T.grid(out_batch, vocab_size): + with T.block("T_get_index_from_sorted"): + v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1]) + T.reads(usample[v_ax0, T.int64(0)], cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1):v_ax1 - T.int64(1) + T.int64(2)], sample_indices[v_ax0, T.int64(0)], renorm_prob[sample_indices[v_ax0, T.int64(0)], 0], indices[sample_indices[v_ax0, T.int64(0)], T.min(T.int64(0), v_ax1):T.min(T.int64(0), v_ax1) + (T.max(T.int64(0), v_ax1) + T.int64(1) - T.min(T.int64(0), v_ax1))]) + T.writes(output_index[v_ax0, 0]) + if usample[v_ax0, T.int64(0)] < cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0] or v_ax1 + T.int64(1) == vocab_size: + if v_ax1 == T.int64(0): + output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], 0] + else: + if usample[v_ax0, T.int64(0)] >= cumsum_sorted[sample_indices[v_ax0, T.int64(0)], v_ax1 - T.int64(1)] / renorm_prob[sample_indices[v_ax0, T.int64(0)], 0]: + output_index[v_ax0, 0] = indices[sample_indices[v_ax0, T.int64(0)], v_ax1] + + @T.prim_func(private=True) + def get_renorm_prob(A: T.handle, B: T.handle, C: T.handle, D: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})}) + batch, vocab_size = T.int64(), T.int64() + cumsum_sorted = 
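+ # NOTE (hand-added annotation): get_renorm_prob scans a sorted-probability
+ # CDF and emits the cumulative mass at the joint top-p / top-k cutoff: the
+ # kept prefix ends at the last index i with
+ #   cumsum[i] < top_p  and  i + 1 < top_k
+ # and renorm_prob is the CDF value just past it.  get_index_from_sorted
+ # (above) then samples by inverse CDF, comparing u against
+ # cumsum / renorm_prob.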
T.match_buffer(A, (batch, vocab_size)) + top_p = T.match_buffer(B, (batch, 1)) + top_k = T.match_buffer(C, (batch, 1), "int32") + renorm_prob = T.match_buffer(D, (batch, 1)) + # with T.block("root"): + for ax0, ax1 in T.grid(batch, vocab_size): + with T.block("T_get_renorm_prob"): + v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1]) + T.reads(cumsum_sorted[v_ax0, T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)):T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + (T.max(T.max(T.int64(0), v_ax1), v_ax1 + T.int64(1)) + T.int64(1) - T.min(T.min(T.int64(0), v_ax1), v_ax1 + T.int64(1)))], top_p[v_ax0, 0], top_k[v_ax0, 0]) + T.writes(renorm_prob[v_ax0, 0]) + if not (cumsum_sorted[v_ax0, 0] < top_p[v_ax0, 0] and top_k[v_ax0, 0] > 1): + renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, 0] + else: + if cumsum_sorted[v_ax0, v_ax1] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) < T.Cast("int64", top_k[v_ax0, 0]): + if v_ax1 + T.int64(1) == vocab_size: + renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1] + else: + if not (cumsum_sorted[v_ax0, v_ax1 + T.int64(1)] < top_p[v_ax0, 0] and v_ax1 + T.int64(1) + T.int64(1) < T.Cast("int64", top_k[v_ax0, 0])): + renorm_prob[v_ax0, 0] = cumsum_sorted[v_ax0, v_ax1 + T.int64(1)] + + @T.prim_func(private=True) + def index(var_layer_norm355: T.handle, index: T.Buffer((T.int64(1), T.int64(1), T.int64(1280)), "float16")): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + seq_len = T.int64() + layer_norm355 = T.match_buffer(var_layer_norm355, (T.int64(1), seq_len, T.int64(1280)), "float16") + # with T.block("root"): + for i, _, k in T.grid(T.int64(1), T.int64(1), T.int64(1280)): + with T.block("index"): + v_i, v__, v_k = T.axis.remap("SSS", [i, _, k]) + T.reads(layer_norm355[v_i, seq_len - T.int64(1), v_k]) + T.writes(index[v_i, v__, v_k]) + index[v_i, v__, v_k] = layer_norm355[v_i, seq_len - T.int64(1), v_k] + + @T.prim_func + def merge_state_inplace(v: T.handle, s: T.handle, v_other: T.handle, s_other: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1}) + N, H, D = T.int32(is_size_var=True), T.int32(is_size_var=True), T.int32(is_size_var=True) + V = T.match_buffer(v, (N, H, D), "float16") + S = T.match_buffer(s, (N, H)) + V_other = T.match_buffer(v_other, (N, H, D), "float16") + S_other = T.match_buffer(s_other, (N, H)) + # with T.block("root"): + for bx in T.thread_binding(N, thread="blockIdx.x"): + for by in T.thread_binding(1, thread="blockIdx.y"): + for ty in T.thread_binding(20, thread="threadIdx.y"): + for tx in T.thread_binding(16, thread="threadIdx.x"): + with T.block("merge"): + T.reads(S[bx, ty + by * 20], S_other[bx, ty + by * 20], V[bx, ty + by * 20, tx * 4:tx * 4 + 4], V_other[bx, ty + by * 20, tx * 4:tx * 4 + 4]) + T.writes(V[bx, ty + by * 20, tx * 4:tx * 4 + 4], S[bx, ty + by * 20]) + s_val = T.alloc_buffer((1,), scope="local") + s_other_val = T.alloc_buffer((1,), scope="local") + s_max = T.alloc_buffer((1,), scope="local") + scale = T.alloc_buffer((1,), scope="local") + other_scale = T.alloc_buffer((1,), 
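+ # NOTE (hand-added annotation): merge_state_inplace combines two partial
+ # attention outputs computed over disjoint KV shards, using the base-2 LSE
+ # written by the attention kernels.  Per (position, head):
+ #
+ #   s_max  = max(s1, s2)
+ #   w1, w2 = 2**(s1 - s_max), 2**(s2 - s_max)
+ #   V      = (w1 * V1 + w2 * V2) / (w1 + w2)
+ #   s      = log2(w1 + w2) + s_max
+ #
+ # with each of the 16 x-threads handling a vectorized 4-wide slice of the
+ # 64-dim value.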
scope="local") + v_vec = T.alloc_buffer((4,), "float16", scope="local") + v_other_vec = T.alloc_buffer((4,), "float16", scope="local") + s_val[0] = S[bx, ty + by * 20] + s_other_val[0] = S_other[bx, ty + by * 20] + s_max[0] = T.max(s_val[0], s_other_val[0]) + s_val[0] = T.exp2(s_val[0] - s_max[0]) + s_other_val[0] = T.exp2(s_other_val[0] - s_max[0]) + scale[0] = s_val[0] / (s_val[0] + s_other_val[0]) + other_scale[0] = s_other_val[0] / (s_val[0] + s_other_val[0]) + for vec in T.vectorized(4): + v_vec[vec] = V[bx, ty + by * 20, tx * 4 + vec] + for vec in T.vectorized(4): + v_other_vec[vec] = V_other[bx, ty + by * 20, tx * 4 + vec] + for vec in range(4): + v_vec[vec] = T.Cast("float16", T.Cast("float32", v_vec[vec]) * scale[0] + T.Cast("float32", v_other_vec[vec]) * other_scale[0]) + for vec in T.vectorized(4): + V[bx, ty + by * 20, tx * 4 + vec] = v_vec[vec] + S[bx, ty + by * 20] = T.log2(s_val[0] + s_other_val[0]) + s_max[0] + + @T.prim_func + def sampler_take_probs_tir(var_unsorted_probs: T.handle, var_sorted_indices: T.handle, var_sample_indices: T.handle, var_sampling_results: T.handle, var_top_prob_offsets: T.handle, var_sampled_values: T.handle, var_top_prob_probs: T.handle, var_top_prob_indices: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32})}) + batch_size, vocab_size = T.int32(is_size_var=True), T.int32(is_size_var=True) + unsorted_probs = T.match_buffer(var_unsorted_probs, (batch_size, vocab_size)) + sorted_indices = T.match_buffer(var_sorted_indices, (batch_size, vocab_size), "int32") + num_samples = T.int32(is_size_var=True) + sample_indices = T.match_buffer(var_sample_indices, (num_samples,), "int32") + sampling_results = T.match_buffer(var_sampling_results, (num_samples,), "int32") + num_positions = T.int32(is_size_var=True) + top_prob_offsets = T.match_buffer(var_top_prob_offsets, (num_positions,), "int32") + sampled_values = T.match_buffer(var_sampled_values, (num_samples,)) + top_prob_probs = T.match_buffer(var_top_prob_probs, (num_positions,)) + top_prob_indices = T.match_buffer(var_top_prob_indices, (num_positions,), "int32") + # with T.block("root"): + for i in range(num_positions + num_samples): + with T.block("block"): + vi = T.axis.spatial(num_positions + num_samples, i) + T.reads(top_prob_offsets[vi], sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], unsorted_probs[T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]):T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]) + (T.max(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions]) + 1 - T.min(top_prob_offsets[vi] // vocab_size, sample_indices[vi - num_positions])), T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]):T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]) + (T.max(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]) + 1 - T.min(sorted_indices[top_prob_offsets[vi] // vocab_size, top_prob_offsets[vi] % vocab_size], sampling_results[vi - num_positions]))], sample_indices[vi 
- num_positions], sampling_results[vi - num_positions]) + T.writes(top_prob_indices[vi], top_prob_probs[vi], sampled_values[vi - num_positions]) + if vi < num_positions: + row: T.int32 = top_prob_offsets[vi] // vocab_size + col: T.int32 = top_prob_offsets[vi] % vocab_size + top_prob_indices[vi] = sorted_indices[row, col] + top_prob_probs[vi] = unsorted_probs[row, sorted_indices[row, col]] + else: + vj: T.int32 = vi - num_positions + sampled_values[vj] = unsorted_probs[sample_indices[vj], sampling_results[vj]] + + @T.prim_func + def scatter_probs(var_src: T.handle, var_indices: T.handle, var_dst: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + batch_size, n = T.int32(is_size_var=True), T.int32(is_size_var=True) + src = T.match_buffer(var_src, (batch_size, n)) + indices = T.match_buffer(var_indices, (batch_size,), "int32") + m = T.int32(is_size_var=True) + dst = T.match_buffer(var_dst, (m, n)) + # with T.block("root"): + for b, j in T.grid(batch_size, n): + with T.block("scatter_2d"): + vb, vj = T.axis.remap("SS", [b, j]) + T.reads(src[vb, vj], indices[vb]) + T.writes(dst[indices[vb], vj]) + dst[indices[vb], vj] = src[vb, vj] + + @T.prim_func + def softmax_with_chunked_sum(var_A: T.handle, var_temperature: T.handle, var_chunked_sum: T.handle, var_chunked_max: T.handle, var_softmax: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int64(is_size_var=True), T.int64(is_size_var=True) + A = T.match_buffer(var_A, (batch_size, vocab_size)) + temperature = T.match_buffer(var_temperature, (batch_size,)) + num_chunks = T.int64(is_size_var=True) + chunked_sum = T.match_buffer(var_chunked_sum, (batch_size, num_chunks)) + chunked_max = T.match_buffer(var_chunked_max, (batch_size, num_chunks)) + softmax = T.match_buffer(var_softmax, (batch_size, vocab_size)) + # with T.block("root"): + temp_max_shared = T.alloc_buffer((batch_size,), scope="shared") + temp_sum_shared = T.alloc_buffer((batch_size,), scope="shared") + for l0_l1_fused in T.thread_binding(batch_size * num_chunks, thread="blockIdx.x"): + for ax0_1 in T.thread_binding(T.int64(32), thread="threadIdx.x"): + for ax0_0 in T.serial((num_chunks + T.int64(31)) // T.int64(32), annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): + with T.block("max"): + v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) + v1 = T.axis.reduce(num_chunks, ax0_0 * T.int64(32) + ax0_1) + T.where(ax0_0 * T.int64(32) + ax0_1 < num_chunks) + T.reads(chunked_max[v0, v1]) + T.writes(temp_max_shared[v0]) + with T.init(): + temp_max_shared[v0] = T.float32(-3.4028234663852886e+38) + temp_max_shared[v0] = T.max(temp_max_shared[v0], chunked_max[v0, v1]) + for ax0_1 in T.thread_binding(T.int64(32), thread="threadIdx.x"): + for ax0_0 in T.serial((num_chunks + T.int64(31)) // T.int64(32), 
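+ # NOTE (hand-added annotation): pass 2 of the chunked softmax.  One block
+ # per (batch, chunk) first reduces the per-chunk pairs into a global max and
+ # denominator,
+ #   Z = sum_c exp(chunked_sum[c] + chunked_max[c] - global_max)
+ # (valid because chunked_sum holds the log of each chunk's exp-sum), then
+ # writes softmax = exp(x / T - (log(Z) + global_max)) for its 4096-wide
+ # slice.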
annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): + with T.block("sum_exp"): + v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) + v1 = T.axis.reduce(num_chunks, ax0_0 * T.int64(32) + ax0_1) + T.where(ax0_0 * T.int64(32) + ax0_1 < num_chunks) + T.reads(temperature[v0], chunked_sum[v0, v1], chunked_max[v0, v1], temp_max_shared[v0]) + T.writes(temp_sum_shared[v0]) + with T.init(): + temp_sum_shared[v0] = T.float32(0) + temp_sum_shared[v0] = temp_sum_shared[v0] + T.Select(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(chunked_sum[v0, v1] + chunked_max[v0, v1] - temp_max_shared[v0]), T.Cast("float32", chunked_max[v0, v1] == temp_max_shared[v0]) * chunked_sum[v0, v1]) + for l2_0 in T.serial(T.int64(4), annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1}): + for l2_1 in T.thread_binding(T.int64(32), thread="threadIdx.y"): + for l2_2 in T.thread_binding(T.int64(32), thread="threadIdx.x"): + with T.block("log_pad"): + v0 = T.axis.spatial(batch_size, l0_l1_fused % (num_chunks * batch_size) // num_chunks) + v1 = T.axis.spatial(num_chunks, l0_l1_fused % num_chunks) + v2 = T.axis.spatial(T.int64(4096), l2_0 * T.int64(1024) + l2_1 * T.int64(32) + l2_2) + T.reads(temperature[v0], A[v0, v1 * T.int64(4096) + v2], temp_sum_shared[v0], temp_max_shared[v0]) + T.writes(softmax[v0, v1 * T.int64(4096) + v2]) + if v1 * T.int64(4096) + v2 < vocab_size: + softmax[v0, v1 * T.int64(4096) + v2] = T.if_then_else(temperature[v0] > T.float32(1.0000000000000001e-05), T.exp(A[v0, v1 * T.int64(4096) + v2] / temperature[v0] - (T.log(temp_sum_shared[v0]) + temp_max_shared[v0])), T.Cast("float32", A[v0, v1 * T.int64(4096) + v2] == temp_max_shared[v0]) / temp_sum_shared[v0]) + + @T.prim_func(private=True) + def take_sorted_probs(var_probs: T.handle, var_lv1: T.handle, var_take_sorted_probs: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + batch_size, vocab_size = T.int64(), T.int64() + probs = T.match_buffer(var_probs, (batch_size, vocab_size)) + lv1 = T.match_buffer(var_lv1, (batch_size, vocab_size), "int32") + batch_size_1, vocab_size_1 = T.int64(), T.int64() + take_sorted_probs = T.match_buffer(var_take_sorted_probs, (batch_size_1, vocab_size_1)) + # with T.block("root"): + for i, j in T.grid(batch_size_1, vocab_size_1): + with T.block("take_sorted_probs"): + v_i, v_j = T.axis.remap("SS", [i, j]) + T.reads(probs[v_i, lv1[v_i, v_j]], lv1[v_i, v_j]) + T.writes(take_sorted_probs[v_i, v_j]) + take_sorted_probs[v_i, v_j] = probs[v_i, lv1[v_i, v_j]] + + @T.prim_func + def tir_kv_cache_debug_get_kv(var_pages: T.handle, var_position_map: T.handle, var_k_data: T.handle, var_v_data: T.handle, layer_id: T.int64): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + num_pages, page_size = T.int64(), T.int64(is_size_var=True) + pages = T.match_buffer(var_pages, (num_pages, 2, 20, page_size, 64), "float16") + seqlen = T.int64(is_size_var=True) + position_map = 
T.match_buffer(var_position_map, (seqlen,), "int32", offset_factor=1) + k_data = T.match_buffer(var_k_data, (32, seqlen, 20, 64), "float16") + v_data = T.match_buffer(var_v_data, (32, seqlen, 20, 64), "float16") + # with T.block("root"): + for p, h, d in T.grid(seqlen, 20, 64): + with T.block("copy0"): + vp, vh, vd = T.axis.remap("SSS", [p, h, d]) + T.reads(position_map[vp], pages[T.Cast("int64", position_map[vp]) // page_size, 0:2, vh, T.Cast("int64", position_map[vp]) % page_size, vd]) + T.writes(k_data[layer_id, vp, vh, vd], v_data[layer_id, vp, vh, vd]) + position: T.int32 = position_map[vp] + k_data[layer_id, vp, vh, vd] = pages[T.Cast("int64", position) // page_size, 0, vh, T.Cast("int64", position) % page_size, vd] + v_data[layer_id, vp, vh, vd] = pages[T.Cast("int64", position) // page_size, 1, vh, T.Cast("int64", position) % page_size, vd] + + @T.prim_func + def tir_kv_cache_transpose_append(var_pages: T.handle, var_k_data: T.handle, var_v_data: T.handle, var_position_map: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "host": {"keys": ["cpu"], "kind": "llvm", "mcpu": "znver3", "mtriple": "x86_64-pc-linux-gnu", "tag": ""}, "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.noalias": T.bool(True)}) + num_pages = T.int64() + pages = T.match_buffer(var_pages, (num_pages, 2, 20, 16, 64), "float16") + ntoken = T.int64(is_size_var=True) + k_data = T.match_buffer(var_k_data, (ntoken, 20, 64), "float16") + v_data = T.match_buffer(var_v_data, (ntoken, 20, 64), "float16") + position_map = T.match_buffer(var_position_map, (ntoken,), "int32", offset_factor=1) + # with T.block("root"): + for global_pos, h, f in T.grid(ntoken, 20, 64): + if position_map[global_pos] != -1: + with T.block("k_transpose_append"): + vgpos, vh, vf = T.axis.remap("SSS", [global_pos, h, f]) + T.reads(position_map[vgpos], k_data[vgpos, vh, vf]) + T.writes(pages[position_map[vgpos] // 16, 0, vh, position_map[vgpos] % 16, vf]) + position: T.int32 = position_map[vgpos] + pages[position // 16, 0, vh, position % 16, vf] = k_data[vgpos, vh, vf] + with T.block("v_transpose_append"): + vgpos, vh, vf = T.axis.remap("SSS", [global_pos, h, f]) + T.reads(position_map[vgpos], v_data[vgpos, vh, vf]) + T.writes(pages[position_map[vgpos] // 16, 1, vh, position_map[vgpos] % 16, vf]) + position: T.int32 = position_map[vgpos] + pages[position // 16, 1, vh, position % 16, vf] = v_data[vgpos, vh, vf] + + @T.prim_func(private=True) + def top_p_pivot_cutoff(var_prob: T.handle, var_top_p_arr: T.handle, var_init_pivots: T.handle, var_final_pivot: T.handle, var_final_lsum: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) + B, N = T.int32(), T.int32() + prob = T.match_buffer(var_prob, (B, N)) + top_p_arr = T.match_buffer(var_top_p_arr, (B,)) + init_pivots = T.match_buffer(var_init_pivots, (B, 3)) + final_pivot = T.match_buffer(var_final_pivot, (B,)) + final_lsum = T.match_buffer(var_final_lsum, (B,)) + # with T.block("root"): + pivot = T.alloc_buffer((3,), scope="local") + top_p = T.alloc_buffer((1,), scope="local") + L = T.alloc_buffer((1,), scope="shared") + R_1 = T.alloc_buffer((1,), scope="shared") + L_local = 
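+ # NOTE (hand-added annotation): top_p_pivot_cutoff finds the top-p
+ # probability threshold without sorting.  It keeps a bracket [R, L] around
+ # the pivot and evaluates three trial pivots per round, accumulating for
+ # each
+ #   lsum       = total mass of probs >= pivot
+ #   lmin, cmin = smallest kept prob and its multiplicity
+ # A pivot is accepted when lsum >= top_p > lsum - cmin * lmin, i.e. the mass
+ # above it covers top_p but dropping the tied smallest entries would not.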
T.alloc_buffer((1,), scope="local") + R_local = T.alloc_buffer((1,), scope="local") + q = T.alloc_buffer((1,), scope="local") + lsum = T.alloc_buffer((3,), scope="local") + lmin_broadcast = T.alloc_buffer((1,), scope="shared") + lmin_broadcast_local = T.alloc_buffer((1,), scope="local") + lmin = T.alloc_buffer((3,), scope="local") + cmin = T.alloc_buffer((3,), "int32", scope="local") + total_sum = T.alloc_buffer((1,), scope="local") + it = T.alloc_buffer((1,), "int32", scope="local") + es_local = T.alloc_buffer((1,), "bool", scope="local") + es = T.alloc_buffer((1,), "bool", scope="shared") + find_pivot_local = T.alloc_buffer((1,), "bool", scope="local") + find_pivot = T.alloc_buffer((1,), "bool", scope="shared") + total_sum_reduce = T.alloc_buffer((1,), scope="local") + lsum_reduce = T.alloc_buffer((1,), scope="local") + lmin_reduce = T.alloc_buffer((1,), scope="local") + cmin_reduce = T.alloc_buffer((1,), "int32", scope="local") + for _bx in T.thread_binding(B, thread="blockIdx.x"): + for _tx in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("CTA"): + b, tx = T.axis.remap("SS", [_bx, _tx]) + T.reads(top_p_arr[b], top_p[0], L[0], R_1[0], init_pivots[b, 0:3], L_local[0], R_local[0], find_pivot_local[0], it[0], es_local[0], prob[b, it[0] * 1024 + tx], total_sum[0], q[0], pivot[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], lsum[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], lmin[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], cmin[T.min(0, it[0]):T.min(0, it[0]) + (T.max(2, it[0]) + 1 - T.min(0, it[0]))], total_sum_reduce[0], es[0], lmin_reduce[0], lmin_broadcast[0], lmin_broadcast_local[0], lsum_reduce[0], cmin_reduce[0], find_pivot[0]) + T.writes(top_p[0], L[0], R_1[0], find_pivot[0], L_local[0], R_local[0], pivot[0:3], find_pivot_local[0], final_lsum[b], final_pivot[b], lsum[0:3], lmin[0:3], cmin[0:3], total_sum[0], it[0], es_local[0], q[0], total_sum_reduce[0], es[0], lsum_reduce[0], lmin_reduce[0], lmin_broadcast[0], lmin_broadcast_local[0], cmin_reduce[0]) + top_p[0] = top_p_arr[b] + if tx == 0: + L[0] = T.float32(1) - top_p[0] + R_1[0] = T.float32(9.9999999999999995e-08) + find_pivot[0] = T.bool(False) + T.tvm_storage_sync("shared") + L_local[0] = L[0] + R_local[0] = R_1[0] + for i in T.unroll(3): + pivot[i] = init_pivots[b, i] + find_pivot_local[0] = T.bool(False) + if L_local[0] - R_local[0] <= T.float32(9.9999999999999995e-08): + if tx == 0: + final_lsum[b] = T.float32(1) + final_pivot[b] = T.float32(0) + find_pivot_local[0] = T.bool(True) + while T.tvm_thread_invariant(L_local[0] - R_local[0] > T.float32(9.9999999999999995e-08) and not find_pivot_local[0]): + T.tvm_storage_sync("shared") + for pidx in T.unroll(3): + lsum[pidx] = T.float32(0) + lmin[pidx] = T.float32(3.4028234663852886e+38) + cmin[pidx] = 0 + total_sum[0] = T.float32(0) + it[0] = 0 + es_local[0] = T.bool(False) + while it[0] < (N + 1024 - 1) // 1024 and not es_local[0]: + q[0] = T.if_then_else(it[0] * 1024 + tx < N, prob[b, it[0] * 1024 + tx], T.float32(0)) + total_sum[0] = total_sum[0] + q[0] + for pidx in T.unroll(3): + if q[0] >= pivot[pidx]: + lsum[pidx] = lsum[pidx] + q[0] + if lmin[pidx] > q[0]: + lmin[pidx] = q[0] + cmin[pidx] = 1 + else: + if lmin[pidx] == q[0]: + cmin[pidx] = cmin[pidx] + 1 + it[0] = it[0] + 1 + if it[0] % 32 == 0: + with T.block("block_cross_thread"): + T.reads(total_sum[0]) + T.writes(total_sum_reduce[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", 
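+ # NOTE (hand-added annotation): the three trial pivots share a single pass
+ # over prob; each thread accumulates stripe-local partials and
+ # tvm_thread_allreduce combines them (sum for lsum and the total, min for
+ # lmin).  The periodic early-stop test  1 - total_sum < pivot[2]  abandons
+ # the scan once the unseen tail mass cannot clear even the smallest pivot.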
T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), total_sum[0], T.bool(True), total_sum_reduce[0], tx) + if tx == 0: + es[0] = T.float32(1) - total_sum_reduce[0] < pivot[2] + T.tvm_storage_sync("shared") + es_local[0] = es[0] + T.tvm_storage_sync("shared") + for pidx in range(3): + with T.block("block_cross_thread"): + T.reads(lsum[pidx]) + T.writes(lsum_reduce[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), lsum[pidx], T.bool(True), lsum_reduce[0], tx) + with T.block("block_cross_thread"): + T.reads(lmin[pidx]) + T.writes(lmin_reduce[0]) + T.attr(T.comm_reducer(lambda x0, y0: T.min(x0, y0), [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), lmin[pidx], T.bool(True), lmin_reduce[0], tx) + if tx == 0: + lmin_broadcast[0] = lmin_reduce[0] + T.tvm_storage_sync("shared") + lmin_broadcast_local[0] = lmin_broadcast[0] + if lmin[pidx] > lmin_broadcast_local[0]: + cmin[pidx] = 0 + if tx == 0: + lsum[pidx] = lsum_reduce[0] + lmin[pidx] = lmin_reduce[0] + with T.block("block_cross_thread"): + T.reads(cmin[pidx]) + T.writes(cmin_reduce[0]) + T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [0]), "reduce_scope", T.reinterpret("handle", T.uint64(0))) + T.tvm_thread_allreduce(T.uint32(1), cmin[pidx], T.bool(True), cmin_reduce[0], tx) + if tx == 0: + cmin[pidx] = cmin_reduce[0] + T.tvm_storage_sync("shared") + if tx == 0: + it[0] = 0 + while it[0] < 3 and not find_pivot_local[0]: + if lsum[it[0]] >= top_p[0] and top_p[0] > lsum[it[0]] - T.Cast("float32", cmin[it[0]]) * lmin[it[0]]: + find_pivot[0] = T.bool(True) + find_pivot_local[0] = T.bool(True) + final_pivot[b] = pivot[it[0]] + final_lsum[b] = lsum[it[0]] + else: + if lsum[it[0]] - lmin[it[0]] * T.Cast("float32", cmin[it[0]]) >= top_p[0]: + R_1[0] = pivot[it[0]] + final_lsum[b] = lsum[it[0]] + else: + if lsum[it[0]] < top_p[0]: + L[0] = pivot[it[0]] + it[0] = it[0] + 1 + T.tvm_storage_sync("shared") + L_local[0] = L[0] + R_local[0] = R_1[0] + find_pivot_local[0] = find_pivot[0] + for pidx in T.unroll(3): + pivot[pidx] = L[0] - T.Cast("float32", pidx + 1) * (L_local[0] - R_local[0]) / T.float32(4) + if tx == 0: + if not find_pivot_local[0]: + final_pivot[b] = R_local[0] + if R_local[0] == T.float32(9.9999999999999995e-08): + final_lsum[b] = lsum[2] + + @T.prim_func(private=True) + def top_p_renorm_after_cutoff(var_prob: T.handle, var_final_pivot: T.handle, var_final_lsum: T.handle, var_renorm_prob: T.handle): + T.func_attr({"target": T.target({"arch": "sm_89", "keys": ["cuda", "gpu"], "kind": "cuda", "libs": ["thrust"], "max_num_threads": 1024, "max_shared_memory_per_block": 49152, "max_threads_per_block": 1024, "tag": "", "thread_warp_size": 32}), "tir.is_scheduled": 1, "tir.noalias": T.bool(True)}) + B, N = T.int32(), T.int32() + prob = T.match_buffer(var_prob, (B, N)) + final_pivot = T.match_buffer(var_final_pivot, (B,)) + final_lsum = T.match_buffer(var_final_lsum, (B,)) + renorm_prob = T.match_buffer(var_renorm_prob, (B, N)) + # with T.block("root"): + pivot = T.alloc_buffer((1,), scope="local") + lsum = T.alloc_buffer((1,), scope="local") + for _by in T.thread_binding(B, thread="blockIdx.y"): + for _bx in T.thread_binding((B + 511) // B, thread="blockIdx.x"): + for _tx in T.thread_binding(1024, thread="threadIdx.x"): + with T.block("CTA"): + by, bx, tx = T.axis.remap("SSS", [_by, _bx, _tx]) + T.reads(final_pivot[by], final_lsum[by], prob[by, T.Select(0 <= 
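+ # top_p_renorm_after_cutoff applies the pivot/lsum pair produced above:
+ # renorm_prob[b, i] = prob[b, i] / lsum[b] when prob[b, i] >= pivot[b],
+ # else 0, so the surviving probabilities again sum to (approximately) 1.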
(B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx:T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx + (T.Select(0 <= (B + 511) // B, (N - 1) // ((B + 511) // B * 1024) * ((B + 511) // B), 0 - (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + 1)], pivot[0], lsum[0]) + T.writes(pivot[0], lsum[0], renorm_prob[by, T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx:T.Select(0 <= (B + 511) // B, 0, (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + bx * 1024 + tx + (T.Select(0 <= (B + 511) // B, (N - 1) // ((B + 511) // B * 1024) * ((B + 511) // B), 0 - (((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024) - 1) * ((B + 511) // B)) * 1024 + 1)]) + pivot[0] = final_pivot[by] + lsum[0] = final_lsum[by] + for i in range(((B + 511) // B * 1024 + N - 1) // ((B + 511) // B * 1024)): + if i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx < N: + renorm_prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] = T.if_then_else(prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] >= pivot[0], prob[by, i * ((512 + B - 1) // B) * 1024 + bx * 1024 + tx] / lsum[0], T.float32(0)) + + @R.function + def argsort_probs(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32")) -> R.Tuple(R.Tensor(("batch_size", "vocab_size"), dtype="float32"), R.Tensor(("batch_size", "vocab_size"), dtype="int32")): + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + cls = Module + with R.dataflow(): + lv1: R.Tensor((batch_size, vocab_size), dtype="int32") = R.argsort(probs, axis=-1, descending=True, dtype="int32") + lv2 = R.call_tir(cls.take_sorted_probs, (probs, lv1), out_sinfo=R.Tensor((batch_size, vocab_size), dtype="float32")) + gv1: R.Tuple(R.Tensor((batch_size, vocab_size), dtype="float32"), R.Tensor((batch_size, vocab_size), dtype="int32")) = lv2, lv1 + R.output(gv1) + return gv1 + + @R.function + def batch_compute_cross_attn_kv(encoder_hidden_states: R.Tensor(("batch_size", 1500, 1280), dtype="float16"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
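+ # packed_params flattens every decoder weight into a single tuple; the
+ # (1280, 1280) attention projections, the (5120, 1280)/(1280, 5120) MLP
+ # weights, and the (1280,) biases and norm parameters repeat once per
+ # decoder layer, giving the per-layer stride of 24 visible in the
+ # indices unpacked below (498, 522, 546, ..., 1242).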
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Object: + batch_size = T.int64() + R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + with R.dataflow(): + model_decoder_layers_0_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[498] + model_decoder_layers_0_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[499] + model_decoder_layers_0_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[500] + model_decoder_layers_1_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[522] + model_decoder_layers_1_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[523] + model_decoder_layers_1_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[524] + model_decoder_layers_2_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[546] + model_decoder_layers_2_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[547] + model_decoder_layers_2_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[548] + model_decoder_layers_3_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[570] + model_decoder_layers_3_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[571] + model_decoder_layers_3_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[572] + model_decoder_layers_4_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[594] + model_decoder_layers_4_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[595] + model_decoder_layers_4_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[596] + model_decoder_layers_5_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[618] + model_decoder_layers_5_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[619] + model_decoder_layers_5_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[620] + model_decoder_layers_6_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[642] + model_decoder_layers_6_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[643] + model_decoder_layers_6_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[644] + model_decoder_layers_7_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[666] + 
model_decoder_layers_7_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[667] + model_decoder_layers_7_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[668] + model_decoder_layers_8_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[690] + model_decoder_layers_8_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[691] + model_decoder_layers_8_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[692] + model_decoder_layers_9_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[714] + model_decoder_layers_9_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[715] + model_decoder_layers_9_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[716] + model_decoder_layers_10_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[738] + model_decoder_layers_10_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[739] + model_decoder_layers_10_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[740] + model_decoder_layers_11_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[762] + model_decoder_layers_11_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[763] + model_decoder_layers_11_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[764] + model_decoder_layers_12_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[786] + model_decoder_layers_12_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[787] + model_decoder_layers_12_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[788] + model_decoder_layers_13_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[810] + model_decoder_layers_13_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[811] + model_decoder_layers_13_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[812] + model_decoder_layers_14_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[834] + model_decoder_layers_14_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[835] + model_decoder_layers_14_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[836] + model_decoder_layers_15_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[858] + model_decoder_layers_15_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[859] + model_decoder_layers_15_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[860] + model_decoder_layers_16_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[882] + model_decoder_layers_16_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[883] + model_decoder_layers_16_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[884] + model_decoder_layers_17_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[906] + model_decoder_layers_17_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[907] + 
model_decoder_layers_17_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[908] + model_decoder_layers_18_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[930] + model_decoder_layers_18_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[931] + model_decoder_layers_18_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[932] + model_decoder_layers_19_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[954] + model_decoder_layers_19_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[955] + model_decoder_layers_19_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[956] + model_decoder_layers_20_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[978] + model_decoder_layers_20_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[979] + model_decoder_layers_20_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[980] + model_decoder_layers_21_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1002] + model_decoder_layers_21_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1003] + model_decoder_layers_21_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1004] + model_decoder_layers_22_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1026] + model_decoder_layers_22_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1027] + model_decoder_layers_22_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1028] + model_decoder_layers_23_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1050] + model_decoder_layers_23_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1051] + model_decoder_layers_23_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1052] + model_decoder_layers_24_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1074] + model_decoder_layers_24_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1075] + model_decoder_layers_24_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1076] + model_decoder_layers_25_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1098] + model_decoder_layers_25_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1099] + model_decoder_layers_25_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1100] + model_decoder_layers_26_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1122] + model_decoder_layers_26_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1123] + model_decoder_layers_26_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1124] + model_decoder_layers_27_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1146] + model_decoder_layers_27_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1147] + model_decoder_layers_27_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1148] + 
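+            # NOTE: the bindings above and below unpack only the per-layer
+            # encoder-attn K/V projection parameters. For decoder layer i in
+            # 0..31 they sit at packed_params[498 + 24*i] (k_proj weight),
+            # packed_params[499 + 24*i] (v_proj weight) and
+            # packed_params[500 + 24*i] (v_proj bias), since each layer owns
+            # 24 consecutive packed entries.
+            # The fully unrolled per-layer sequence that follows is equivalent
+            # to this loop (a sketch only: `push` paraphrases the
+            # "vm.builtin.attention_kv_cache_push_cross_attention_kv" packed
+            # call, and `@` the fused cuBLAS permute_dims+matmul kernels):
+            #   cache = paged_kv_cache
+            #   for i in range(32):
+            #       k = encoder_hidden_states @ k_proj_weight[i].T                   # (b, 1500, 1280), no bias
+            #       v = encoder_hidden_states @ v_proj_weight[i].T + v_proj_bias[i]  # (b, 1500, 1280)
+            #       k = k.reshape(b * 1500, 20, 64)   # 20 heads x 64 head_dim
+            #       v = v.reshape(b * 1500, 20, 64)
+            #       cache = push(cache, i, k, v)      # cross-attn KV for layer i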
model_decoder_layers_28_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1170] + model_decoder_layers_28_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1171] + model_decoder_layers_28_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1172] + model_decoder_layers_29_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1194] + model_decoder_layers_29_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1195] + model_decoder_layers_29_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1196] + model_decoder_layers_30_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1218] + model_decoder_layers_30_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1219] + model_decoder_layers_30_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1220] + model_decoder_layers_31_encoder_attn_k_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1242] + model_decoder_layers_31_encoder_attn_v_proj_weight1: R.Tensor((1280, 1280), dtype="float16") = packed_params[1243] + model_decoder_layers_31_encoder_attn_v_proj_bias1: R.Tensor((1280,), dtype="float16") = packed_params[1244] + lv = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_0_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape256: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv, R.shape([batch_size, 1500, 20, 64])) + lv_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_0_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_0_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape257: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv_1, R.shape([batch_size, 1500, 20, 64])) + reshape258: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape256, R.shape([batch_size * 1500, 20, 64])) + reshape259: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape257, R.shape([batch_size * 1500, 20, 64])) + lv36: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", paged_kv_cache, R.prim_value(0), reshape258, reshape259, sinfo_args=(R.Object,)) + lv1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_1_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape260: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv1, R.shape([batch_size, 1500, 20, 64])) + lv1_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_1_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_1_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape261: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv1_1, R.shape([batch_size, 1500, 20, 64])) + reshape262: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape260, R.shape([batch_size * 1500, 20, 64])) + reshape263: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape261, R.shape([batch_size * 1500, 20, 64])) + lv37: 
R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv36, R.prim_value(1), reshape262, reshape263, sinfo_args=(R.Object,)) + lv2 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_2_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape264: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv2, R.shape([batch_size, 1500, 20, 64])) + lv2_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_2_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_2_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape265: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv2_1, R.shape([batch_size, 1500, 20, 64])) + reshape266: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape264, R.shape([batch_size * 1500, 20, 64])) + reshape267: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape265, R.shape([batch_size * 1500, 20, 64])) + lv38: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv37, R.prim_value(2), reshape266, reshape267, sinfo_args=(R.Object,)) + lv3 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_3_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape268: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv3, R.shape([batch_size, 1500, 20, 64])) + lv3_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_3_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_3_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape269: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv3_1, R.shape([batch_size, 1500, 20, 64])) + reshape270: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape268, R.shape([batch_size * 1500, 20, 64])) + reshape271: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape269, R.shape([batch_size * 1500, 20, 64])) + lv39: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv38, R.prim_value(3), reshape270, reshape271, sinfo_args=(R.Object,)) + lv4 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_4_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape272: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv4, R.shape([batch_size, 1500, 20, 64])) + lv4_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_4_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_4_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape273: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv4_1, R.shape([batch_size, 1500, 20, 64])) + reshape274: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape272, R.shape([batch_size * 1500, 20, 64])) + reshape275: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape273, R.shape([batch_size * 1500, 20, 64])) + lv40: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv39, R.prim_value(4), reshape274, reshape275, sinfo_args=(R.Object,)) + lv5 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_5_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape276: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv5, R.shape([batch_size, 1500, 20, 64])) + lv5_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_5_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_5_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape277: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv5_1, R.shape([batch_size, 1500, 20, 64])) + reshape278: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape276, R.shape([batch_size * 1500, 20, 64])) + reshape279: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape277, R.shape([batch_size * 1500, 20, 64])) + lv41: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv40, R.prim_value(5), reshape278, reshape279, sinfo_args=(R.Object,)) + lv6 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_6_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape280: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv6, R.shape([batch_size, 1500, 20, 64])) + lv6_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_6_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_6_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape281: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv6_1, R.shape([batch_size, 1500, 20, 64])) + reshape282: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape280, R.shape([batch_size * 1500, 20, 64])) + reshape283: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape281, R.shape([batch_size * 1500, 20, 64])) + lv42: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv41, R.prim_value(6), reshape282, reshape283, sinfo_args=(R.Object,)) + lv7 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_7_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape284: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv7, R.shape([batch_size, 1500, 20, 64])) + lv7_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_7_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_7_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape285: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv7_1, R.shape([batch_size, 1500, 20, 64])) + reshape286: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape284, R.shape([batch_size * 1500, 20, 64])) + reshape287: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape285, R.shape([batch_size * 1500, 20, 64])) + lv43: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv42, R.prim_value(7), reshape286, reshape287, sinfo_args=(R.Object,)) + lv8 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_8_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape288: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv8, R.shape([batch_size, 1500, 20, 64])) + lv8_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_8_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_8_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape289: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv8_1, R.shape([batch_size, 1500, 20, 64])) + reshape290: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape288, R.shape([batch_size * 1500, 20, 64])) + reshape291: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape289, R.shape([batch_size * 1500, 20, 64])) + lv44: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv43, R.prim_value(8), reshape290, reshape291, sinfo_args=(R.Object,)) + lv9 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_9_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape292: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv9, R.shape([batch_size, 1500, 20, 64])) + lv9_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_9_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_9_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape293: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv9_1, R.shape([batch_size, 1500, 20, 64])) + reshape294: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape292, R.shape([batch_size * 1500, 20, 64])) + reshape295: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape293, R.shape([batch_size * 1500, 20, 64])) + lv45: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv44, R.prim_value(9), reshape294, reshape295, sinfo_args=(R.Object,)) + lv10 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_10_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape296: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv10, R.shape([batch_size, 1500, 20, 64])) + lv10_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_10_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_10_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape297: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv10_1, R.shape([batch_size, 1500, 20, 64])) + reshape298: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape296, R.shape([batch_size * 1500, 20, 64])) + reshape299: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape297, R.shape([batch_size * 1500, 20, 64])) + lv46: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv45, R.prim_value(10), reshape298, reshape299, sinfo_args=(R.Object,)) + lv11 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_11_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape300: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv11, R.shape([batch_size, 1500, 20, 64])) + lv11_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_11_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_11_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape301: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv11_1, R.shape([batch_size, 1500, 20, 64])) + reshape302: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape300, R.shape([batch_size * 1500, 20, 64])) + reshape303: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape301, R.shape([batch_size * 1500, 20, 64])) + lv47: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv46, R.prim_value(11), reshape302, reshape303, sinfo_args=(R.Object,)) + lv12 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_12_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape304: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv12, R.shape([batch_size, 1500, 20, 64])) + lv12_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_12_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_12_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape305: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv12_1, R.shape([batch_size, 1500, 20, 64])) + reshape306: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape304, R.shape([batch_size * 1500, 20, 64])) + reshape307: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape305, R.shape([batch_size * 1500, 20, 64])) + lv48: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv47, R.prim_value(12), reshape306, reshape307, sinfo_args=(R.Object,)) + lv13 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_13_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape308: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv13, R.shape([batch_size, 1500, 20, 64])) + lv13_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_13_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_13_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape309: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv13_1, R.shape([batch_size, 1500, 20, 64])) + reshape310: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape308, R.shape([batch_size * 1500, 20, 64])) + reshape311: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape309, R.shape([batch_size * 1500, 20, 64])) + lv49: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv48, R.prim_value(13), reshape310, reshape311, sinfo_args=(R.Object,)) + lv14 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_14_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape312: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv14, R.shape([batch_size, 1500, 20, 64])) + lv14_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_14_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_14_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape313: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv14_1, R.shape([batch_size, 1500, 20, 64])) + reshape314: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape312, R.shape([batch_size * 1500, 20, 64])) + reshape315: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape313, R.shape([batch_size * 1500, 20, 64])) + lv50: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv49, R.prim_value(14), reshape314, reshape315, sinfo_args=(R.Object,)) + lv15 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_15_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape316: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv15, R.shape([batch_size, 1500, 20, 64])) + lv15_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_15_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_15_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape317: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv15_1, R.shape([batch_size, 1500, 20, 64])) + reshape318: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape316, R.shape([batch_size * 1500, 20, 64])) + reshape319: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape317, R.shape([batch_size * 1500, 20, 64])) + lv51: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv50, R.prim_value(15), reshape318, reshape319, sinfo_args=(R.Object,)) + lv16 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_16_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape320: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv16, R.shape([batch_size, 1500, 20, 64])) + lv16_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_16_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_16_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape321: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv16_1, R.shape([batch_size, 1500, 20, 64])) + reshape322: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape320, R.shape([batch_size * 1500, 20, 64])) + reshape323: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape321, R.shape([batch_size * 1500, 20, 64])) + lv52: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv51, R.prim_value(16), reshape322, reshape323, sinfo_args=(R.Object,)) + lv17 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_17_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape324: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv17, R.shape([batch_size, 1500, 20, 64])) + lv17_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_17_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_17_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape325: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv17_1, R.shape([batch_size, 1500, 20, 64])) + reshape326: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape324, R.shape([batch_size * 1500, 20, 64])) + reshape327: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape325, R.shape([batch_size * 1500, 20, 64])) + lv53: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv52, R.prim_value(17), reshape326, reshape327, sinfo_args=(R.Object,)) + lv18 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_18_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape328: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv18, R.shape([batch_size, 1500, 20, 64])) + lv18_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_18_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_18_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape329: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv18_1, R.shape([batch_size, 1500, 20, 64])) + reshape330: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape328, R.shape([batch_size * 1500, 20, 64])) + reshape331: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape329, R.shape([batch_size * 1500, 20, 64])) + lv54: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv53, R.prim_value(18), reshape330, reshape331, sinfo_args=(R.Object,)) + lv19 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_19_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape332: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv19, R.shape([batch_size, 1500, 20, 64])) + lv19_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_19_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_19_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape333: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv19_1, R.shape([batch_size, 1500, 20, 64])) + reshape334: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape332, R.shape([batch_size * 1500, 20, 64])) + reshape335: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape333, R.shape([batch_size * 1500, 20, 64])) + lv55: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv54, R.prim_value(19), reshape334, reshape335, sinfo_args=(R.Object,)) + lv20 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_20_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape336: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv20, R.shape([batch_size, 1500, 20, 64])) + lv20_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_20_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_20_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape337: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv20_1, R.shape([batch_size, 1500, 20, 64])) + reshape338: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape336, R.shape([batch_size * 1500, 20, 64])) + reshape339: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape337, R.shape([batch_size * 1500, 20, 64])) + lv56: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv55, R.prim_value(20), reshape338, reshape339, sinfo_args=(R.Object,)) + lv21 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_21_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape340: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv21, R.shape([batch_size, 1500, 20, 64])) + lv21_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_21_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_21_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape341: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv21_1, R.shape([batch_size, 1500, 20, 64])) + reshape342: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape340, R.shape([batch_size * 1500, 20, 64])) + reshape343: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape341, R.shape([batch_size * 1500, 20, 64])) + lv57: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv56, R.prim_value(21), reshape342, reshape343, sinfo_args=(R.Object,)) + lv22 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_22_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape344: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv22, R.shape([batch_size, 1500, 20, 64])) + lv22_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_22_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_22_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape345: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv22_1, R.shape([batch_size, 1500, 20, 64])) + reshape346: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape344, R.shape([batch_size * 1500, 20, 64])) + reshape347: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape345, R.shape([batch_size * 1500, 20, 64])) + lv58: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv57, R.prim_value(22), reshape346, reshape347, sinfo_args=(R.Object,)) + lv23 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_23_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape348: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv23, R.shape([batch_size, 1500, 20, 64])) + lv23_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_23_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_23_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape349: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv23_1, R.shape([batch_size, 1500, 20, 64])) + reshape350: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape348, R.shape([batch_size * 1500, 20, 64])) + reshape351: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape349, R.shape([batch_size * 1500, 20, 64])) + lv59: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv58, R.prim_value(23), reshape350, reshape351, sinfo_args=(R.Object,)) + lv24 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_24_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape352: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv24, R.shape([batch_size, 1500, 20, 64])) + lv24_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_24_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_24_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape353: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv24_1, R.shape([batch_size, 1500, 20, 64])) + reshape354: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape352, R.shape([batch_size * 1500, 20, 64])) + reshape355: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape353, R.shape([batch_size * 1500, 20, 64])) + lv60: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv59, R.prim_value(24), reshape354, reshape355, sinfo_args=(R.Object,)) + lv25 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_25_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape356: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv25, R.shape([batch_size, 1500, 20, 64])) + lv25_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_25_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_25_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape357: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv25_1, R.shape([batch_size, 1500, 20, 64])) + reshape358: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape356, R.shape([batch_size * 1500, 20, 64])) + reshape359: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape357, R.shape([batch_size * 1500, 20, 64])) + lv61: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv60, R.prim_value(25), reshape358, reshape359, sinfo_args=(R.Object,)) + lv26 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_26_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape360: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv26, R.shape([batch_size, 1500, 20, 64])) + lv26_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_26_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_26_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape361: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv26_1, R.shape([batch_size, 1500, 20, 64])) + reshape362: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape360, R.shape([batch_size * 1500, 20, 64])) + reshape363: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape361, R.shape([batch_size * 1500, 20, 64])) + lv62: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv61, R.prim_value(26), reshape362, reshape363, sinfo_args=(R.Object,)) + lv27 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_27_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape364: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv27, R.shape([batch_size, 1500, 20, 64])) + lv27_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_27_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_27_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape365: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv27_1, R.shape([batch_size, 1500, 20, 64])) + reshape366: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape364, R.shape([batch_size * 1500, 20, 64])) + reshape367: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape365, R.shape([batch_size * 1500, 20, 64])) + lv63: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv62, R.prim_value(27), reshape366, reshape367, sinfo_args=(R.Object,)) + lv28 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_28_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape368: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv28, R.shape([batch_size, 1500, 20, 64])) + lv28_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_28_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_28_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape369: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv28_1, R.shape([batch_size, 1500, 20, 64])) + reshape370: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape368, R.shape([batch_size * 1500, 20, 64])) + reshape371: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape369, R.shape([batch_size * 1500, 20, 64])) + lv64: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv63, R.prim_value(28), reshape370, reshape371, sinfo_args=(R.Object,)) + lv29 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_29_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape372: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv29, R.shape([batch_size, 1500, 20, 64])) + lv29_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_29_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_29_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape373: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv29_1, R.shape([batch_size, 1500, 20, 64])) + reshape374: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape372, R.shape([batch_size * 1500, 20, 64])) + reshape375: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape373, R.shape([batch_size * 1500, 20, 64])) + lv65: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv64, R.prim_value(29), reshape374, reshape375, sinfo_args=(R.Object,)) + lv30 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_30_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape376: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv30, R.shape([batch_size, 1500, 20, 64])) + lv30_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_30_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_30_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape377: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv30_1, R.shape([batch_size, 1500, 20, 64])) + reshape378: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape376, R.shape([batch_size * 1500, 20, 64])) + reshape379: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape377, R.shape([batch_size * 1500, 20, 64])) + lv66: R.Object = R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv65, R.prim_value(30), reshape378, reshape379, sinfo_args=(R.Object,)) + lv31 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_decoder_layers_31_encoder_attn_k_proj_weight1, encoder_hidden_states), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape380: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv31, R.shape([batch_size, 1500, 20, 64])) + lv31_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_decoder_layers_31_encoder_attn_v_proj_weight1, encoder_hidden_states, model_decoder_layers_31_encoder_attn_v_proj_bias1), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape381: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv31_1, R.shape([batch_size, 1500, 20, 64])) + reshape382: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape380, R.shape([batch_size * 1500, 20, 64])) + reshape383: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape381, R.shape([batch_size * 1500, 20, 64])) + lv67: R.Object = 
R.call_pure_packed("vm.builtin.attention_kv_cache_push_cross_attention_kv", lv66, R.prim_value(31), reshape382, reshape383, sinfo_args=(R.Object,)) + gv1: R.Object = lv67 + R.output(gv1) + return gv1 + + @R.function + def batch_decode(input_ids: R.Tensor(("batch_size", 1), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor(("batch_size", 1, 51866), dtype="float32"): + batch_size = T.int64() + R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "relax.rewrite_cuda_graph.capture_symbolic_vars": ["batch_size"], "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + with R.dataflow(): + model_decoder_embed_tokens_weight3: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] + model_decoder_embed_positions_weight3: R.Tensor((448, 1280), dtype="float16") = packed_params[488] + model_decoder_layers_0_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] + model_decoder_layers_0_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] + model_decoder_layers_0_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[491] + model_decoder_layers_0_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] + model_decoder_layers_0_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[493] + model_decoder_layers_0_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] + model_decoder_layers_0_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[495] + 
model_decoder_layers_0_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[496] + model_decoder_layers_0_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[497] + model_decoder_layers_0_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] + model_decoder_layers_0_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[502] + model_decoder_layers_0_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] + model_decoder_layers_0_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[504] + model_decoder_layers_0_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[505] + model_decoder_layers_0_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[506] + model_decoder_layers_0_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] + model_decoder_layers_0_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[508] + model_decoder_layers_0_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] + model_decoder_layers_0_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[510] + model_decoder_layers_0_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[511] + model_decoder_layers_0_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[512] + model_decoder_layers_1_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] + model_decoder_layers_1_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] + model_decoder_layers_1_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[515] + model_decoder_layers_1_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] + model_decoder_layers_1_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[517] + model_decoder_layers_1_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] + model_decoder_layers_1_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[519] + model_decoder_layers_1_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[520] + model_decoder_layers_1_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[521] + model_decoder_layers_1_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] + model_decoder_layers_1_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[526] + model_decoder_layers_1_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] + model_decoder_layers_1_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[528] + model_decoder_layers_1_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[529] + model_decoder_layers_1_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[530] + model_decoder_layers_1_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] + model_decoder_layers_1_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[532] + model_decoder_layers_1_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] + model_decoder_layers_1_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[534] + 
model_decoder_layers_1_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[535] + model_decoder_layers_1_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[536] + model_decoder_layers_2_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] + model_decoder_layers_2_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] + model_decoder_layers_2_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[539] + model_decoder_layers_2_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] + model_decoder_layers_2_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[541] + model_decoder_layers_2_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] + model_decoder_layers_2_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[543] + model_decoder_layers_2_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[544] + model_decoder_layers_2_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[545] + model_decoder_layers_2_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] + model_decoder_layers_2_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[550] + model_decoder_layers_2_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] + model_decoder_layers_2_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[552] + model_decoder_layers_2_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[553] + model_decoder_layers_2_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[554] + model_decoder_layers_2_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] + model_decoder_layers_2_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[556] + model_decoder_layers_2_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] + model_decoder_layers_2_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[558] + model_decoder_layers_2_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[559] + model_decoder_layers_2_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[560] + model_decoder_layers_3_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] + model_decoder_layers_3_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] + model_decoder_layers_3_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[563] + model_decoder_layers_3_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] + model_decoder_layers_3_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[565] + model_decoder_layers_3_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] + model_decoder_layers_3_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[567] + model_decoder_layers_3_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[568] + model_decoder_layers_3_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[569] + model_decoder_layers_3_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[573] + model_decoder_layers_3_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[574] + model_decoder_layers_3_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] + model_decoder_layers_3_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[576] + model_decoder_layers_3_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[577] + model_decoder_layers_3_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[578] + model_decoder_layers_3_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] + model_decoder_layers_3_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[580] + model_decoder_layers_3_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] + model_decoder_layers_3_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[582] + model_decoder_layers_3_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[583] + model_decoder_layers_3_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[584] + model_decoder_layers_4_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] + model_decoder_layers_4_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] + model_decoder_layers_4_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[587] + model_decoder_layers_4_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] + model_decoder_layers_4_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[589] + model_decoder_layers_4_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] + model_decoder_layers_4_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[591] + model_decoder_layers_4_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[592] + model_decoder_layers_4_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[593] + model_decoder_layers_4_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] + model_decoder_layers_4_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[598] + model_decoder_layers_4_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] + model_decoder_layers_4_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[600] + model_decoder_layers_4_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[601] + model_decoder_layers_4_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[602] + model_decoder_layers_4_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] + model_decoder_layers_4_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[604] + model_decoder_layers_4_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] + model_decoder_layers_4_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[606] + model_decoder_layers_4_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[607] + model_decoder_layers_4_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[608] + model_decoder_layers_5_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] + 
model_decoder_layers_5_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] + model_decoder_layers_5_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[611] + model_decoder_layers_5_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] + model_decoder_layers_5_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[613] + model_decoder_layers_5_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] + model_decoder_layers_5_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[615] + model_decoder_layers_5_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[616] + model_decoder_layers_5_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[617] + model_decoder_layers_5_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] + model_decoder_layers_5_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[622] + model_decoder_layers_5_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] + model_decoder_layers_5_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[624] + model_decoder_layers_5_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[625] + model_decoder_layers_5_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[626] + model_decoder_layers_5_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] + model_decoder_layers_5_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[628] + model_decoder_layers_5_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] + model_decoder_layers_5_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[630] + model_decoder_layers_5_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[631] + model_decoder_layers_5_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[632] + model_decoder_layers_6_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] + model_decoder_layers_6_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] + model_decoder_layers_6_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[635] + model_decoder_layers_6_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] + model_decoder_layers_6_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[637] + model_decoder_layers_6_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] + model_decoder_layers_6_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[639] + model_decoder_layers_6_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[640] + model_decoder_layers_6_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[641] + model_decoder_layers_6_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] + model_decoder_layers_6_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[646] + model_decoder_layers_6_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] + model_decoder_layers_6_encoder_attn_out_proj_bias3: R.Tensor((1280,), 
dtype="float16") = packed_params[648] + model_decoder_layers_6_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[649] + model_decoder_layers_6_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[650] + model_decoder_layers_6_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] + model_decoder_layers_6_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[652] + model_decoder_layers_6_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] + model_decoder_layers_6_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[654] + model_decoder_layers_6_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[655] + model_decoder_layers_6_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[656] + model_decoder_layers_7_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] + model_decoder_layers_7_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] + model_decoder_layers_7_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[659] + model_decoder_layers_7_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] + model_decoder_layers_7_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[661] + model_decoder_layers_7_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] + model_decoder_layers_7_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[663] + model_decoder_layers_7_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[664] + model_decoder_layers_7_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[665] + model_decoder_layers_7_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] + model_decoder_layers_7_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[670] + model_decoder_layers_7_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] + model_decoder_layers_7_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[672] + model_decoder_layers_7_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[673] + model_decoder_layers_7_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[674] + model_decoder_layers_7_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] + model_decoder_layers_7_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[676] + model_decoder_layers_7_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] + model_decoder_layers_7_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[678] + model_decoder_layers_7_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[679] + model_decoder_layers_7_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[680] + model_decoder_layers_8_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] + model_decoder_layers_8_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] + model_decoder_layers_8_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[683] + model_decoder_layers_8_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[684] + model_decoder_layers_8_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[685] + model_decoder_layers_8_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] + model_decoder_layers_8_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[687] + model_decoder_layers_8_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[688] + model_decoder_layers_8_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[689] + model_decoder_layers_8_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] + model_decoder_layers_8_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[694] + model_decoder_layers_8_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] + model_decoder_layers_8_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[696] + model_decoder_layers_8_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[697] + model_decoder_layers_8_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[698] + model_decoder_layers_8_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] + model_decoder_layers_8_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[700] + model_decoder_layers_8_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] + model_decoder_layers_8_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[702] + model_decoder_layers_8_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[703] + model_decoder_layers_8_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[704] + model_decoder_layers_9_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] + model_decoder_layers_9_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] + model_decoder_layers_9_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[707] + model_decoder_layers_9_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] + model_decoder_layers_9_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[709] + model_decoder_layers_9_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] + model_decoder_layers_9_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[711] + model_decoder_layers_9_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[712] + model_decoder_layers_9_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[713] + model_decoder_layers_9_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] + model_decoder_layers_9_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[718] + model_decoder_layers_9_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] + model_decoder_layers_9_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[720] + model_decoder_layers_9_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[721] + model_decoder_layers_9_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[722] + model_decoder_layers_9_fc1_weight3: R.Tensor((5120, 
1280), dtype="float16") = packed_params[723] + model_decoder_layers_9_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[724] + model_decoder_layers_9_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] + model_decoder_layers_9_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[726] + model_decoder_layers_9_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[727] + model_decoder_layers_9_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[728] + model_decoder_layers_10_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] + model_decoder_layers_10_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] + model_decoder_layers_10_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[731] + model_decoder_layers_10_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] + model_decoder_layers_10_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[733] + model_decoder_layers_10_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] + model_decoder_layers_10_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[735] + model_decoder_layers_10_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[736] + model_decoder_layers_10_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[737] + model_decoder_layers_10_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] + model_decoder_layers_10_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[742] + model_decoder_layers_10_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] + model_decoder_layers_10_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[744] + model_decoder_layers_10_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[745] + model_decoder_layers_10_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[746] + model_decoder_layers_10_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] + model_decoder_layers_10_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[748] + model_decoder_layers_10_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] + model_decoder_layers_10_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[750] + model_decoder_layers_10_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[751] + model_decoder_layers_10_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[752] + model_decoder_layers_11_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] + model_decoder_layers_11_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] + model_decoder_layers_11_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[755] + model_decoder_layers_11_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] + model_decoder_layers_11_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[757] + model_decoder_layers_11_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] + model_decoder_layers_11_self_attn_out_proj_bias3: R.Tensor((1280,), 
dtype="float16") = packed_params[759] + model_decoder_layers_11_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[760] + model_decoder_layers_11_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[761] + model_decoder_layers_11_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] + model_decoder_layers_11_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[766] + model_decoder_layers_11_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] + model_decoder_layers_11_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[768] + model_decoder_layers_11_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[769] + model_decoder_layers_11_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[770] + model_decoder_layers_11_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] + model_decoder_layers_11_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[772] + model_decoder_layers_11_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] + model_decoder_layers_11_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[774] + model_decoder_layers_11_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[775] + model_decoder_layers_11_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[776] + model_decoder_layers_12_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] + model_decoder_layers_12_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] + model_decoder_layers_12_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[779] + model_decoder_layers_12_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] + model_decoder_layers_12_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[781] + model_decoder_layers_12_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] + model_decoder_layers_12_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[783] + model_decoder_layers_12_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[784] + model_decoder_layers_12_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[785] + model_decoder_layers_12_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] + model_decoder_layers_12_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[790] + model_decoder_layers_12_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] + model_decoder_layers_12_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[792] + model_decoder_layers_12_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[793] + model_decoder_layers_12_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[794] + model_decoder_layers_12_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] + model_decoder_layers_12_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[796] + model_decoder_layers_12_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] + model_decoder_layers_12_fc2_bias3: 
R.Tensor((1280,), dtype="float16") = packed_params[798] + model_decoder_layers_12_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[799] + model_decoder_layers_12_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[800] + model_decoder_layers_13_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] + model_decoder_layers_13_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] + model_decoder_layers_13_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[803] + model_decoder_layers_13_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] + model_decoder_layers_13_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[805] + model_decoder_layers_13_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] + model_decoder_layers_13_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[807] + model_decoder_layers_13_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[808] + model_decoder_layers_13_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[809] + model_decoder_layers_13_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] + model_decoder_layers_13_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[814] + model_decoder_layers_13_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] + model_decoder_layers_13_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[816] + model_decoder_layers_13_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[817] + model_decoder_layers_13_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[818] + model_decoder_layers_13_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] + model_decoder_layers_13_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[820] + model_decoder_layers_13_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] + model_decoder_layers_13_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[822] + model_decoder_layers_13_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[823] + model_decoder_layers_13_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[824] + model_decoder_layers_14_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] + model_decoder_layers_14_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] + model_decoder_layers_14_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[827] + model_decoder_layers_14_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] + model_decoder_layers_14_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[829] + model_decoder_layers_14_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] + model_decoder_layers_14_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[831] + model_decoder_layers_14_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[832] + model_decoder_layers_14_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[833] + 
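+ # NOTE (editorial, assumption): packed_params indices 834-836 are skipped at
+ # this point, just as 738-740, 762-764, ... are skipped for every other layer:
+ # the gap always sits between a layer's self-attention bindings and its
+ # encoder_attn q_proj. The three missing slots most plausibly hold the
+ # encoder_attn k_proj weight and v_proj weight/bias, which this decode path
+ # never binds because cross-attention K/V are served from the paged KV cache
+ # (vm.builtin.attention_kv_cache_cross_attention below) rather than recomputed
+ # per token. Each layer occupies a fixed stride of 24 slots, e.g. layer 10
+ # spans packed_params[729:753]; a hypothetical index helper, not part of the
+ # dump:
+ #     def layer_param_base(layer_idx: int) -> int:
+ #         return 729 + (layer_idx - 10) * 24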
model_decoder_layers_14_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] + model_decoder_layers_14_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[838] + model_decoder_layers_14_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] + model_decoder_layers_14_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[840] + model_decoder_layers_14_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[841] + model_decoder_layers_14_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[842] + model_decoder_layers_14_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] + model_decoder_layers_14_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[844] + model_decoder_layers_14_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] + model_decoder_layers_14_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[846] + model_decoder_layers_14_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[847] + model_decoder_layers_14_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[848] + model_decoder_layers_15_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] + model_decoder_layers_15_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] + model_decoder_layers_15_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[851] + model_decoder_layers_15_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] + model_decoder_layers_15_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[853] + model_decoder_layers_15_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] + model_decoder_layers_15_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[855] + model_decoder_layers_15_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[856] + model_decoder_layers_15_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[857] + model_decoder_layers_15_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] + model_decoder_layers_15_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[862] + model_decoder_layers_15_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] + model_decoder_layers_15_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[864] + model_decoder_layers_15_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[865] + model_decoder_layers_15_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[866] + model_decoder_layers_15_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] + model_decoder_layers_15_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[868] + model_decoder_layers_15_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] + model_decoder_layers_15_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[870] + model_decoder_layers_15_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[871] + model_decoder_layers_15_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[872] + model_decoder_layers_16_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] + model_decoder_layers_16_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] + model_decoder_layers_16_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[875] + model_decoder_layers_16_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] + model_decoder_layers_16_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[877] + model_decoder_layers_16_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] + model_decoder_layers_16_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[879] + model_decoder_layers_16_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[880] + model_decoder_layers_16_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[881] + model_decoder_layers_16_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] + model_decoder_layers_16_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[886] + model_decoder_layers_16_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] + model_decoder_layers_16_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[888] + model_decoder_layers_16_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[889] + model_decoder_layers_16_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[890] + model_decoder_layers_16_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] + model_decoder_layers_16_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[892] + model_decoder_layers_16_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] + model_decoder_layers_16_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[894] + model_decoder_layers_16_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[895] + model_decoder_layers_16_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[896] + model_decoder_layers_17_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] + model_decoder_layers_17_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] + model_decoder_layers_17_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[899] + model_decoder_layers_17_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] + model_decoder_layers_17_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[901] + model_decoder_layers_17_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] + model_decoder_layers_17_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[903] + model_decoder_layers_17_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[904] + model_decoder_layers_17_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[905] + model_decoder_layers_17_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] + model_decoder_layers_17_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[910] + 
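+ # NOTE (editorial): the MLP bindings repeat the same shape pattern in every
+ # layer: fc1 expands 1280 -> 5120 (a 4x feed-forward) and fc2 projects
+ # 5120 -> 1280. In the decode body below, the pair is lowered to two fused
+ # cuBLAS calls, with GELU folded into the fc1 kernel
+ # ("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas").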
model_decoder_layers_17_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] + model_decoder_layers_17_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[912] + model_decoder_layers_17_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[913] + model_decoder_layers_17_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[914] + model_decoder_layers_17_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] + model_decoder_layers_17_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[916] + model_decoder_layers_17_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] + model_decoder_layers_17_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[918] + model_decoder_layers_17_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[919] + model_decoder_layers_17_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[920] + model_decoder_layers_18_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] + model_decoder_layers_18_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] + model_decoder_layers_18_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[923] + model_decoder_layers_18_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] + model_decoder_layers_18_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[925] + model_decoder_layers_18_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] + model_decoder_layers_18_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[927] + model_decoder_layers_18_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[928] + model_decoder_layers_18_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[929] + model_decoder_layers_18_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] + model_decoder_layers_18_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[934] + model_decoder_layers_18_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] + model_decoder_layers_18_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[936] + model_decoder_layers_18_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[937] + model_decoder_layers_18_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[938] + model_decoder_layers_18_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] + model_decoder_layers_18_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[940] + model_decoder_layers_18_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] + model_decoder_layers_18_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[942] + model_decoder_layers_18_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[943] + model_decoder_layers_18_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[944] + model_decoder_layers_19_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] + model_decoder_layers_19_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[946] + model_decoder_layers_19_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[947] + model_decoder_layers_19_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] + model_decoder_layers_19_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[949] + model_decoder_layers_19_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] + model_decoder_layers_19_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[951] + model_decoder_layers_19_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[952] + model_decoder_layers_19_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[953] + model_decoder_layers_19_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] + model_decoder_layers_19_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[958] + model_decoder_layers_19_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] + model_decoder_layers_19_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[960] + model_decoder_layers_19_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[961] + model_decoder_layers_19_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[962] + model_decoder_layers_19_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] + model_decoder_layers_19_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[964] + model_decoder_layers_19_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] + model_decoder_layers_19_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[966] + model_decoder_layers_19_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[967] + model_decoder_layers_19_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[968] + model_decoder_layers_20_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] + model_decoder_layers_20_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] + model_decoder_layers_20_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[971] + model_decoder_layers_20_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] + model_decoder_layers_20_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[973] + model_decoder_layers_20_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] + model_decoder_layers_20_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[975] + model_decoder_layers_20_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[976] + model_decoder_layers_20_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[977] + model_decoder_layers_20_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] + model_decoder_layers_20_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[982] + model_decoder_layers_20_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] + model_decoder_layers_20_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[984] + 
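+ # NOTE (editorial, assumption): every layer binds self_attn_k_proj_weight* but
+ # no matching k_proj bias, and in the decode body the K projection is the only
+ # one lowered to the bias-free kernel
+ # "fused_relax_permute_dims_relax_matmul3_cublas". This is consistent with a
+ # Whisper-style decoder (d_model 1280, 32 layers), where the key projection
+ # carries no bias while the q/v/out projections do.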
model_decoder_layers_20_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[985] + model_decoder_layers_20_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[986] + model_decoder_layers_20_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] + model_decoder_layers_20_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[988] + model_decoder_layers_20_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] + model_decoder_layers_20_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[990] + model_decoder_layers_20_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[991] + model_decoder_layers_20_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[992] + model_decoder_layers_21_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] + model_decoder_layers_21_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] + model_decoder_layers_21_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[995] + model_decoder_layers_21_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] + model_decoder_layers_21_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[997] + model_decoder_layers_21_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] + model_decoder_layers_21_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[999] + model_decoder_layers_21_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1000] + model_decoder_layers_21_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1001] + model_decoder_layers_21_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] + model_decoder_layers_21_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1006] + model_decoder_layers_21_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] + model_decoder_layers_21_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1008] + model_decoder_layers_21_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1009] + model_decoder_layers_21_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1010] + model_decoder_layers_21_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] + model_decoder_layers_21_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1012] + model_decoder_layers_21_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] + model_decoder_layers_21_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1014] + model_decoder_layers_21_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1015] + model_decoder_layers_21_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1016] + model_decoder_layers_22_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] + model_decoder_layers_22_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] + model_decoder_layers_22_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1019] + model_decoder_layers_22_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[1020] + model_decoder_layers_22_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1021] + model_decoder_layers_22_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] + model_decoder_layers_22_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1023] + model_decoder_layers_22_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1024] + model_decoder_layers_22_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1025] + model_decoder_layers_22_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] + model_decoder_layers_22_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1030] + model_decoder_layers_22_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] + model_decoder_layers_22_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1032] + model_decoder_layers_22_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1033] + model_decoder_layers_22_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1034] + model_decoder_layers_22_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] + model_decoder_layers_22_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1036] + model_decoder_layers_22_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] + model_decoder_layers_22_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1038] + model_decoder_layers_22_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1039] + model_decoder_layers_22_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1040] + model_decoder_layers_23_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] + model_decoder_layers_23_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] + model_decoder_layers_23_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1043] + model_decoder_layers_23_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] + model_decoder_layers_23_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1045] + model_decoder_layers_23_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] + model_decoder_layers_23_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1047] + model_decoder_layers_23_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1048] + model_decoder_layers_23_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1049] + model_decoder_layers_23_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] + model_decoder_layers_23_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1054] + model_decoder_layers_23_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] + model_decoder_layers_23_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1056] + model_decoder_layers_23_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1057] + model_decoder_layers_23_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = 
packed_params[1058] + model_decoder_layers_23_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] + model_decoder_layers_23_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1060] + model_decoder_layers_23_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] + model_decoder_layers_23_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1062] + model_decoder_layers_23_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1063] + model_decoder_layers_23_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1064] + model_decoder_layers_24_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] + model_decoder_layers_24_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] + model_decoder_layers_24_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1067] + model_decoder_layers_24_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] + model_decoder_layers_24_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1069] + model_decoder_layers_24_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] + model_decoder_layers_24_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1071] + model_decoder_layers_24_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1072] + model_decoder_layers_24_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1073] + model_decoder_layers_24_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] + model_decoder_layers_24_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1078] + model_decoder_layers_24_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] + model_decoder_layers_24_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1080] + model_decoder_layers_24_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1081] + model_decoder_layers_24_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1082] + model_decoder_layers_24_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] + model_decoder_layers_24_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1084] + model_decoder_layers_24_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] + model_decoder_layers_24_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1086] + model_decoder_layers_24_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1087] + model_decoder_layers_24_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1088] + model_decoder_layers_25_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] + model_decoder_layers_25_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] + model_decoder_layers_25_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1091] + model_decoder_layers_25_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] + model_decoder_layers_25_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1093] + model_decoder_layers_25_self_attn_out_proj_weight3: R.Tensor((1280, 
1280), dtype="float16") = packed_params[1094] + model_decoder_layers_25_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1095] + model_decoder_layers_25_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1096] + model_decoder_layers_25_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1097] + model_decoder_layers_25_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] + model_decoder_layers_25_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1102] + model_decoder_layers_25_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] + model_decoder_layers_25_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1104] + model_decoder_layers_25_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1105] + model_decoder_layers_25_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1106] + model_decoder_layers_25_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] + model_decoder_layers_25_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1108] + model_decoder_layers_25_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] + model_decoder_layers_25_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1110] + model_decoder_layers_25_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1111] + model_decoder_layers_25_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1112] + model_decoder_layers_26_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] + model_decoder_layers_26_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] + model_decoder_layers_26_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1115] + model_decoder_layers_26_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] + model_decoder_layers_26_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1117] + model_decoder_layers_26_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] + model_decoder_layers_26_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1119] + model_decoder_layers_26_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1120] + model_decoder_layers_26_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1121] + model_decoder_layers_26_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] + model_decoder_layers_26_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1126] + model_decoder_layers_26_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] + model_decoder_layers_26_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1128] + model_decoder_layers_26_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1129] + model_decoder_layers_26_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1130] + model_decoder_layers_26_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] + model_decoder_layers_26_fc1_bias3: R.Tensor((5120,), dtype="float16") = 
packed_params[1132] + model_decoder_layers_26_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] + model_decoder_layers_26_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1134] + model_decoder_layers_26_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1135] + model_decoder_layers_26_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1136] + model_decoder_layers_27_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] + model_decoder_layers_27_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] + model_decoder_layers_27_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1139] + model_decoder_layers_27_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] + model_decoder_layers_27_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1141] + model_decoder_layers_27_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] + model_decoder_layers_27_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1143] + model_decoder_layers_27_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1144] + model_decoder_layers_27_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1145] + model_decoder_layers_27_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] + model_decoder_layers_27_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1150] + model_decoder_layers_27_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] + model_decoder_layers_27_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1152] + model_decoder_layers_27_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1153] + model_decoder_layers_27_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1154] + model_decoder_layers_27_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] + model_decoder_layers_27_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1156] + model_decoder_layers_27_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] + model_decoder_layers_27_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1158] + model_decoder_layers_27_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1159] + model_decoder_layers_27_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1160] + model_decoder_layers_28_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] + model_decoder_layers_28_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] + model_decoder_layers_28_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1163] + model_decoder_layers_28_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] + model_decoder_layers_28_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1165] + model_decoder_layers_28_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] + model_decoder_layers_28_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1167] + 
model_decoder_layers_28_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1168] + model_decoder_layers_28_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1169] + model_decoder_layers_28_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] + model_decoder_layers_28_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1174] + model_decoder_layers_28_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] + model_decoder_layers_28_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1176] + model_decoder_layers_28_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1177] + model_decoder_layers_28_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1178] + model_decoder_layers_28_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] + model_decoder_layers_28_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1180] + model_decoder_layers_28_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] + model_decoder_layers_28_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1182] + model_decoder_layers_28_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1183] + model_decoder_layers_28_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1184] + model_decoder_layers_29_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] + model_decoder_layers_29_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] + model_decoder_layers_29_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1187] + model_decoder_layers_29_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] + model_decoder_layers_29_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1189] + model_decoder_layers_29_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] + model_decoder_layers_29_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1191] + model_decoder_layers_29_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1192] + model_decoder_layers_29_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1193] + model_decoder_layers_29_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] + model_decoder_layers_29_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1198] + model_decoder_layers_29_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] + model_decoder_layers_29_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1200] + model_decoder_layers_29_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1201] + model_decoder_layers_29_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1202] + model_decoder_layers_29_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] + model_decoder_layers_29_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1204] + model_decoder_layers_29_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] + model_decoder_layers_29_fc2_bias3: 
R.Tensor((1280,), dtype="float16") = packed_params[1206] + model_decoder_layers_29_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1207] + model_decoder_layers_29_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1208] + model_decoder_layers_30_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] + model_decoder_layers_30_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] + model_decoder_layers_30_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1211] + model_decoder_layers_30_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] + model_decoder_layers_30_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1213] + model_decoder_layers_30_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] + model_decoder_layers_30_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1215] + model_decoder_layers_30_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1216] + model_decoder_layers_30_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1217] + model_decoder_layers_30_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] + model_decoder_layers_30_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1222] + model_decoder_layers_30_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] + model_decoder_layers_30_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1224] + model_decoder_layers_30_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1225] + model_decoder_layers_30_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1226] + model_decoder_layers_30_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] + model_decoder_layers_30_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1228] + model_decoder_layers_30_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] + model_decoder_layers_30_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1230] + model_decoder_layers_30_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1231] + model_decoder_layers_30_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1232] + model_decoder_layers_31_self_attn_k_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] + model_decoder_layers_31_self_attn_v_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] + model_decoder_layers_31_self_attn_v_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1235] + model_decoder_layers_31_self_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] + model_decoder_layers_31_self_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1237] + model_decoder_layers_31_self_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] + model_decoder_layers_31_self_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1239] + model_decoder_layers_31_self_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1240] + model_decoder_layers_31_self_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") 
= packed_params[1241] + model_decoder_layers_31_encoder_attn_q_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] + model_decoder_layers_31_encoder_attn_q_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1246] + model_decoder_layers_31_encoder_attn_out_proj_weight3: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] + model_decoder_layers_31_encoder_attn_out_proj_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1248] + model_decoder_layers_31_encoder_attn_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1249] + model_decoder_layers_31_encoder_attn_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1250] + model_decoder_layers_31_fc1_weight3: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] + model_decoder_layers_31_fc1_bias3: R.Tensor((5120,), dtype="float16") = packed_params[1252] + model_decoder_layers_31_fc2_weight3: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] + model_decoder_layers_31_fc2_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1254] + model_decoder_layers_31_final_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1255] + model_decoder_layers_31_final_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1256] + model_decoder_layer_norm_weight3: R.Tensor((1280,), dtype="float16") = packed_params[1257] + model_decoder_layer_norm_bias3: R.Tensor((1280,), dtype="float16") = packed_params[1258] + reshape707: R.Tensor((batch_size,), dtype="int32") = R.reshape(input_ids, R.shape([batch_size])) + take3: R.Tensor((batch_size, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight3, reshape707, axis=0) + reshape708: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(take3, R.shape([batch_size, 1, 1280])) + lv133: R.Tensor((batch_size,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((batch_size,), dtype="int32"),)) + take4: R.Tensor((batch_size, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight3, lv133, axis=0) + reshape709: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(take4, R.shape([batch_size, 1, 1280])) + add578: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(reshape708, reshape709) + layer_norm162: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add578, model_decoder_layers_0_self_attn_layer_norm_weight3, model_decoder_layers_0_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv224 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_0_self_attn_q_proj_weight3, layer_norm162, model_decoder_layers_0_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape710: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv224, R.shape([batch_size, 1, 20, 64])) + lv65 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_0_self_attn_k_proj_weight3, layer_norm162), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape711: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv65, R.shape([batch_size, 1, 20, 64])) + lv225 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_0_self_attn_v_proj_weight3, layer_norm162, model_decoder_layers_0_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), 
dtype="float16")) + reshape712: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv225, R.shape([batch_size, 1, 20, 64])) + concat32: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape710, reshape711, reshape712), axis=2) + reshape713: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat32, R.shape([batch_size, 60, 64])) + lv134 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape713), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape714: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv134, R.shape([batch_size, 1, 20, 64])) + reshape715: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape714, R.shape([batch_size, 1, 1280])) + lv226 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_0_self_attn_out_proj_weight3, reshape715, model_decoder_layers_0_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add582: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add578, lv226) + layer_norm163: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add582, model_decoder_layers_0_encoder_attn_layer_norm_weight3, model_decoder_layers_0_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv227 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_0_encoder_attn_q_proj_weight3, layer_norm163, model_decoder_layers_0_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape716: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv227, R.shape([batch_size, 1, 20, 64])) + reshape717: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape716, R.shape([batch_size, 20, 64])) + lv135 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape717), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape718: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv135, R.shape([batch_size, 1, 20, 64])) + reshape719: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape718, R.shape([batch_size, 1, 1280])) + lv228 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_0_encoder_attn_out_proj_weight3, reshape719, model_decoder_layers_0_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add585: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add582, lv228) + layer_norm164: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add585, model_decoder_layers_0_final_layer_norm_weight3, model_decoder_layers_0_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv32 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_0_fc1_weight3, layer_norm164, model_decoder_layers_0_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv229 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_0_fc2_weight3, lv32, model_decoder_layers_0_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add588: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add585, lv229) + 
layer_norm165: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add588, model_decoder_layers_1_self_attn_layer_norm_weight3, model_decoder_layers_1_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv230 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_1_self_attn_q_proj_weight3, layer_norm165, model_decoder_layers_1_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape720: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv230, R.shape([batch_size, 1, 20, 64])) + lv66 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_1_self_attn_k_proj_weight3, layer_norm165), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape721: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv66, R.shape([batch_size, 1, 20, 64])) + lv231 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_1_self_attn_v_proj_weight3, layer_norm165, model_decoder_layers_1_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape722: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv231, R.shape([batch_size, 1, 20, 64])) + concat33: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape720, reshape721, reshape722), axis=2) + reshape723: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat33, R.shape([batch_size, 60, 64])) + lv136 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape723), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape724: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv136, R.shape([batch_size, 1, 20, 64])) + reshape725: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape724, R.shape([batch_size, 1, 1280])) + lv232 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_1_self_attn_out_proj_weight3, reshape725, model_decoder_layers_1_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add592: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add588, lv232) + layer_norm166: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add592, model_decoder_layers_1_encoder_attn_layer_norm_weight3, model_decoder_layers_1_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv233 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_1_encoder_attn_q_proj_weight3, layer_norm166, model_decoder_layers_1_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape726: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv233, R.shape([batch_size, 1, 20, 64])) + reshape727: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape726, R.shape([batch_size, 20, 64])) + lv137 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape727), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape728: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv137, R.shape([batch_size, 1, 20, 64])) + reshape729: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.reshape(reshape728, R.shape([batch_size, 1, 1280])) + lv234 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_1_encoder_attn_out_proj_weight3, reshape729, model_decoder_layers_1_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add595: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add592, lv234) + layer_norm167: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add595, model_decoder_layers_1_final_layer_norm_weight3, model_decoder_layers_1_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv33 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_1_fc1_weight3, layer_norm167, model_decoder_layers_1_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv235 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_1_fc2_weight3, lv33, model_decoder_layers_1_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add598: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add595, lv235) + layer_norm168: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add598, model_decoder_layers_2_self_attn_layer_norm_weight3, model_decoder_layers_2_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv236 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_2_self_attn_q_proj_weight3, layer_norm168, model_decoder_layers_2_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape730: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv236, R.shape([batch_size, 1, 20, 64])) + lv67 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_2_self_attn_k_proj_weight3, layer_norm168), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape731: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv67, R.shape([batch_size, 1, 20, 64])) + lv237 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_2_self_attn_v_proj_weight3, layer_norm168, model_decoder_layers_2_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape732: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv237, R.shape([batch_size, 1, 20, 64])) + concat34: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape730, reshape731, reshape732), axis=2) + reshape733: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat34, R.shape([batch_size, 60, 64])) + lv138 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape733), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape734: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv138, R.shape([batch_size, 1, 20, 64])) + reshape735: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape734, R.shape([batch_size, 1, 1280])) + lv238 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_2_self_attn_out_proj_weight3, reshape735, model_decoder_layers_2_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add602: R.Tensor((batch_size, 1, 1280), 
dtype="float16") = R.add(add598, lv238) + layer_norm169: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add602, model_decoder_layers_2_encoder_attn_layer_norm_weight3, model_decoder_layers_2_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv239 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_2_encoder_attn_q_proj_weight3, layer_norm169, model_decoder_layers_2_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape736: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv239, R.shape([batch_size, 1, 20, 64])) + reshape737: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape736, R.shape([batch_size, 20, 64])) + lv139 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape737), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape738: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv139, R.shape([batch_size, 1, 20, 64])) + reshape739: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape738, R.shape([batch_size, 1, 1280])) + lv240 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_2_encoder_attn_out_proj_weight3, reshape739, model_decoder_layers_2_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add605: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add602, lv240) + layer_norm170: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add605, model_decoder_layers_2_final_layer_norm_weight3, model_decoder_layers_2_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv34 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_2_fc1_weight3, layer_norm170, model_decoder_layers_2_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv241 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_2_fc2_weight3, lv34, model_decoder_layers_2_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add608: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add605, lv241) + layer_norm171: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add608, model_decoder_layers_3_self_attn_layer_norm_weight3, model_decoder_layers_3_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv242 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_self_attn_q_proj_weight3, layer_norm171, model_decoder_layers_3_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape740: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv242, R.shape([batch_size, 1, 20, 64])) + lv68 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_3_self_attn_k_proj_weight3, layer_norm171), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape741: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv68, R.shape([batch_size, 1, 20, 64])) + lv243 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_self_attn_v_proj_weight3, layer_norm171, 
+ layer_norm171: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add608, model_decoder_layers_3_self_attn_layer_norm_weight3, model_decoder_layers_3_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv242 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_self_attn_q_proj_weight3, layer_norm171, model_decoder_layers_3_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape740: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv242, R.shape([batch_size, 1, 20, 64]))
+ lv68 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_3_self_attn_k_proj_weight3, layer_norm171), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape741: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv68, R.shape([batch_size, 1, 20, 64]))
+ lv243 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_self_attn_v_proj_weight3, layer_norm171, model_decoder_layers_3_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape742: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv243, R.shape([batch_size, 1, 20, 64]))
+ concat35: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape740, reshape741, reshape742), axis=2)
+ reshape743: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat35, R.shape([batch_size, 60, 64]))
+ lv140 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape743), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape744: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv140, R.shape([batch_size, 1, 20, 64]))
+ reshape745: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape744, R.shape([batch_size, 1, 1280]))
+ lv244 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_self_attn_out_proj_weight3, reshape745, model_decoder_layers_3_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add612: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add608, lv244)
+ layer_norm172: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add612, model_decoder_layers_3_encoder_attn_layer_norm_weight3, model_decoder_layers_3_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv245 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_encoder_attn_q_proj_weight3, layer_norm172, model_decoder_layers_3_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape746: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv245, R.shape([batch_size, 1, 20, 64]))
+ reshape747: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape746, R.shape([batch_size, 20, 64]))
+ lv141 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape747), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape748: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv141, R.shape([batch_size, 1, 20, 64]))
+ reshape749: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape748, R.shape([batch_size, 1, 1280]))
+ lv246 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_3_encoder_attn_out_proj_weight3, reshape749, model_decoder_layers_3_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add615: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add612, lv246)
+ layer_norm173: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add615, model_decoder_layers_3_final_layer_norm_weight3, model_decoder_layers_3_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv35 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_3_fc1_weight3, layer_norm173, model_decoder_layers_3_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv247 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_3_fc2_weight3, lv35, model_decoder_layers_3_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add618: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add615, lv247)
+ layer_norm174: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add618, model_decoder_layers_4_self_attn_layer_norm_weight3, model_decoder_layers_4_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv248 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_4_self_attn_q_proj_weight3, layer_norm174, model_decoder_layers_4_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape750: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv248, R.shape([batch_size, 1, 20, 64]))
+ lv69 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_4_self_attn_k_proj_weight3, layer_norm174), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape751: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv69, R.shape([batch_size, 1, 20, 64]))
+ lv249 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_4_self_attn_v_proj_weight3, layer_norm174, model_decoder_layers_4_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape752: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv249, R.shape([batch_size, 1, 20, 64]))
+ concat36: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape750, reshape751, reshape752), axis=2)
+ reshape753: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat36, R.shape([batch_size, 60, 64]))
+ lv142 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape753), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape754: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv142, R.shape([batch_size, 1, 20, 64]))
+ reshape755: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape754, R.shape([batch_size, 1, 1280]))
+ lv250 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_4_self_attn_out_proj_weight3, reshape755, model_decoder_layers_4_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add622: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add618, lv250)
+ layer_norm175: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add622, model_decoder_layers_4_encoder_attn_layer_norm_weight3, model_decoder_layers_4_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv251 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_4_encoder_attn_q_proj_weight3, layer_norm175, model_decoder_layers_4_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape756: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv251, R.shape([batch_size, 1, 20, 64]))
+ reshape757: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape756, R.shape([batch_size, 20, 64]))
+ lv143 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape757), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape758: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv143, R.shape([batch_size, 1, 20, 64]))
+ reshape759: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape758, R.shape([batch_size, 1, 1280]))
+ lv252 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_4_encoder_attn_out_proj_weight3, reshape759, model_decoder_layers_4_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add625: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add622, lv252)
+ layer_norm176: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add625, model_decoder_layers_4_final_layer_norm_weight3, model_decoder_layers_4_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv36 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_4_fc1_weight3, layer_norm176, model_decoder_layers_4_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv253 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_4_fc2_weight3, lv36, model_decoder_layers_4_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add628: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add625, lv253)
+ layer_norm177: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add628, model_decoder_layers_5_self_attn_layer_norm_weight3, model_decoder_layers_5_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv254 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_5_self_attn_q_proj_weight3, layer_norm177, model_decoder_layers_5_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape760: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv254, R.shape([batch_size, 1, 20, 64]))
+ lv70 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_5_self_attn_k_proj_weight3, layer_norm177), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape761: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv70, R.shape([batch_size, 1, 20, 64]))
+ lv255 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_5_self_attn_v_proj_weight3, layer_norm177, model_decoder_layers_5_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape762: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv255, R.shape([batch_size, 1, 20, 64]))
+ concat37: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape760, reshape761, reshape762), axis=2)
+ reshape763: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat37, R.shape([batch_size, 60, 64]))
+ lv144 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape763), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape764: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv144, R.shape([batch_size, 1, 20, 64]))
+ reshape765: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape764, R.shape([batch_size, 1, 1280]))
+ lv256 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_5_self_attn_out_proj_weight3, reshape765, model_decoder_layers_5_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add632: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add628, lv256)
+ layer_norm178: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add632, model_decoder_layers_5_encoder_attn_layer_norm_weight3, model_decoder_layers_5_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv257 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_5_encoder_attn_q_proj_weight3, layer_norm178, model_decoder_layers_5_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape766: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv257, R.shape([batch_size, 1, 20, 64]))
+ reshape767: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape766, R.shape([batch_size, 20, 64]))
+ lv145 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape767), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape768: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv145, R.shape([batch_size, 1, 20, 64]))
+ reshape769: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape768, R.shape([batch_size, 1, 1280]))
+ lv258 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_5_encoder_attn_out_proj_weight3, reshape769, model_decoder_layers_5_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add635: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add632, lv258)
+ layer_norm179: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add635, model_decoder_layers_5_final_layer_norm_weight3, model_decoder_layers_5_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv37 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_5_fc1_weight3, layer_norm179, model_decoder_layers_5_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv259 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_5_fc2_weight3, lv37, model_decoder_layers_5_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add638: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add635, lv259)
+ layer_norm180: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add638, model_decoder_layers_6_self_attn_layer_norm_weight3, model_decoder_layers_6_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv260 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_6_self_attn_q_proj_weight3, layer_norm180, model_decoder_layers_6_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape770: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv260, R.shape([batch_size, 1, 20, 64]))
+ lv71 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_6_self_attn_k_proj_weight3, layer_norm180), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape771: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv71, R.shape([batch_size, 1, 20, 64]))
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_6_self_attn_v_proj_weight3, layer_norm180, model_decoder_layers_6_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape772: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv261, R.shape([batch_size, 1, 20, 64])) + concat38: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape770, reshape771, reshape772), axis=2) + reshape773: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat38, R.shape([batch_size, 60, 64])) + lv146 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape773), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape774: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv146, R.shape([batch_size, 1, 20, 64])) + reshape775: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape774, R.shape([batch_size, 1, 1280])) + lv262 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_6_self_attn_out_proj_weight3, reshape775, model_decoder_layers_6_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add642: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add638, lv262) + layer_norm181: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add642, model_decoder_layers_6_encoder_attn_layer_norm_weight3, model_decoder_layers_6_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv263 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_6_encoder_attn_q_proj_weight3, layer_norm181, model_decoder_layers_6_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape776: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv263, R.shape([batch_size, 1, 20, 64])) + reshape777: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape776, R.shape([batch_size, 20, 64])) + lv147 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape777), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape778: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv147, R.shape([batch_size, 1, 20, 64])) + reshape779: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape778, R.shape([batch_size, 1, 1280])) + lv264 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_6_encoder_attn_out_proj_weight3, reshape779, model_decoder_layers_6_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add645: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add642, lv264) + layer_norm182: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add645, model_decoder_layers_6_final_layer_norm_weight3, model_decoder_layers_6_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv38 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_6_fc1_weight3, layer_norm182, model_decoder_layers_6_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv265 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_6_fc2_weight3, lv38, model_decoder_layers_6_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add648: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add645, lv265) + layer_norm183: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add648, model_decoder_layers_7_self_attn_layer_norm_weight3, model_decoder_layers_7_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv266 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_7_self_attn_q_proj_weight3, layer_norm183, model_decoder_layers_7_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape780: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv266, R.shape([batch_size, 1, 20, 64])) + lv72 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_7_self_attn_k_proj_weight3, layer_norm183), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape781: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv72, R.shape([batch_size, 1, 20, 64])) + lv267 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_7_self_attn_v_proj_weight3, layer_norm183, model_decoder_layers_7_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape782: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv267, R.shape([batch_size, 1, 20, 64])) + concat39: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape780, reshape781, reshape782), axis=2) + reshape783: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat39, R.shape([batch_size, 60, 64])) + lv148 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape783), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape784: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv148, R.shape([batch_size, 1, 20, 64])) + reshape785: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape784, R.shape([batch_size, 1, 1280])) + lv268 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_7_self_attn_out_proj_weight3, reshape785, model_decoder_layers_7_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add652: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add648, lv268) + layer_norm184: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add652, model_decoder_layers_7_encoder_attn_layer_norm_weight3, model_decoder_layers_7_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv269 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_7_encoder_attn_q_proj_weight3, layer_norm184, model_decoder_layers_7_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape786: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv269, R.shape([batch_size, 1, 20, 64])) + reshape787: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape786, R.shape([batch_size, 20, 64])) + lv149 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), 
R.prim_value(T.float32(1)), reshape787), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape788: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv149, R.shape([batch_size, 1, 20, 64])) + reshape789: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape788, R.shape([batch_size, 1, 1280])) + lv270 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_7_encoder_attn_out_proj_weight3, reshape789, model_decoder_layers_7_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add655: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add652, lv270) + layer_norm185: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add655, model_decoder_layers_7_final_layer_norm_weight3, model_decoder_layers_7_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv39 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_7_fc1_weight3, layer_norm185, model_decoder_layers_7_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv271 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_7_fc2_weight3, lv39, model_decoder_layers_7_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add658: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add655, lv271) + layer_norm186: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add658, model_decoder_layers_8_self_attn_layer_norm_weight3, model_decoder_layers_8_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv272 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_8_self_attn_q_proj_weight3, layer_norm186, model_decoder_layers_8_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape790: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv272, R.shape([batch_size, 1, 20, 64])) + lv73 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_8_self_attn_k_proj_weight3, layer_norm186), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape791: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv73, R.shape([batch_size, 1, 20, 64])) + lv273 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_8_self_attn_v_proj_weight3, layer_norm186, model_decoder_layers_8_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape792: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv273, R.shape([batch_size, 1, 20, 64])) + concat40: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape790, reshape791, reshape792), axis=2) + reshape793: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat40, R.shape([batch_size, 60, 64])) + lv150 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape793), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape794: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv150, R.shape([batch_size, 1, 20, 64])) + reshape795: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape794, R.shape([batch_size, 1, 1280])) + lv274 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_8_self_attn_out_proj_weight3, reshape795, model_decoder_layers_8_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add662: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add658, lv274) + layer_norm187: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add662, model_decoder_layers_8_encoder_attn_layer_norm_weight3, model_decoder_layers_8_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv275 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_8_encoder_attn_q_proj_weight3, layer_norm187, model_decoder_layers_8_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape796: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv275, R.shape([batch_size, 1, 20, 64])) + reshape797: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape796, R.shape([batch_size, 20, 64])) + lv151 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape797), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape798: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv151, R.shape([batch_size, 1, 20, 64])) + reshape799: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape798, R.shape([batch_size, 1, 1280])) + lv276 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_8_encoder_attn_out_proj_weight3, reshape799, model_decoder_layers_8_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add665: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add662, lv276) + layer_norm188: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add665, model_decoder_layers_8_final_layer_norm_weight3, model_decoder_layers_8_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv40 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_8_fc1_weight3, layer_norm188, model_decoder_layers_8_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv277 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_8_fc2_weight3, lv40, model_decoder_layers_8_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add668: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add665, lv277) + layer_norm189: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add668, model_decoder_layers_9_self_attn_layer_norm_weight3, model_decoder_layers_9_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv278 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_self_attn_q_proj_weight3, layer_norm189, model_decoder_layers_9_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape800: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv278, R.shape([batch_size, 1, 20, 64])) + lv74 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_9_self_attn_k_proj_weight3, layer_norm189), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) 
+ layer_norm189: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add668, model_decoder_layers_9_self_attn_layer_norm_weight3, model_decoder_layers_9_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv278 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_self_attn_q_proj_weight3, layer_norm189, model_decoder_layers_9_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape800: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv278, R.shape([batch_size, 1, 20, 64]))
+ lv74 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_9_self_attn_k_proj_weight3, layer_norm189), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape801: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv74, R.shape([batch_size, 1, 20, 64]))
+ lv279 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_self_attn_v_proj_weight3, layer_norm189, model_decoder_layers_9_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape802: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv279, R.shape([batch_size, 1, 20, 64]))
+ concat41: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape800, reshape801, reshape802), axis=2)
+ reshape803: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat41, R.shape([batch_size, 60, 64]))
+ lv152 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape803), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape804: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv152, R.shape([batch_size, 1, 20, 64]))
+ reshape805: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape804, R.shape([batch_size, 1, 1280]))
+ lv280 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_self_attn_out_proj_weight3, reshape805, model_decoder_layers_9_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add672: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add668, lv280)
+ layer_norm190: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add672, model_decoder_layers_9_encoder_attn_layer_norm_weight3, model_decoder_layers_9_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv281 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_encoder_attn_q_proj_weight3, layer_norm190, model_decoder_layers_9_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape806: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv281, R.shape([batch_size, 1, 20, 64]))
+ reshape807: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape806, R.shape([batch_size, 20, 64]))
+ lv153 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape807), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape808: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv153, R.shape([batch_size, 1, 20, 64]))
+ reshape809: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape808, R.shape([batch_size, 1, 1280]))
+ lv282 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_9_encoder_attn_out_proj_weight3, reshape809, model_decoder_layers_9_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add675: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add672, lv282)
+ layer_norm191: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add675, model_decoder_layers_9_final_layer_norm_weight3, model_decoder_layers_9_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv41 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_9_fc1_weight3, layer_norm191, model_decoder_layers_9_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv283 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_9_fc2_weight3, lv41, model_decoder_layers_9_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add678: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add675, lv283)
+ layer_norm192: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add678, model_decoder_layers_10_self_attn_layer_norm_weight3, model_decoder_layers_10_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv284 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_10_self_attn_q_proj_weight3, layer_norm192, model_decoder_layers_10_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape810: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv284, R.shape([batch_size, 1, 20, 64]))
+ lv75 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_10_self_attn_k_proj_weight3, layer_norm192), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape811: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv75, R.shape([batch_size, 1, 20, 64]))
+ lv285 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_10_self_attn_v_proj_weight3, layer_norm192, model_decoder_layers_10_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape812: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv285, R.shape([batch_size, 1, 20, 64]))
+ concat42: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape810, reshape811, reshape812), axis=2)
+ reshape813: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat42, R.shape([batch_size, 60, 64]))
+ lv154 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape813), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape814: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv154, R.shape([batch_size, 1, 20, 64]))
+ reshape815: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape814, R.shape([batch_size, 1, 1280]))
+ lv286 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_10_self_attn_out_proj_weight3, reshape815, model_decoder_layers_10_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add682: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add678, lv286)
+ layer_norm193: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add682, model_decoder_layers_10_encoder_attn_layer_norm_weight3, model_decoder_layers_10_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv287 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_10_encoder_attn_q_proj_weight3, layer_norm193, model_decoder_layers_10_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape816: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv287, R.shape([batch_size, 1, 20, 64]))
+ reshape817: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape816, R.shape([batch_size, 20, 64]))
+ lv155 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape817), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape818: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv155, R.shape([batch_size, 1, 20, 64]))
+ reshape819: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape818, R.shape([batch_size, 1, 1280]))
+ lv288 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_10_encoder_attn_out_proj_weight3, reshape819, model_decoder_layers_10_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add685: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add682, lv288)
+ layer_norm194: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add685, model_decoder_layers_10_final_layer_norm_weight3, model_decoder_layers_10_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv42 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_10_fc1_weight3, layer_norm194, model_decoder_layers_10_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv289 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_10_fc2_weight3, lv42, model_decoder_layers_10_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add688: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add685, lv289)
+ layer_norm195: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add688, model_decoder_layers_11_self_attn_layer_norm_weight3, model_decoder_layers_11_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv290 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_11_self_attn_q_proj_weight3, layer_norm195, model_decoder_layers_11_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape820: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv290, R.shape([batch_size, 1, 20, 64]))
+ lv76 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_11_self_attn_k_proj_weight3, layer_norm195), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape821: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv76, R.shape([batch_size, 1, 20, 64]))
+ lv291 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_11_self_attn_v_proj_weight3, layer_norm195, model_decoder_layers_11_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape822: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv291, R.shape([batch_size, 1, 20, 64]))
+ concat43: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape820, reshape821, reshape822), axis=2)
+ reshape823: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat43, R.shape([batch_size, 60, 64]))
+ lv156 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape823), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape824: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv156, R.shape([batch_size, 1, 20, 64]))
+ reshape825: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape824, R.shape([batch_size, 1, 1280]))
+ lv292 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_11_self_attn_out_proj_weight3, reshape825, model_decoder_layers_11_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add692: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add688, lv292)
+ layer_norm196: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add692, model_decoder_layers_11_encoder_attn_layer_norm_weight3, model_decoder_layers_11_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv293 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_11_encoder_attn_q_proj_weight3, layer_norm196, model_decoder_layers_11_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ reshape826: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv293, R.shape([batch_size, 1, 20, 64]))
+ reshape827: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape826, R.shape([batch_size, 20, 64]))
+ lv157 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape827), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16"))
+ reshape828: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv157, R.shape([batch_size, 1, 20, 64]))
+ reshape829: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape828, R.shape([batch_size, 1, 1280]))
+ lv294 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_11_encoder_attn_out_proj_weight3, reshape829, model_decoder_layers_11_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add695: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add692, lv294)
+ layer_norm197: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add695, model_decoder_layers_11_final_layer_norm_weight3, model_decoder_layers_11_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv43 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_11_fc1_weight3, layer_norm197, model_decoder_layers_11_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16"))
+ lv295 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_11_fc2_weight3, lv43, model_decoder_layers_11_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16"))
+ add698: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add695, lv295)
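+ # --- decoder layer 12 (editor annotation; pattern as documented above) ---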
R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_12_self_attn_k_proj_weight3, layer_norm198), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape831: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv77, R.shape([batch_size, 1, 20, 64])) + lv297 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_12_self_attn_v_proj_weight3, layer_norm198, model_decoder_layers_12_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape832: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv297, R.shape([batch_size, 1, 20, 64])) + concat44: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape830, reshape831, reshape832), axis=2) + reshape833: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat44, R.shape([batch_size, 60, 64])) + lv158 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape833), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape834: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv158, R.shape([batch_size, 1, 20, 64])) + reshape835: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape834, R.shape([batch_size, 1, 1280])) + lv298 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_12_self_attn_out_proj_weight3, reshape835, model_decoder_layers_12_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add702: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add698, lv298) + layer_norm199: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add702, model_decoder_layers_12_encoder_attn_layer_norm_weight3, model_decoder_layers_12_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv299 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_12_encoder_attn_q_proj_weight3, layer_norm199, model_decoder_layers_12_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape836: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv299, R.shape([batch_size, 1, 20, 64])) + reshape837: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape836, R.shape([batch_size, 20, 64])) + lv159 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape837), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape838: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv159, R.shape([batch_size, 1, 20, 64])) + reshape839: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape838, R.shape([batch_size, 1, 1280])) + lv300 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_12_encoder_attn_out_proj_weight3, reshape839, model_decoder_layers_12_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add705: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add702, lv300) + layer_norm200: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add705, model_decoder_layers_12_final_layer_norm_weight3, model_decoder_layers_12_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, 
scale=True) + lv44 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_12_fc1_weight3, layer_norm200, model_decoder_layers_12_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv301 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_12_fc2_weight3, lv44, model_decoder_layers_12_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add708: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add705, lv301) + layer_norm201: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add708, model_decoder_layers_13_self_attn_layer_norm_weight3, model_decoder_layers_13_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv302 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_13_self_attn_q_proj_weight3, layer_norm201, model_decoder_layers_13_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape840: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv302, R.shape([batch_size, 1, 20, 64])) + lv78 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_13_self_attn_k_proj_weight3, layer_norm201), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape841: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv78, R.shape([batch_size, 1, 20, 64])) + lv303 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_13_self_attn_v_proj_weight3, layer_norm201, model_decoder_layers_13_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape842: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv303, R.shape([batch_size, 1, 20, 64])) + concat45: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape840, reshape841, reshape842), axis=2) + reshape843: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat45, R.shape([batch_size, 60, 64])) + lv160 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape843), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape844: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv160, R.shape([batch_size, 1, 20, 64])) + reshape845: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape844, R.shape([batch_size, 1, 1280])) + lv304 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_13_self_attn_out_proj_weight3, reshape845, model_decoder_layers_13_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add712: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add708, lv304) + layer_norm202: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add712, model_decoder_layers_13_encoder_attn_layer_norm_weight3, model_decoder_layers_13_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv305 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_13_encoder_attn_q_proj_weight3, layer_norm202, model_decoder_layers_13_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape846: R.Tensor((batch_size, 1, 20, 64), 
dtype="float16") = R.reshape(lv305, R.shape([batch_size, 1, 20, 64])) + reshape847: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape846, R.shape([batch_size, 20, 64])) + lv161 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape847), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape848: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv161, R.shape([batch_size, 1, 20, 64])) + reshape849: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape848, R.shape([batch_size, 1, 1280])) + lv306 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_13_encoder_attn_out_proj_weight3, reshape849, model_decoder_layers_13_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add715: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add712, lv306) + layer_norm203: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add715, model_decoder_layers_13_final_layer_norm_weight3, model_decoder_layers_13_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv45 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_13_fc1_weight3, layer_norm203, model_decoder_layers_13_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv307 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_13_fc2_weight3, lv45, model_decoder_layers_13_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add718: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add715, lv307) + layer_norm204: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add718, model_decoder_layers_14_self_attn_layer_norm_weight3, model_decoder_layers_14_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv308 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_14_self_attn_q_proj_weight3, layer_norm204, model_decoder_layers_14_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape850: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv308, R.shape([batch_size, 1, 20, 64])) + lv79 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_14_self_attn_k_proj_weight3, layer_norm204), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape851: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv79, R.shape([batch_size, 1, 20, 64])) + lv309 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_14_self_attn_v_proj_weight3, layer_norm204, model_decoder_layers_14_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape852: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv309, R.shape([batch_size, 1, 20, 64])) + concat46: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape850, reshape851, reshape852), axis=2) + reshape853: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat46, R.shape([batch_size, 60, 64])) + lv162 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape853), 
out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape854: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv162, R.shape([batch_size, 1, 20, 64])) + reshape855: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape854, R.shape([batch_size, 1, 1280])) + lv310 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_14_self_attn_out_proj_weight3, reshape855, model_decoder_layers_14_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add722: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add718, lv310) + layer_norm205: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add722, model_decoder_layers_14_encoder_attn_layer_norm_weight3, model_decoder_layers_14_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv311 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_14_encoder_attn_q_proj_weight3, layer_norm205, model_decoder_layers_14_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape856: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv311, R.shape([batch_size, 1, 20, 64])) + reshape857: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape856, R.shape([batch_size, 20, 64])) + lv163 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape857), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape858: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv163, R.shape([batch_size, 1, 20, 64])) + reshape859: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape858, R.shape([batch_size, 1, 1280])) + lv312 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_14_encoder_attn_out_proj_weight3, reshape859, model_decoder_layers_14_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add725: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add722, lv312) + layer_norm206: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add725, model_decoder_layers_14_final_layer_norm_weight3, model_decoder_layers_14_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv46 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_14_fc1_weight3, layer_norm206, model_decoder_layers_14_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv313 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_14_fc2_weight3, lv46, model_decoder_layers_14_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add728: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add725, lv313) + layer_norm207: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add728, model_decoder_layers_15_self_attn_layer_norm_weight3, model_decoder_layers_15_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv314 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_15_self_attn_q_proj_weight3, layer_norm207, model_decoder_layers_15_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), 
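+ # --- decoder layer 15 (editor annotation; pattern as documented above) ---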
dtype="float16")) + reshape860: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv314, R.shape([batch_size, 1, 20, 64])) + lv80 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_15_self_attn_k_proj_weight3, layer_norm207), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape861: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv80, R.shape([batch_size, 1, 20, 64])) + lv315 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_15_self_attn_v_proj_weight3, layer_norm207, model_decoder_layers_15_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape862: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv315, R.shape([batch_size, 1, 20, 64])) + concat47: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape860, reshape861, reshape862), axis=2) + reshape863: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat47, R.shape([batch_size, 60, 64])) + lv164 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape863), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape864: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv164, R.shape([batch_size, 1, 20, 64])) + reshape865: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape864, R.shape([batch_size, 1, 1280])) + lv316 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_15_self_attn_out_proj_weight3, reshape865, model_decoder_layers_15_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add732: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add728, lv316) + layer_norm208: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add732, model_decoder_layers_15_encoder_attn_layer_norm_weight3, model_decoder_layers_15_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv317 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_15_encoder_attn_q_proj_weight3, layer_norm208, model_decoder_layers_15_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape866: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv317, R.shape([batch_size, 1, 20, 64])) + reshape867: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape866, R.shape([batch_size, 20, 64])) + lv165 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape867), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape868: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv165, R.shape([batch_size, 1, 20, 64])) + reshape869: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape868, R.shape([batch_size, 1, 1280])) + lv318 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_15_encoder_attn_out_proj_weight3, reshape869, model_decoder_layers_15_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add735: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add732, lv318) + layer_norm209: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add735, 
model_decoder_layers_15_final_layer_norm_weight3, model_decoder_layers_15_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv47 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_15_fc1_weight3, layer_norm209, model_decoder_layers_15_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv319 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_15_fc2_weight3, lv47, model_decoder_layers_15_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add738: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add735, lv319) + layer_norm210: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add738, model_decoder_layers_16_self_attn_layer_norm_weight3, model_decoder_layers_16_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv320 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_16_self_attn_q_proj_weight3, layer_norm210, model_decoder_layers_16_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape870: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv320, R.shape([batch_size, 1, 20, 64])) + lv81 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_16_self_attn_k_proj_weight3, layer_norm210), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape871: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv81, R.shape([batch_size, 1, 20, 64])) + lv321 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_16_self_attn_v_proj_weight3, layer_norm210, model_decoder_layers_16_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape872: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv321, R.shape([batch_size, 1, 20, 64])) + concat48: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape870, reshape871, reshape872), axis=2) + reshape873: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat48, R.shape([batch_size, 60, 64])) + lv166 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape873), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape874: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv166, R.shape([batch_size, 1, 20, 64])) + reshape875: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape874, R.shape([batch_size, 1, 1280])) + lv322 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_16_self_attn_out_proj_weight3, reshape875, model_decoder_layers_16_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add742: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add738, lv322) + layer_norm211: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add742, model_decoder_layers_16_encoder_attn_layer_norm_weight3, model_decoder_layers_16_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv323 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_16_encoder_attn_q_proj_weight3, layer_norm211, 
model_decoder_layers_16_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape876: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv323, R.shape([batch_size, 1, 20, 64])) + reshape877: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape876, R.shape([batch_size, 20, 64])) + lv167 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape877), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape878: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv167, R.shape([batch_size, 1, 20, 64])) + reshape879: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape878, R.shape([batch_size, 1, 1280])) + lv324 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_16_encoder_attn_out_proj_weight3, reshape879, model_decoder_layers_16_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add745: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add742, lv324) + layer_norm212: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add745, model_decoder_layers_16_final_layer_norm_weight3, model_decoder_layers_16_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv48 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_16_fc1_weight3, layer_norm212, model_decoder_layers_16_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv325 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_16_fc2_weight3, lv48, model_decoder_layers_16_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add748: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add745, lv325) + layer_norm213: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add748, model_decoder_layers_17_self_attn_layer_norm_weight3, model_decoder_layers_17_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv326 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_17_self_attn_q_proj_weight3, layer_norm213, model_decoder_layers_17_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape880: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv326, R.shape([batch_size, 1, 20, 64])) + lv82 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_17_self_attn_k_proj_weight3, layer_norm213), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape881: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv82, R.shape([batch_size, 1, 20, 64])) + lv327 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_17_self_attn_v_proj_weight3, layer_norm213, model_decoder_layers_17_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape882: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv327, R.shape([batch_size, 1, 20, 64])) + concat49: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape880, reshape881, reshape882), axis=2) + reshape883: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat49, R.shape([batch_size, 60, 64])) + lv168 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape883), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape884: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv168, R.shape([batch_size, 1, 20, 64])) + reshape885: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape884, R.shape([batch_size, 1, 1280])) + lv328 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_17_self_attn_out_proj_weight3, reshape885, model_decoder_layers_17_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add752: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add748, lv328) + layer_norm214: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add752, model_decoder_layers_17_encoder_attn_layer_norm_weight3, model_decoder_layers_17_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv329 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_17_encoder_attn_q_proj_weight3, layer_norm214, model_decoder_layers_17_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape886: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv329, R.shape([batch_size, 1, 20, 64])) + reshape887: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape886, R.shape([batch_size, 20, 64])) + lv169 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape887), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape888: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv169, R.shape([batch_size, 1, 20, 64])) + reshape889: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape888, R.shape([batch_size, 1, 1280])) + lv330 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_17_encoder_attn_out_proj_weight3, reshape889, model_decoder_layers_17_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add755: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add752, lv330) + layer_norm215: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add755, model_decoder_layers_17_final_layer_norm_weight3, model_decoder_layers_17_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv49 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_17_fc1_weight3, layer_norm215, model_decoder_layers_17_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv331 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_17_fc2_weight3, lv49, model_decoder_layers_17_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add758: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add755, lv331) + layer_norm216: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add758, model_decoder_layers_18_self_attn_layer_norm_weight3, model_decoder_layers_18_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv332 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", 
(model_decoder_layers_18_self_attn_q_proj_weight3, layer_norm216, model_decoder_layers_18_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape890: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv332, R.shape([batch_size, 1, 20, 64])) + lv83 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_18_self_attn_k_proj_weight3, layer_norm216), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape891: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv83, R.shape([batch_size, 1, 20, 64])) + lv333 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_18_self_attn_v_proj_weight3, layer_norm216, model_decoder_layers_18_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape892: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv333, R.shape([batch_size, 1, 20, 64])) + concat50: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape890, reshape891, reshape892), axis=2) + reshape893: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat50, R.shape([batch_size, 60, 64])) + lv170 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape893), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape894: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv170, R.shape([batch_size, 1, 20, 64])) + reshape895: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape894, R.shape([batch_size, 1, 1280])) + lv334 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_18_self_attn_out_proj_weight3, reshape895, model_decoder_layers_18_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add762: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add758, lv334) + layer_norm217: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add762, model_decoder_layers_18_encoder_attn_layer_norm_weight3, model_decoder_layers_18_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv335 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_18_encoder_attn_q_proj_weight3, layer_norm217, model_decoder_layers_18_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape896: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv335, R.shape([batch_size, 1, 20, 64])) + reshape897: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape896, R.shape([batch_size, 20, 64])) + lv171 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape897), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape898: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv171, R.shape([batch_size, 1, 20, 64])) + reshape899: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape898, R.shape([batch_size, 1, 1280])) + lv336 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_18_encoder_attn_out_proj_weight3, reshape899, model_decoder_layers_18_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add765: 
R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add762, lv336) + layer_norm218: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add765, model_decoder_layers_18_final_layer_norm_weight3, model_decoder_layers_18_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv50 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_18_fc1_weight3, layer_norm218, model_decoder_layers_18_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv337 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_18_fc2_weight3, lv50, model_decoder_layers_18_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add768: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add765, lv337) + layer_norm219: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add768, model_decoder_layers_19_self_attn_layer_norm_weight3, model_decoder_layers_19_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv338 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_19_self_attn_q_proj_weight3, layer_norm219, model_decoder_layers_19_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape900: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv338, R.shape([batch_size, 1, 20, 64])) + lv84 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_19_self_attn_k_proj_weight3, layer_norm219), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape901: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv84, R.shape([batch_size, 1, 20, 64])) + lv339 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_19_self_attn_v_proj_weight3, layer_norm219, model_decoder_layers_19_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape902: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv339, R.shape([batch_size, 1, 20, 64])) + concat51: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape900, reshape901, reshape902), axis=2) + reshape903: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat51, R.shape([batch_size, 60, 64])) + lv172 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape903), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape904: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv172, R.shape([batch_size, 1, 20, 64])) + reshape905: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape904, R.shape([batch_size, 1, 1280])) + lv340 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_19_self_attn_out_proj_weight3, reshape905, model_decoder_layers_19_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add772: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add768, lv340) + layer_norm220: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add772, model_decoder_layers_19_encoder_attn_layer_norm_weight3, model_decoder_layers_19_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv341 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_19_encoder_attn_q_proj_weight3, layer_norm220, model_decoder_layers_19_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape906: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv341, R.shape([batch_size, 1, 20, 64])) + reshape907: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape906, R.shape([batch_size, 20, 64])) + lv173 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape907), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape908: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv173, R.shape([batch_size, 1, 20, 64])) + reshape909: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape908, R.shape([batch_size, 1, 1280])) + lv342 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_19_encoder_attn_out_proj_weight3, reshape909, model_decoder_layers_19_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add775: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add772, lv342) + layer_norm221: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add775, model_decoder_layers_19_final_layer_norm_weight3, model_decoder_layers_19_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv51 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_19_fc1_weight3, layer_norm221, model_decoder_layers_19_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv343 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_19_fc2_weight3, lv51, model_decoder_layers_19_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add778: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add775, lv343) + layer_norm222: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add778, model_decoder_layers_20_self_attn_layer_norm_weight3, model_decoder_layers_20_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv344 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_20_self_attn_q_proj_weight3, layer_norm222, model_decoder_layers_20_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape910: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv344, R.shape([batch_size, 1, 20, 64])) + lv85 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_20_self_attn_k_proj_weight3, layer_norm222), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape911: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv85, R.shape([batch_size, 1, 20, 64])) + lv345 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_20_self_attn_v_proj_weight3, layer_norm222, model_decoder_layers_20_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape912: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv345, R.shape([batch_size, 1, 20, 64])) + concat52: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape910, reshape911, 
reshape912), axis=2) + reshape913: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat52, R.shape([batch_size, 60, 64])) + lv174 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape913), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape914: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv174, R.shape([batch_size, 1, 20, 64])) + reshape915: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape914, R.shape([batch_size, 1, 1280])) + lv346 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_20_self_attn_out_proj_weight3, reshape915, model_decoder_layers_20_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add782: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add778, lv346) + layer_norm223: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add782, model_decoder_layers_20_encoder_attn_layer_norm_weight3, model_decoder_layers_20_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv347 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_20_encoder_attn_q_proj_weight3, layer_norm223, model_decoder_layers_20_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape916: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv347, R.shape([batch_size, 1, 20, 64])) + reshape917: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape916, R.shape([batch_size, 20, 64])) + lv175 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape917), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape918: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv175, R.shape([batch_size, 1, 20, 64])) + reshape919: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape918, R.shape([batch_size, 1, 1280])) + lv348 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_20_encoder_attn_out_proj_weight3, reshape919, model_decoder_layers_20_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add785: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add782, lv348) + layer_norm224: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add785, model_decoder_layers_20_final_layer_norm_weight3, model_decoder_layers_20_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv52 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_20_fc1_weight3, layer_norm224, model_decoder_layers_20_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv349 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_20_fc2_weight3, lv52, model_decoder_layers_20_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add788: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add785, lv349) + layer_norm225: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add788, model_decoder_layers_21_self_attn_layer_norm_weight3, model_decoder_layers_21_self_attn_layer_norm_bias3, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) + lv350 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_21_self_attn_q_proj_weight3, layer_norm225, model_decoder_layers_21_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape920: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv350, R.shape([batch_size, 1, 20, 64])) + lv86 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_21_self_attn_k_proj_weight3, layer_norm225), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape921: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv86, R.shape([batch_size, 1, 20, 64])) + lv351 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_21_self_attn_v_proj_weight3, layer_norm225, model_decoder_layers_21_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape922: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv351, R.shape([batch_size, 1, 20, 64])) + concat53: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape920, reshape921, reshape922), axis=2) + reshape923: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat53, R.shape([batch_size, 60, 64])) + lv176 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape923), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape924: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv176, R.shape([batch_size, 1, 20, 64])) + reshape925: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape924, R.shape([batch_size, 1, 1280])) + lv352 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_21_self_attn_out_proj_weight3, reshape925, model_decoder_layers_21_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add792: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add788, lv352) + layer_norm226: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add792, model_decoder_layers_21_encoder_attn_layer_norm_weight3, model_decoder_layers_21_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv353 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_21_encoder_attn_q_proj_weight3, layer_norm226, model_decoder_layers_21_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape926: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv353, R.shape([batch_size, 1, 20, 64])) + reshape927: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape926, R.shape([batch_size, 20, 64])) + lv177 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape927), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape928: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv177, R.shape([batch_size, 1, 20, 64])) + reshape929: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape928, R.shape([batch_size, 1, 1280])) + lv354 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_21_encoder_attn_out_proj_weight3, reshape929, 
model_decoder_layers_21_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add795: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add792, lv354) + layer_norm227: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add795, model_decoder_layers_21_final_layer_norm_weight3, model_decoder_layers_21_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv53 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_21_fc1_weight3, layer_norm227, model_decoder_layers_21_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv355 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_21_fc2_weight3, lv53, model_decoder_layers_21_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add798: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add795, lv355) + layer_norm228: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add798, model_decoder_layers_22_self_attn_layer_norm_weight3, model_decoder_layers_22_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv356 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_22_self_attn_q_proj_weight3, layer_norm228, model_decoder_layers_22_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape930: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv356, R.shape([batch_size, 1, 20, 64])) + lv87 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_22_self_attn_k_proj_weight3, layer_norm228), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape931: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv87, R.shape([batch_size, 1, 20, 64])) + lv357 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_22_self_attn_v_proj_weight3, layer_norm228, model_decoder_layers_22_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape932: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv357, R.shape([batch_size, 1, 20, 64])) + concat54: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape930, reshape931, reshape932), axis=2) + reshape933: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat54, R.shape([batch_size, 60, 64])) + lv178 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape933), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape934: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv178, R.shape([batch_size, 1, 20, 64])) + reshape935: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape934, R.shape([batch_size, 1, 1280])) + lv358 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_22_self_attn_out_proj_weight3, reshape935, model_decoder_layers_22_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add802: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add798, lv358) + layer_norm229: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add802, model_decoder_layers_22_encoder_attn_layer_norm_weight3, 
model_decoder_layers_22_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv359 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_22_encoder_attn_q_proj_weight3, layer_norm229, model_decoder_layers_22_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape936: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv359, R.shape([batch_size, 1, 20, 64])) + reshape937: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape936, R.shape([batch_size, 20, 64])) + lv179 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape937), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape938: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv179, R.shape([batch_size, 1, 20, 64])) + reshape939: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape938, R.shape([batch_size, 1, 1280])) + lv360 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_22_encoder_attn_out_proj_weight3, reshape939, model_decoder_layers_22_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add805: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add802, lv360) + layer_norm230: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add805, model_decoder_layers_22_final_layer_norm_weight3, model_decoder_layers_22_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv54 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_22_fc1_weight3, layer_norm230, model_decoder_layers_22_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv361 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_22_fc2_weight3, lv54, model_decoder_layers_22_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add808: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add805, lv361) + layer_norm231: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add808, model_decoder_layers_23_self_attn_layer_norm_weight3, model_decoder_layers_23_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv362 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_23_self_attn_q_proj_weight3, layer_norm231, model_decoder_layers_23_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape940: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv362, R.shape([batch_size, 1, 20, 64])) + lv88 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_23_self_attn_k_proj_weight3, layer_norm231), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape941: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv88, R.shape([batch_size, 1, 20, 64])) + lv363 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_23_self_attn_v_proj_weight3, layer_norm231, model_decoder_layers_23_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape942: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv363, 
R.shape([batch_size, 1, 20, 64])) + concat55: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape940, reshape941, reshape942), axis=2) + reshape943: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat55, R.shape([batch_size, 60, 64])) + lv180 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape943), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape944: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv180, R.shape([batch_size, 1, 20, 64])) + reshape945: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape944, R.shape([batch_size, 1, 1280])) + lv364 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_23_self_attn_out_proj_weight3, reshape945, model_decoder_layers_23_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add812: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add808, lv364) + layer_norm232: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add812, model_decoder_layers_23_encoder_attn_layer_norm_weight3, model_decoder_layers_23_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv365 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_23_encoder_attn_q_proj_weight3, layer_norm232, model_decoder_layers_23_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape946: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv365, R.shape([batch_size, 1, 20, 64])) + reshape947: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape946, R.shape([batch_size, 20, 64])) + lv181 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape947), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape948: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv181, R.shape([batch_size, 1, 20, 64])) + reshape949: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape948, R.shape([batch_size, 1, 1280])) + lv366 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_23_encoder_attn_out_proj_weight3, reshape949, model_decoder_layers_23_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add815: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add812, lv366) + layer_norm233: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add815, model_decoder_layers_23_final_layer_norm_weight3, model_decoder_layers_23_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv55 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_23_fc1_weight3, layer_norm233, model_decoder_layers_23_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv367 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_23_fc2_weight3, lv55, model_decoder_layers_23_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add818: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add815, lv367) + layer_norm234: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add818, 
model_decoder_layers_24_self_attn_layer_norm_weight3, model_decoder_layers_24_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv368 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_24_self_attn_q_proj_weight3, layer_norm234, model_decoder_layers_24_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape950: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv368, R.shape([batch_size, 1, 20, 64])) + lv89 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_24_self_attn_k_proj_weight3, layer_norm234), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape951: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv89, R.shape([batch_size, 1, 20, 64])) + lv369 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_24_self_attn_v_proj_weight3, layer_norm234, model_decoder_layers_24_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape952: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv369, R.shape([batch_size, 1, 20, 64])) + concat56: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape950, reshape951, reshape952), axis=2) + reshape953: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat56, R.shape([batch_size, 60, 64])) + lv182 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape953), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape954: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv182, R.shape([batch_size, 1, 20, 64])) + reshape955: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape954, R.shape([batch_size, 1, 1280])) + lv370 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_24_self_attn_out_proj_weight3, reshape955, model_decoder_layers_24_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add822: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add818, lv370) + layer_norm235: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add822, model_decoder_layers_24_encoder_attn_layer_norm_weight3, model_decoder_layers_24_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv371 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_24_encoder_attn_q_proj_weight3, layer_norm235, model_decoder_layers_24_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape956: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv371, R.shape([batch_size, 1, 20, 64])) + reshape957: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape956, R.shape([batch_size, 20, 64])) + lv183 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape957), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape958: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv183, R.shape([batch_size, 1, 20, 64])) + reshape959: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape958, R.shape([batch_size, 1, 1280])) + lv372 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_24_encoder_attn_out_proj_weight3, reshape959, model_decoder_layers_24_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add825: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add822, lv372) + layer_norm236: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add825, model_decoder_layers_24_final_layer_norm_weight3, model_decoder_layers_24_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv56 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_24_fc1_weight3, layer_norm236, model_decoder_layers_24_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv373 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_24_fc2_weight3, lv56, model_decoder_layers_24_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add828: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add825, lv373) + layer_norm237: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add828, model_decoder_layers_25_self_attn_layer_norm_weight3, model_decoder_layers_25_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv374 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_25_self_attn_q_proj_weight3, layer_norm237, model_decoder_layers_25_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape960: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv374, R.shape([batch_size, 1, 20, 64])) + lv90 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_25_self_attn_k_proj_weight3, layer_norm237), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape961: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv90, R.shape([batch_size, 1, 20, 64])) + lv375 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_25_self_attn_v_proj_weight3, layer_norm237, model_decoder_layers_25_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape962: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv375, R.shape([batch_size, 1, 20, 64])) + concat57: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape960, reshape961, reshape962), axis=2) + reshape963: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat57, R.shape([batch_size, 60, 64])) + lv184 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape963), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape964: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv184, R.shape([batch_size, 1, 20, 64])) + reshape965: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape964, R.shape([batch_size, 1, 1280])) + lv376 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_25_self_attn_out_proj_weight3, reshape965, model_decoder_layers_25_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add832: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add828, lv376) + 
layer_norm238: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add832, model_decoder_layers_25_encoder_attn_layer_norm_weight3, model_decoder_layers_25_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv377 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_25_encoder_attn_q_proj_weight3, layer_norm238, model_decoder_layers_25_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape966: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv377, R.shape([batch_size, 1, 20, 64])) + reshape967: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape966, R.shape([batch_size, 20, 64])) + lv185 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape967), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape968: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv185, R.shape([batch_size, 1, 20, 64])) + reshape969: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape968, R.shape([batch_size, 1, 1280])) + lv378 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_25_encoder_attn_out_proj_weight3, reshape969, model_decoder_layers_25_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add835: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add832, lv378) + layer_norm239: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add835, model_decoder_layers_25_final_layer_norm_weight3, model_decoder_layers_25_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv57 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_25_fc1_weight3, layer_norm239, model_decoder_layers_25_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv379 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_25_fc2_weight3, lv57, model_decoder_layers_25_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add838: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add835, lv379) + layer_norm240: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add838, model_decoder_layers_26_self_attn_layer_norm_weight3, model_decoder_layers_26_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv380 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_26_self_attn_q_proj_weight3, layer_norm240, model_decoder_layers_26_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape970: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv380, R.shape([batch_size, 1, 20, 64])) + lv91 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_26_self_attn_k_proj_weight3, layer_norm240), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape971: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv91, R.shape([batch_size, 1, 20, 64])) + lv381 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_26_self_attn_v_proj_weight3, layer_norm240, 
model_decoder_layers_26_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape972: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv381, R.shape([batch_size, 1, 20, 64])) + concat58: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape970, reshape971, reshape972), axis=2) + reshape973: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat58, R.shape([batch_size, 60, 64])) + lv186 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape973), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape974: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv186, R.shape([batch_size, 1, 20, 64])) + reshape975: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape974, R.shape([batch_size, 1, 1280])) + lv382 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_26_self_attn_out_proj_weight3, reshape975, model_decoder_layers_26_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add842: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add838, lv382) + layer_norm241: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add842, model_decoder_layers_26_encoder_attn_layer_norm_weight3, model_decoder_layers_26_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv383 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_26_encoder_attn_q_proj_weight3, layer_norm241, model_decoder_layers_26_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape976: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv383, R.shape([batch_size, 1, 20, 64])) + reshape977: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape976, R.shape([batch_size, 20, 64])) + lv187 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape977), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape978: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv187, R.shape([batch_size, 1, 20, 64])) + reshape979: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape978, R.shape([batch_size, 1, 1280])) + lv384 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_26_encoder_attn_out_proj_weight3, reshape979, model_decoder_layers_26_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add845: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add842, lv384) + layer_norm242: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add845, model_decoder_layers_26_final_layer_norm_weight3, model_decoder_layers_26_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv58 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_26_fc1_weight3, layer_norm242, model_decoder_layers_26_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv385 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_26_fc2_weight3, lv58, model_decoder_layers_26_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 
1280), dtype="float16")) + add848: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add845, lv385) + layer_norm243: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add848, model_decoder_layers_27_self_attn_layer_norm_weight3, model_decoder_layers_27_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv386 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_27_self_attn_q_proj_weight3, layer_norm243, model_decoder_layers_27_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape980: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv386, R.shape([batch_size, 1, 20, 64])) + lv92 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_27_self_attn_k_proj_weight3, layer_norm243), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape981: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv92, R.shape([batch_size, 1, 20, 64])) + lv387 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_27_self_attn_v_proj_weight3, layer_norm243, model_decoder_layers_27_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape982: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv387, R.shape([batch_size, 1, 20, 64])) + concat59: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape980, reshape981, reshape982), axis=2) + reshape983: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat59, R.shape([batch_size, 60, 64])) + lv188 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape983), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape984: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv188, R.shape([batch_size, 1, 20, 64])) + reshape985: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape984, R.shape([batch_size, 1, 1280])) + lv388 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_27_self_attn_out_proj_weight3, reshape985, model_decoder_layers_27_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add852: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add848, lv388) + layer_norm244: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add852, model_decoder_layers_27_encoder_attn_layer_norm_weight3, model_decoder_layers_27_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv389 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_27_encoder_attn_q_proj_weight3, layer_norm244, model_decoder_layers_27_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape986: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv389, R.shape([batch_size, 1, 20, 64])) + reshape987: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape986, R.shape([batch_size, 20, 64])) + lv189 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape987), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape988: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = 
R.reshape(lv189, R.shape([batch_size, 1, 20, 64])) + reshape989: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape988, R.shape([batch_size, 1, 1280])) + lv390 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_27_encoder_attn_out_proj_weight3, reshape989, model_decoder_layers_27_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add855: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add852, lv390) + layer_norm245: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add855, model_decoder_layers_27_final_layer_norm_weight3, model_decoder_layers_27_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv59 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_27_fc1_weight3, layer_norm245, model_decoder_layers_27_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv391 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_27_fc2_weight3, lv59, model_decoder_layers_27_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add858: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add855, lv391) + layer_norm246: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add858, model_decoder_layers_28_self_attn_layer_norm_weight3, model_decoder_layers_28_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv392 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_28_self_attn_q_proj_weight3, layer_norm246, model_decoder_layers_28_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape990: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv392, R.shape([batch_size, 1, 20, 64])) + lv93 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_28_self_attn_k_proj_weight3, layer_norm246), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape991: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv93, R.shape([batch_size, 1, 20, 64])) + lv393 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_28_self_attn_v_proj_weight3, layer_norm246, model_decoder_layers_28_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape992: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv393, R.shape([batch_size, 1, 20, 64])) + concat60: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape990, reshape991, reshape992), axis=2) + reshape993: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat60, R.shape([batch_size, 60, 64])) + lv190 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape993), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape994: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv190, R.shape([batch_size, 1, 20, 64])) + reshape995: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape994, R.shape([batch_size, 1, 1280])) + lv394 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_28_self_attn_out_proj_weight3, reshape995, 
model_decoder_layers_28_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add862: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add858, lv394) + layer_norm247: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add862, model_decoder_layers_28_encoder_attn_layer_norm_weight3, model_decoder_layers_28_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv395 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_28_encoder_attn_q_proj_weight3, layer_norm247, model_decoder_layers_28_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape996: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv395, R.shape([batch_size, 1, 20, 64])) + reshape997: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape996, R.shape([batch_size, 20, 64])) + lv191 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape997), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape998: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv191, R.shape([batch_size, 1, 20, 64])) + reshape999: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape998, R.shape([batch_size, 1, 1280])) + lv396 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_28_encoder_attn_out_proj_weight3, reshape999, model_decoder_layers_28_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add865: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add862, lv396) + layer_norm248: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add865, model_decoder_layers_28_final_layer_norm_weight3, model_decoder_layers_28_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv60 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_28_fc1_weight3, layer_norm248, model_decoder_layers_28_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv397 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_28_fc2_weight3, lv60, model_decoder_layers_28_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add868: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add865, lv397) + layer_norm249: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add868, model_decoder_layers_29_self_attn_layer_norm_weight3, model_decoder_layers_29_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv398 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_29_self_attn_q_proj_weight3, layer_norm249, model_decoder_layers_29_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1000: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv398, R.shape([batch_size, 1, 20, 64])) + lv94 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_29_self_attn_k_proj_weight3, layer_norm249), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1001: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv94, R.shape([batch_size, 1, 20, 64])) + 
lv399 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_29_self_attn_v_proj_weight3, layer_norm249, model_decoder_layers_29_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1002: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv399, R.shape([batch_size, 1, 20, 64])) + concat61: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1000, reshape1001, reshape1002), axis=2) + reshape1003: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat61, R.shape([batch_size, 60, 64])) + lv192 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1003), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1004: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv192, R.shape([batch_size, 1, 20, 64])) + reshape1005: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1004, R.shape([batch_size, 1, 1280])) + lv400 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_29_self_attn_out_proj_weight3, reshape1005, model_decoder_layers_29_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add872: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add868, lv400) + layer_norm250: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add872, model_decoder_layers_29_encoder_attn_layer_norm_weight3, model_decoder_layers_29_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv401 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_29_encoder_attn_q_proj_weight3, layer_norm250, model_decoder_layers_29_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1006: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv401, R.shape([batch_size, 1, 20, 64])) + reshape1007: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1006, R.shape([batch_size, 20, 64])) + lv193 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1007), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1008: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv193, R.shape([batch_size, 1, 20, 64])) + reshape1009: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1008, R.shape([batch_size, 1, 1280])) + lv402 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_29_encoder_attn_out_proj_weight3, reshape1009, model_decoder_layers_29_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add875: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add872, lv402) + layer_norm251: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add875, model_decoder_layers_29_final_layer_norm_weight3, model_decoder_layers_29_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv61 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_29_fc1_weight3, layer_norm251, model_decoder_layers_29_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv403 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_29_fc2_weight3, lv61, model_decoder_layers_29_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add878: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add875, lv403) + layer_norm252: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add878, model_decoder_layers_30_self_attn_layer_norm_weight3, model_decoder_layers_30_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv404 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_30_self_attn_q_proj_weight3, layer_norm252, model_decoder_layers_30_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1010: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv404, R.shape([batch_size, 1, 20, 64])) + lv95 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_30_self_attn_k_proj_weight3, layer_norm252), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1011: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv95, R.shape([batch_size, 1, 20, 64])) + lv405 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_30_self_attn_v_proj_weight3, layer_norm252, model_decoder_layers_30_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1012: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv405, R.shape([batch_size, 1, 20, 64])) + concat62: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1010, reshape1011, reshape1012), axis=2) + reshape1013: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat62, R.shape([batch_size, 60, 64])) + lv194 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1013), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1014: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv194, R.shape([batch_size, 1, 20, 64])) + reshape1015: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1014, R.shape([batch_size, 1, 1280])) + lv406 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_30_self_attn_out_proj_weight3, reshape1015, model_decoder_layers_30_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add882: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add878, lv406) + layer_norm253: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add882, model_decoder_layers_30_encoder_attn_layer_norm_weight3, model_decoder_layers_30_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv407 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_30_encoder_attn_q_proj_weight3, layer_norm253, model_decoder_layers_30_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1016: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv407, R.shape([batch_size, 1, 20, 64])) + reshape1017: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1016, R.shape([batch_size, 20, 64])) + lv195 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", 
(paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1017), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1018: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv195, R.shape([batch_size, 1, 20, 64])) + reshape1019: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1018, R.shape([batch_size, 1, 1280])) + lv408 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_30_encoder_attn_out_proj_weight3, reshape1019, model_decoder_layers_30_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add885: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add882, lv408) + layer_norm254: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add885, model_decoder_layers_30_final_layer_norm_weight3, model_decoder_layers_30_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv62 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_30_fc1_weight3, layer_norm254, model_decoder_layers_30_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv409 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_30_fc2_weight3, lv62, model_decoder_layers_30_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add888: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add885, lv409) + layer_norm255: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add888, model_decoder_layers_31_self_attn_layer_norm_weight3, model_decoder_layers_31_self_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv410 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_31_self_attn_q_proj_weight3, layer_norm255, model_decoder_layers_31_self_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1020: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv410, R.shape([batch_size, 1, 20, 64])) + lv96 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul3_cublas", (model_decoder_layers_31_self_attn_k_proj_weight3, layer_norm255), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1021: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv96, R.shape([batch_size, 1, 20, 64])) + lv411 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_31_self_attn_v_proj_weight3, layer_norm255, model_decoder_layers_31_self_attn_v_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1022: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv411, R.shape([batch_size, 1, 20, 64])) + concat63: R.Tensor((batch_size, 1, 60, 64), dtype="float16") = R.concat((reshape1020, reshape1021, reshape1022), axis=2) + reshape1023: R.Tensor((batch_size, 60, 64), dtype="float16") = R.reshape(concat63, R.shape([batch_size, 60, 64])) + lv196 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1023), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1024: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv196, R.shape([batch_size, 1, 20, 64])) + reshape1025: R.Tensor((batch_size, 1, 1280), dtype="float16") = 
R.reshape(reshape1024, R.shape([batch_size, 1, 1280])) + lv412 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_31_self_attn_out_proj_weight3, reshape1025, model_decoder_layers_31_self_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add892: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add888, lv412) + layer_norm256: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add892, model_decoder_layers_31_encoder_attn_layer_norm_weight3, model_decoder_layers_31_encoder_attn_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv413 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_31_encoder_attn_q_proj_weight3, layer_norm256, model_decoder_layers_31_encoder_attn_q_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + reshape1026: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv413, R.shape([batch_size, 1, 20, 64])) + reshape1027: R.Tensor((batch_size, 20, 64), dtype="float16") = R.reshape(reshape1026, R.shape([batch_size, 20, 64])) + lv197 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1027), out_sinfo=R.Tensor((batch_size, 20, 64), dtype="float16")) + reshape1028: R.Tensor((batch_size, 1, 20, 64), dtype="float16") = R.reshape(lv197, R.shape([batch_size, 1, 20, 64])) + reshape1029: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.reshape(reshape1028, R.shape([batch_size, 1, 1280])) + lv414 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add3_cublas", (model_decoder_layers_31_encoder_attn_out_proj_weight3, reshape1029, model_decoder_layers_31_encoder_attn_out_proj_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add895: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add892, lv414) + layer_norm257: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add895, model_decoder_layers_31_final_layer_norm_weight3, model_decoder_layers_31_final_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv63 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu1_cublas", (model_decoder_layers_31_fc1_weight3, layer_norm257, model_decoder_layers_31_fc1_bias3), out_sinfo=R.Tensor((batch_size, 1, 5120), dtype="float16")) + lv415 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add4_cublas", (model_decoder_layers_31_fc2_weight3, lv63, model_decoder_layers_31_fc2_bias3), out_sinfo=R.Tensor((batch_size, 1, 1280), dtype="float16")) + add898: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.add(add895, lv415) + layer_norm258: R.Tensor((batch_size, 1, 1280), dtype="float16") = R.nn.layer_norm(add898, model_decoder_layer_norm_weight3, model_decoder_layer_norm_bias3, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv97 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul4_cublas", (model_decoder_embed_tokens_weight3, layer_norm258), out_sinfo=R.Tensor((batch_size, 1, 51866), dtype="float32")) + gv3: R.Tensor((batch_size, 1, 51866), dtype="float32") = lv97 + R.output(gv3) + return gv3 + + @R.function + def batch_encode(input_features: R.Tensor(("batch_size", 128, 3000), dtype="float16"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), 
R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), 
dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor(("batch_size", 1500, 1280), dtype="float16"): + batch_size = T.int64() + R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + with R.dataflow(): + model_encoder_conv1_weight: R.Tensor((1280, 128, 3), dtype="float16") = packed_params[0] + model_encoder_conv1_bias: R.Tensor((1280,), dtype="float16") = packed_params[1] + model_encoder_conv2_weight: R.Tensor((1280, 1280, 3), dtype="float16") = packed_params[2] + model_encoder_conv2_bias: R.Tensor((1280,), dtype="float16") = packed_params[3] + model_encoder_embed_positions_weight: R.Tensor((1500, 1280), dtype="float16") = packed_params[4] + model_encoder_layers_0_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[5] + model_encoder_layers_0_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[6] + model_encoder_layers_0_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[7] + model_encoder_layers_0_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[8] + model_encoder_layers_0_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[9] + model_encoder_layers_0_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[10] + model_encoder_layers_0_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[11] + model_encoder_layers_0_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[12] + model_encoder_layers_0_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[13] + model_encoder_layers_0_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = 
packed_params[14] + model_encoder_layers_0_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[15] + model_encoder_layers_0_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[16] + model_encoder_layers_0_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[17] + model_encoder_layers_0_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[18] + model_encoder_layers_0_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[19] + model_encoder_layers_1_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[20] + model_encoder_layers_1_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[21] + model_encoder_layers_1_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[22] + model_encoder_layers_1_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[23] + model_encoder_layers_1_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[24] + model_encoder_layers_1_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[25] + model_encoder_layers_1_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[26] + model_encoder_layers_1_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[27] + model_encoder_layers_1_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[28] + model_encoder_layers_1_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[29] + model_encoder_layers_1_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[30] + model_encoder_layers_1_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[31] + model_encoder_layers_1_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[32] + model_encoder_layers_1_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[33] + model_encoder_layers_1_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[34] + model_encoder_layers_2_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[35] + model_encoder_layers_2_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[36] + model_encoder_layers_2_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[37] + model_encoder_layers_2_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[38] + model_encoder_layers_2_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[39] + model_encoder_layers_2_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[40] + model_encoder_layers_2_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[41] + model_encoder_layers_2_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[42] + model_encoder_layers_2_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[43] + model_encoder_layers_2_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[44] + model_encoder_layers_2_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[45] + model_encoder_layers_2_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[46] + model_encoder_layers_2_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[47] + model_encoder_layers_2_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[48] + model_encoder_layers_2_final_layer_norm_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[49] + model_encoder_layers_3_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[50] + model_encoder_layers_3_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[51] + model_encoder_layers_3_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[52] + model_encoder_layers_3_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[53] + model_encoder_layers_3_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[54] + model_encoder_layers_3_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[55] + model_encoder_layers_3_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[56] + model_encoder_layers_3_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[57] + model_encoder_layers_3_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[58] + model_encoder_layers_3_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[59] + model_encoder_layers_3_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[60] + model_encoder_layers_3_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[61] + model_encoder_layers_3_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[62] + model_encoder_layers_3_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[63] + model_encoder_layers_3_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[64] + model_encoder_layers_4_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[65] + model_encoder_layers_4_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[66] + model_encoder_layers_4_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[67] + model_encoder_layers_4_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[68] + model_encoder_layers_4_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[69] + model_encoder_layers_4_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[70] + model_encoder_layers_4_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[71] + model_encoder_layers_4_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[72] + model_encoder_layers_4_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[73] + model_encoder_layers_4_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[74] + model_encoder_layers_4_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[75] + model_encoder_layers_4_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[76] + model_encoder_layers_4_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[77] + model_encoder_layers_4_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[78] + model_encoder_layers_4_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[79] + model_encoder_layers_5_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[80] + model_encoder_layers_5_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[81] + model_encoder_layers_5_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[82] + model_encoder_layers_5_self_attn_q_proj_weight: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[83] + model_encoder_layers_5_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[84] + model_encoder_layers_5_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[85] + model_encoder_layers_5_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[86] + model_encoder_layers_5_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[87] + model_encoder_layers_5_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[88] + model_encoder_layers_5_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[89] + model_encoder_layers_5_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[90] + model_encoder_layers_5_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[91] + model_encoder_layers_5_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[92] + model_encoder_layers_5_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[93] + model_encoder_layers_5_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[94] + model_encoder_layers_6_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[95] + model_encoder_layers_6_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[96] + model_encoder_layers_6_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[97] + model_encoder_layers_6_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[98] + model_encoder_layers_6_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[99] + model_encoder_layers_6_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[100] + model_encoder_layers_6_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[101] + model_encoder_layers_6_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[102] + model_encoder_layers_6_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[103] + model_encoder_layers_6_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[104] + model_encoder_layers_6_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[105] + model_encoder_layers_6_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[106] + model_encoder_layers_6_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[107] + model_encoder_layers_6_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[108] + model_encoder_layers_6_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[109] + model_encoder_layers_7_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[110] + model_encoder_layers_7_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[111] + model_encoder_layers_7_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[112] + model_encoder_layers_7_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[113] + model_encoder_layers_7_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[114] + model_encoder_layers_7_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[115] + model_encoder_layers_7_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[116] + model_encoder_layers_7_self_attn_layer_norm_weight: R.Tensor((1280,), 
dtype="float16") = packed_params[117] + model_encoder_layers_7_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[118] + model_encoder_layers_7_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[119] + model_encoder_layers_7_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[120] + model_encoder_layers_7_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[121] + model_encoder_layers_7_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[122] + model_encoder_layers_7_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[123] + model_encoder_layers_7_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[124] + model_encoder_layers_8_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[125] + model_encoder_layers_8_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[126] + model_encoder_layers_8_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[127] + model_encoder_layers_8_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[128] + model_encoder_layers_8_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[129] + model_encoder_layers_8_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[130] + model_encoder_layers_8_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[131] + model_encoder_layers_8_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[132] + model_encoder_layers_8_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[133] + model_encoder_layers_8_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[134] + model_encoder_layers_8_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[135] + model_encoder_layers_8_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[136] + model_encoder_layers_8_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[137] + model_encoder_layers_8_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[138] + model_encoder_layers_8_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[139] + model_encoder_layers_9_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[140] + model_encoder_layers_9_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[141] + model_encoder_layers_9_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[142] + model_encoder_layers_9_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[143] + model_encoder_layers_9_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[144] + model_encoder_layers_9_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[145] + model_encoder_layers_9_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[146] + model_encoder_layers_9_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[147] + model_encoder_layers_9_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[148] + model_encoder_layers_9_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[149] + model_encoder_layers_9_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[150] + model_encoder_layers_9_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = 
packed_params[151] + model_encoder_layers_9_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[152] + model_encoder_layers_9_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[153] + model_encoder_layers_9_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[154] + model_encoder_layers_10_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[155] + model_encoder_layers_10_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[156] + model_encoder_layers_10_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[157] + model_encoder_layers_10_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[158] + model_encoder_layers_10_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[159] + model_encoder_layers_10_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[160] + model_encoder_layers_10_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[161] + model_encoder_layers_10_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[162] + model_encoder_layers_10_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[163] + model_encoder_layers_10_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[164] + model_encoder_layers_10_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[165] + model_encoder_layers_10_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[166] + model_encoder_layers_10_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[167] + model_encoder_layers_10_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[168] + model_encoder_layers_10_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[169] + model_encoder_layers_11_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[170] + model_encoder_layers_11_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[171] + model_encoder_layers_11_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[172] + model_encoder_layers_11_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[173] + model_encoder_layers_11_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[174] + model_encoder_layers_11_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[175] + model_encoder_layers_11_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[176] + model_encoder_layers_11_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[177] + model_encoder_layers_11_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[178] + model_encoder_layers_11_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[179] + model_encoder_layers_11_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[180] + model_encoder_layers_11_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[181] + model_encoder_layers_11_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[182] + model_encoder_layers_11_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[183] + model_encoder_layers_11_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[184] + model_encoder_layers_12_self_attn_k_proj_weight: R.Tensor((1280, 
1280), dtype="float16") = packed_params[185] + model_encoder_layers_12_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[186] + model_encoder_layers_12_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[187] + model_encoder_layers_12_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[188] + model_encoder_layers_12_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[189] + model_encoder_layers_12_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[190] + model_encoder_layers_12_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[191] + model_encoder_layers_12_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[192] + model_encoder_layers_12_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[193] + model_encoder_layers_12_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[194] + model_encoder_layers_12_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[195] + model_encoder_layers_12_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[196] + model_encoder_layers_12_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[197] + model_encoder_layers_12_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[198] + model_encoder_layers_12_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[199] + model_encoder_layers_13_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[200] + model_encoder_layers_13_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[201] + model_encoder_layers_13_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[202] + model_encoder_layers_13_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[203] + model_encoder_layers_13_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[204] + model_encoder_layers_13_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[205] + model_encoder_layers_13_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[206] + model_encoder_layers_13_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[207] + model_encoder_layers_13_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[208] + model_encoder_layers_13_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[209] + model_encoder_layers_13_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[210] + model_encoder_layers_13_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[211] + model_encoder_layers_13_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[212] + model_encoder_layers_13_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[213] + model_encoder_layers_13_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[214] + model_encoder_layers_14_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[215] + model_encoder_layers_14_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[216] + model_encoder_layers_14_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[217] + model_encoder_layers_14_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[218] + 
model_encoder_layers_14_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[219] + model_encoder_layers_14_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[220] + model_encoder_layers_14_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[221] + model_encoder_layers_14_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[222] + model_encoder_layers_14_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[223] + model_encoder_layers_14_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[224] + model_encoder_layers_14_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[225] + model_encoder_layers_14_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[226] + model_encoder_layers_14_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[227] + model_encoder_layers_14_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[228] + model_encoder_layers_14_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[229] + model_encoder_layers_15_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[230] + model_encoder_layers_15_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[231] + model_encoder_layers_15_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[232] + model_encoder_layers_15_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[233] + model_encoder_layers_15_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[234] + model_encoder_layers_15_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[235] + model_encoder_layers_15_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[236] + model_encoder_layers_15_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[237] + model_encoder_layers_15_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[238] + model_encoder_layers_15_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[239] + model_encoder_layers_15_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[240] + model_encoder_layers_15_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[241] + model_encoder_layers_15_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[242] + model_encoder_layers_15_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[243] + model_encoder_layers_15_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[244] + model_encoder_layers_16_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[245] + model_encoder_layers_16_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[246] + model_encoder_layers_16_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[247] + model_encoder_layers_16_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[248] + model_encoder_layers_16_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[249] + model_encoder_layers_16_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[250] + model_encoder_layers_16_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[251] + model_encoder_layers_16_self_attn_layer_norm_weight: 
R.Tensor((1280,), dtype="float16") = packed_params[252] + model_encoder_layers_16_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[253] + model_encoder_layers_16_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[254] + model_encoder_layers_16_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[255] + model_encoder_layers_16_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[256] + model_encoder_layers_16_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[257] + model_encoder_layers_16_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[258] + model_encoder_layers_16_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[259] + model_encoder_layers_17_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[260] + model_encoder_layers_17_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[261] + model_encoder_layers_17_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[262] + model_encoder_layers_17_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[263] + model_encoder_layers_17_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[264] + model_encoder_layers_17_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[265] + model_encoder_layers_17_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[266] + model_encoder_layers_17_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[267] + model_encoder_layers_17_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[268] + model_encoder_layers_17_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[269] + model_encoder_layers_17_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[270] + model_encoder_layers_17_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[271] + model_encoder_layers_17_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[272] + model_encoder_layers_17_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[273] + model_encoder_layers_17_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[274] + model_encoder_layers_18_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[275] + model_encoder_layers_18_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[276] + model_encoder_layers_18_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[277] + model_encoder_layers_18_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[278] + model_encoder_layers_18_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[279] + model_encoder_layers_18_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[280] + model_encoder_layers_18_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[281] + model_encoder_layers_18_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[282] + model_encoder_layers_18_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[283] + model_encoder_layers_18_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[284] + model_encoder_layers_18_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[285] + 
model_encoder_layers_18_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[286] + model_encoder_layers_18_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[287] + model_encoder_layers_18_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[288] + model_encoder_layers_18_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[289] + model_encoder_layers_19_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[290] + model_encoder_layers_19_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[291] + model_encoder_layers_19_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[292] + model_encoder_layers_19_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[293] + model_encoder_layers_19_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[294] + model_encoder_layers_19_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[295] + model_encoder_layers_19_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[296] + model_encoder_layers_19_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[297] + model_encoder_layers_19_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[298] + model_encoder_layers_19_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[299] + model_encoder_layers_19_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[300] + model_encoder_layers_19_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[301] + model_encoder_layers_19_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[302] + model_encoder_layers_19_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[303] + model_encoder_layers_19_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[304] + model_encoder_layers_20_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[305] + model_encoder_layers_20_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[306] + model_encoder_layers_20_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[307] + model_encoder_layers_20_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[308] + model_encoder_layers_20_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[309] + model_encoder_layers_20_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[310] + model_encoder_layers_20_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[311] + model_encoder_layers_20_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[312] + model_encoder_layers_20_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[313] + model_encoder_layers_20_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[314] + model_encoder_layers_20_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[315] + model_encoder_layers_20_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[316] + model_encoder_layers_20_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[317] + model_encoder_layers_20_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[318] + model_encoder_layers_20_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = 
packed_params[319] + model_encoder_layers_21_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[320] + model_encoder_layers_21_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[321] + model_encoder_layers_21_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[322] + model_encoder_layers_21_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[323] + model_encoder_layers_21_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[324] + model_encoder_layers_21_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[325] + model_encoder_layers_21_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[326] + model_encoder_layers_21_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[327] + model_encoder_layers_21_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[328] + model_encoder_layers_21_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[329] + model_encoder_layers_21_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[330] + model_encoder_layers_21_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[331] + model_encoder_layers_21_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[332] + model_encoder_layers_21_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[333] + model_encoder_layers_21_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[334] + model_encoder_layers_22_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[335] + model_encoder_layers_22_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[336] + model_encoder_layers_22_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[337] + model_encoder_layers_22_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[338] + model_encoder_layers_22_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[339] + model_encoder_layers_22_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[340] + model_encoder_layers_22_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[341] + model_encoder_layers_22_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[342] + model_encoder_layers_22_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[343] + model_encoder_layers_22_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[344] + model_encoder_layers_22_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[345] + model_encoder_layers_22_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[346] + model_encoder_layers_22_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[347] + model_encoder_layers_22_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[348] + model_encoder_layers_22_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[349] + model_encoder_layers_23_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[350] + model_encoder_layers_23_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[351] + model_encoder_layers_23_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[352] + 
model_encoder_layers_23_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[353] + model_encoder_layers_23_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[354] + model_encoder_layers_23_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[355] + model_encoder_layers_23_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[356] + model_encoder_layers_23_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[357] + model_encoder_layers_23_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[358] + model_encoder_layers_23_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[359] + model_encoder_layers_23_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[360] + model_encoder_layers_23_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[361] + model_encoder_layers_23_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[362] + model_encoder_layers_23_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[363] + model_encoder_layers_23_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[364] + model_encoder_layers_24_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[365] + model_encoder_layers_24_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[366] + model_encoder_layers_24_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[367] + model_encoder_layers_24_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[368] + model_encoder_layers_24_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[369] + model_encoder_layers_24_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[370] + model_encoder_layers_24_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[371] + model_encoder_layers_24_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[372] + model_encoder_layers_24_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[373] + model_encoder_layers_24_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[374] + model_encoder_layers_24_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[375] + model_encoder_layers_24_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[376] + model_encoder_layers_24_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[377] + model_encoder_layers_24_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[378] + model_encoder_layers_24_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[379] + model_encoder_layers_25_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[380] + model_encoder_layers_25_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[381] + model_encoder_layers_25_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[382] + model_encoder_layers_25_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[383] + model_encoder_layers_25_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[384] + model_encoder_layers_25_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[385] + model_encoder_layers_25_self_attn_out_proj_bias: 
R.Tensor((1280,), dtype="float16") = packed_params[386] + model_encoder_layers_25_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[387] + model_encoder_layers_25_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[388] + model_encoder_layers_25_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[389] + model_encoder_layers_25_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[390] + model_encoder_layers_25_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[391] + model_encoder_layers_25_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[392] + model_encoder_layers_25_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[393] + model_encoder_layers_25_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[394] + model_encoder_layers_26_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[395] + model_encoder_layers_26_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[396] + model_encoder_layers_26_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[397] + model_encoder_layers_26_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[398] + model_encoder_layers_26_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[399] + model_encoder_layers_26_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[400] + model_encoder_layers_26_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[401] + model_encoder_layers_26_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[402] + model_encoder_layers_26_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[403] + model_encoder_layers_26_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[404] + model_encoder_layers_26_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[405] + model_encoder_layers_26_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[406] + model_encoder_layers_26_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[407] + model_encoder_layers_26_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[408] + model_encoder_layers_26_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[409] + model_encoder_layers_27_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[410] + model_encoder_layers_27_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[411] + model_encoder_layers_27_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[412] + model_encoder_layers_27_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[413] + model_encoder_layers_27_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[414] + model_encoder_layers_27_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[415] + model_encoder_layers_27_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[416] + model_encoder_layers_27_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[417] + model_encoder_layers_27_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[418] + model_encoder_layers_27_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[419] + 
model_encoder_layers_27_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[420] + model_encoder_layers_27_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[421] + model_encoder_layers_27_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[422] + model_encoder_layers_27_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[423] + model_encoder_layers_27_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[424] + model_encoder_layers_28_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[425] + model_encoder_layers_28_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[426] + model_encoder_layers_28_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[427] + model_encoder_layers_28_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[428] + model_encoder_layers_28_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[429] + model_encoder_layers_28_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[430] + model_encoder_layers_28_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[431] + model_encoder_layers_28_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[432] + model_encoder_layers_28_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[433] + model_encoder_layers_28_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[434] + model_encoder_layers_28_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[435] + model_encoder_layers_28_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[436] + model_encoder_layers_28_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[437] + model_encoder_layers_28_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[438] + model_encoder_layers_28_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[439] + model_encoder_layers_29_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[440] + model_encoder_layers_29_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[441] + model_encoder_layers_29_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[442] + model_encoder_layers_29_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[443] + model_encoder_layers_29_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[444] + model_encoder_layers_29_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[445] + model_encoder_layers_29_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[446] + model_encoder_layers_29_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[447] + model_encoder_layers_29_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[448] + model_encoder_layers_29_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[449] + model_encoder_layers_29_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[450] + model_encoder_layers_29_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[451] + model_encoder_layers_29_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[452] + model_encoder_layers_29_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[453] + 
model_encoder_layers_29_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[454]
+ model_encoder_layers_30_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[455]
+ model_encoder_layers_30_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[456]
+ model_encoder_layers_30_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[457]
+ model_encoder_layers_30_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[458]
+ model_encoder_layers_30_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[459]
+ model_encoder_layers_30_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[460]
+ model_encoder_layers_30_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[461]
+ model_encoder_layers_30_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[462]
+ model_encoder_layers_30_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[463]
+ model_encoder_layers_30_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[464]
+ model_encoder_layers_30_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[465]
+ model_encoder_layers_30_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[466]
+ model_encoder_layers_30_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[467]
+ model_encoder_layers_30_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[468]
+ model_encoder_layers_30_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[469]
+ model_encoder_layers_31_self_attn_k_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[470]
+ model_encoder_layers_31_self_attn_v_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[471]
+ model_encoder_layers_31_self_attn_v_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[472]
+ model_encoder_layers_31_self_attn_q_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[473]
+ model_encoder_layers_31_self_attn_q_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[474]
+ model_encoder_layers_31_self_attn_out_proj_weight: R.Tensor((1280, 1280), dtype="float16") = packed_params[475]
+ model_encoder_layers_31_self_attn_out_proj_bias: R.Tensor((1280,), dtype="float16") = packed_params[476]
+ model_encoder_layers_31_self_attn_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[477]
+ model_encoder_layers_31_self_attn_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[478]
+ model_encoder_layers_31_fc1_weight: R.Tensor((5120, 1280), dtype="float16") = packed_params[479]
+ model_encoder_layers_31_fc1_bias: R.Tensor((5120,), dtype="float16") = packed_params[480]
+ model_encoder_layers_31_fc2_weight: R.Tensor((1280, 5120), dtype="float16") = packed_params[481]
+ model_encoder_layers_31_fc2_bias: R.Tensor((1280,), dtype="float16") = packed_params[482]
+ model_encoder_layers_31_final_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[483]
+ model_encoder_layers_31_final_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[484]
+ model_encoder_layer_norm_weight: R.Tensor((1280,), dtype="float16") = packed_params[485]
+ model_encoder_layer_norm_bias: R.Tensor((1280,), dtype="float16") = packed_params[486]
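+ # --- editorial annotation; added for debugging readability, not part of the generated module ---
+ # End of parameter unpacking: packed_params[0:487] holds the conv frontend (two weight/bias
+ # pairs), the (1500, 1280) positional-embedding table, 32 encoder layers x 15 tensors each,
+ # and the final layer norm -- shapes consistent with a Whisper large-style audio encoder.
+ # The forward pass below runs two Conv1d+GELU stages (the second with stride 2, halving
+ # 3000 mel frames to 1500), transposes NCW -> NWC, adds the positional embeddings, and then
+ # repeats one identical pre-LayerNorm transformer block per layer.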
+ lv: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.nn.conv1d(input_features, model_encoder_conv1_weight, strides=[1], padding=[1, 1], dilation=[1], groups=1, data_layout="NCW", kernel_layout="OIW", out_layout="NCW", out_dtype="void")
+ lv1: R.Tensor((1, 1280, 1), dtype="float16") = R.reshape(model_encoder_conv1_bias, R.shape([1, 1280, 1]))
+ conv1d: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.add(lv, lv1)
+ gelu: R.Tensor((batch_size, 1280, 3000), dtype="float16") = R.nn.gelu(conv1d)
+ lv2: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.nn.conv1d(gelu, model_encoder_conv2_weight, strides=[2], padding=[1, 1], dilation=[1], groups=1, data_layout="NCW", kernel_layout="OIW", out_layout="NCW", out_dtype="void")
+ lv3: R.Tensor((1, 1280, 1), dtype="float16") = R.reshape(model_encoder_conv2_bias, R.shape([1, 1280, 1]))
+ conv1d1: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.add(lv2, lv3)
+ gelu1: R.Tensor((batch_size, 1280, 1500), dtype="float16") = R.nn.gelu(conv1d1)
+ permute_dims: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.permute_dims(gelu1, axes=[0, 2, 1])
+ add: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(permute_dims, model_encoder_embed_positions_weight)
+ layer_norm: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add, model_encoder_layers_0_self_attn_layer_norm_weight, model_encoder_layers_0_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv608 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_0_self_attn_q_proj_weight, layer_norm, model_encoder_layers_0_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16"))
+ reshape: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv608, R.shape([batch_size, 1500, 20, 64]))
+ lv131 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_0_self_attn_k_proj_weight, layer_norm), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16"))
+ reshape1: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv131, R.shape([batch_size, 1500, 20, 64]))
+ lv609 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_0_self_attn_v_proj_weight, layer_norm, model_encoder_layers_0_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16"))
+ reshape2: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv609, R.shape([batch_size, 1500, 20, 64]))
+ reshape3: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape, R.shape([batch_size * 1500, 20, 64]))
+ reshape4: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape1, R.shape([batch_size * 1500, 20, 64]))
+ reshape5: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape2, R.shape([batch_size * 1500, 20, 64]))
+ lv4 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape3, reshape4, reshape5), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16"))
+ reshape6: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv4, R.shape([batch_size, 1500, 20, 64]))
+ reshape7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape6, R.shape([batch_size, 1500, 1280]))
+ lv610 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_0_self_attn_out_proj_weight, reshape7, model_encoder_layers_0_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16"))
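+ # --- editorial annotation; added for debugging readability, not part of the generated module ---
+ # Self-attention pattern for every layer: q/k/v projections are offloaded to cuBLAS through
+ # R.call_dps_packed (k_proj uses the bias-free fused variant, matching the absence of any
+ # k_proj_bias parameter in the unpacking above), heads are flattened to
+ # (batch_size * 1500, 20, 64), and attention runs through
+ # vm.builtin.attention_kv_cache_attention_no_append with the layer index passed as
+ # R.prim_value(n) and an attention scale of T.float32(1).
+ # As a rough NumPy sketch (illustrative only; these names are hypothetical and the real
+ # kernels execute fused inside cuBLAS), each "permute_dims -> matmul -> add" call computes:
+ #   y = x @ W.T + b                   # x: (b, 1500, 1280), W: (out, 1280), b: (out,)
+ #   q = y.reshape(b * 1500, 20, 64)   # split hidden 1280 into 20 heads x 64 dims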
out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add, lv610) + layer_norm1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add4, model_encoder_layers_0_final_layer_norm_weight, model_encoder_layers_0_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv96 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_0_fc1_weight, layer_norm1, model_encoder_layers_0_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv611 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_0_fc2_weight, lv96, model_encoder_layers_0_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add4, lv611) + maximum: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add7, R.const(-65504, "float16")) + minimum: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum, R.const(65504, "float16")) + layer_norm2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum, model_encoder_layers_1_self_attn_layer_norm_weight, model_encoder_layers_1_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv612 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_1_self_attn_q_proj_weight, layer_norm2, model_encoder_layers_1_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape8: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv612, R.shape([batch_size, 1500, 20, 64])) + lv132 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_1_self_attn_k_proj_weight, layer_norm2), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape9: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv132, R.shape([batch_size, 1500, 20, 64])) + lv613 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_1_self_attn_v_proj_weight, layer_norm2, model_encoder_layers_1_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape10: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv613, R.shape([batch_size, 1500, 20, 64])) + reshape11: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape8, R.shape([batch_size * 1500, 20, 64])) + reshape12: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape9, R.shape([batch_size * 1500, 20, 64])) + reshape13: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape10, R.shape([batch_size * 1500, 20, 64])) + lv5 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape11, reshape12, reshape13), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape14: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv5, R.shape([batch_size, 1500, 20, 64])) + reshape15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape14, R.shape([batch_size, 1500, 1280])) + lv614 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_1_self_attn_out_proj_weight, reshape15, 
model_encoder_layers_1_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum, lv614) + layer_norm3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add11, model_encoder_layers_1_final_layer_norm_weight, model_encoder_layers_1_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv97 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_1_fc1_weight, layer_norm3, model_encoder_layers_1_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv615 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_1_fc2_weight, lv97, model_encoder_layers_1_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add11, lv615) + maximum1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add14, R.const(-65504, "float16")) + minimum1: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum1, R.const(65504, "float16")) + layer_norm4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum1, model_encoder_layers_2_self_attn_layer_norm_weight, model_encoder_layers_2_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv616 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_2_self_attn_q_proj_weight, layer_norm4, model_encoder_layers_2_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape16: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv616, R.shape([batch_size, 1500, 20, 64])) + lv133 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_2_self_attn_k_proj_weight, layer_norm4), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape17: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv133, R.shape([batch_size, 1500, 20, 64])) + lv617 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_2_self_attn_v_proj_weight, layer_norm4, model_encoder_layers_2_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape18: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv617, R.shape([batch_size, 1500, 20, 64])) + reshape19: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape16, R.shape([batch_size * 1500, 20, 64])) + reshape20: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape17, R.shape([batch_size * 1500, 20, 64])) + reshape21: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape18, R.shape([batch_size * 1500, 20, 64])) + lv6 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape19, reshape20, reshape21), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape22: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv6, R.shape([batch_size, 1500, 20, 64])) + reshape23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape22, R.shape([batch_size, 1500, 1280])) + lv618 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", 
(model_encoder_layers_2_self_attn_out_proj_weight, reshape23, model_encoder_layers_2_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum1, lv618) + layer_norm5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add18, model_encoder_layers_2_final_layer_norm_weight, model_encoder_layers_2_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv98 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_2_fc1_weight, layer_norm5, model_encoder_layers_2_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv619 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_2_fc2_weight, lv98, model_encoder_layers_2_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add18, lv619) + maximum2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add21, R.const(-65504, "float16")) + minimum2: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum2, R.const(65504, "float16")) + layer_norm6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum2, model_encoder_layers_3_self_attn_layer_norm_weight, model_encoder_layers_3_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv620 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_3_self_attn_q_proj_weight, layer_norm6, model_encoder_layers_3_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape24: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv620, R.shape([batch_size, 1500, 20, 64])) + lv134 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_3_self_attn_k_proj_weight, layer_norm6), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape25: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv134, R.shape([batch_size, 1500, 20, 64])) + lv621 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_3_self_attn_v_proj_weight, layer_norm6, model_encoder_layers_3_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape26: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv621, R.shape([batch_size, 1500, 20, 64])) + reshape27: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape24, R.shape([batch_size * 1500, 20, 64])) + reshape28: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape25, R.shape([batch_size * 1500, 20, 64])) + reshape29: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape26, R.shape([batch_size * 1500, 20, 64])) + lv7 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape27, reshape28, reshape29), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape30: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv7, R.shape([batch_size, 1500, 20, 64])) + reshape31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape30, R.shape([batch_size, 1500, 1280])) + lv622 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_3_self_attn_out_proj_weight, reshape31, model_encoder_layers_3_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum2, lv622) + layer_norm7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add25, model_encoder_layers_3_final_layer_norm_weight, model_encoder_layers_3_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv99 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_3_fc1_weight, layer_norm7, model_encoder_layers_3_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv623 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_3_fc2_weight, lv99, model_encoder_layers_3_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add25, lv623) + maximum3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add28, R.const(-65504, "float16")) + minimum3: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum3, R.const(65504, "float16")) + layer_norm8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum3, model_encoder_layers_4_self_attn_layer_norm_weight, model_encoder_layers_4_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv624 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_4_self_attn_q_proj_weight, layer_norm8, model_encoder_layers_4_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape32: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv624, R.shape([batch_size, 1500, 20, 64])) + lv135 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_4_self_attn_k_proj_weight, layer_norm8), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape33: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv135, R.shape([batch_size, 1500, 20, 64])) + lv625 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_4_self_attn_v_proj_weight, layer_norm8, model_encoder_layers_4_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape34: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv625, R.shape([batch_size, 1500, 20, 64])) + reshape35: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape32, R.shape([batch_size * 1500, 20, 64])) + reshape36: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape33, R.shape([batch_size * 1500, 20, 64])) + reshape37: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape34, R.shape([batch_size * 1500, 20, 64])) + lv8 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape35, reshape36, reshape37), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape38: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv8, R.shape([batch_size, 1500, 20, 64])) + reshape39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = 
R.reshape(reshape38, R.shape([batch_size, 1500, 1280])) + lv626 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_4_self_attn_out_proj_weight, reshape39, model_encoder_layers_4_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add32: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum3, lv626) + layer_norm9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add32, model_encoder_layers_4_final_layer_norm_weight, model_encoder_layers_4_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv100 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_4_fc1_weight, layer_norm9, model_encoder_layers_4_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv627 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_4_fc2_weight, lv100, model_encoder_layers_4_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add35: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add32, lv627) + maximum4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add35, R.const(-65504, "float16")) + minimum4: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum4, R.const(65504, "float16")) + layer_norm10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum4, model_encoder_layers_5_self_attn_layer_norm_weight, model_encoder_layers_5_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv628 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_5_self_attn_q_proj_weight, layer_norm10, model_encoder_layers_5_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape40: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv628, R.shape([batch_size, 1500, 20, 64])) + lv136 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_5_self_attn_k_proj_weight, layer_norm10), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape41: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv136, R.shape([batch_size, 1500, 20, 64])) + lv629 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_5_self_attn_v_proj_weight, layer_norm10, model_encoder_layers_5_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape42: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv629, R.shape([batch_size, 1500, 20, 64])) + reshape43: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape40, R.shape([batch_size * 1500, 20, 64])) + reshape44: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape41, R.shape([batch_size * 1500, 20, 64])) + reshape45: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape42, R.shape([batch_size * 1500, 20, 64])) + lv9 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape43, reshape44, reshape45), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape46: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv9, R.shape([batch_size, 1500, 20, 64])) + reshape47: 
R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape46, R.shape([batch_size, 1500, 1280])) + lv630 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_5_self_attn_out_proj_weight, reshape47, model_encoder_layers_5_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum4, lv630) + layer_norm11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add39, model_encoder_layers_5_final_layer_norm_weight, model_encoder_layers_5_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv101 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_5_fc1_weight, layer_norm11, model_encoder_layers_5_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv631 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_5_fc2_weight, lv101, model_encoder_layers_5_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add42: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add39, lv631) + maximum5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add42, R.const(-65504, "float16")) + minimum5: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum5, R.const(65504, "float16")) + layer_norm12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum5, model_encoder_layers_6_self_attn_layer_norm_weight, model_encoder_layers_6_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv632 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_6_self_attn_q_proj_weight, layer_norm12, model_encoder_layers_6_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape48: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv632, R.shape([batch_size, 1500, 20, 64])) + lv137 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_6_self_attn_k_proj_weight, layer_norm12), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape49: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv137, R.shape([batch_size, 1500, 20, 64])) + lv633 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_6_self_attn_v_proj_weight, layer_norm12, model_encoder_layers_6_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape50: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv633, R.shape([batch_size, 1500, 20, 64])) + reshape51: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape48, R.shape([batch_size * 1500, 20, 64])) + reshape52: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape49, R.shape([batch_size * 1500, 20, 64])) + reshape53: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape50, R.shape([batch_size * 1500, 20, 64])) + lv10 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape51, reshape52, reshape53), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape54: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = 
R.reshape(lv10, R.shape([batch_size, 1500, 20, 64])) + reshape55: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape54, R.shape([batch_size, 1500, 1280])) + lv634 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_6_self_attn_out_proj_weight, reshape55, model_encoder_layers_6_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add46: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum5, lv634) + layer_norm13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add46, model_encoder_layers_6_final_layer_norm_weight, model_encoder_layers_6_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv102 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_6_fc1_weight, layer_norm13, model_encoder_layers_6_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv635 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_6_fc2_weight, lv102, model_encoder_layers_6_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add49: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add46, lv635) + maximum6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add49, R.const(-65504, "float16")) + minimum6: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum6, R.const(65504, "float16")) + layer_norm14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum6, model_encoder_layers_7_self_attn_layer_norm_weight, model_encoder_layers_7_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv636 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_7_self_attn_q_proj_weight, layer_norm14, model_encoder_layers_7_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape56: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv636, R.shape([batch_size, 1500, 20, 64])) + lv138 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_7_self_attn_k_proj_weight, layer_norm14), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape57: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv138, R.shape([batch_size, 1500, 20, 64])) + lv637 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_7_self_attn_v_proj_weight, layer_norm14, model_encoder_layers_7_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape58: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv637, R.shape([batch_size, 1500, 20, 64])) + reshape59: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape56, R.shape([batch_size * 1500, 20, 64])) + reshape60: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape57, R.shape([batch_size * 1500, 20, 64])) + reshape61: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape58, R.shape([batch_size * 1500, 20, 64])) + lv11 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape59, reshape60, reshape61), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + 
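# NOTE (hand-added debug annotation, not TVM printer output): every encoder layer in this trace repeats one pre-LN block: R.nn.layer_norm -> fused cuBLAS q/k/v projections (q and v include a bias add, k does not) -> reshape to the (batch_size * 1500, 20, 64) per-head layout -> vm.builtin.attention_kv_cache_attention_no_append keyed by the layer index -> fused out_proj -> residual add, followed by the fc1/gelu/fc2 MLP and a second residual. + 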
reshape62: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv11, R.shape([batch_size, 1500, 20, 64])) + reshape63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape62, R.shape([batch_size, 1500, 1280])) + lv638 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_7_self_attn_out_proj_weight, reshape63, model_encoder_layers_7_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add53: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum6, lv638) + layer_norm15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add53, model_encoder_layers_7_final_layer_norm_weight, model_encoder_layers_7_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv103 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_7_fc1_weight, layer_norm15, model_encoder_layers_7_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv639 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_7_fc2_weight, lv103, model_encoder_layers_7_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add56: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add53, lv639) + maximum7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add56, R.const(-65504, "float16")) + minimum7: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum7, R.const(65504, "float16")) + layer_norm16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum7, model_encoder_layers_8_self_attn_layer_norm_weight, model_encoder_layers_8_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv640 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_8_self_attn_q_proj_weight, layer_norm16, model_encoder_layers_8_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape64: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv640, R.shape([batch_size, 1500, 20, 64])) + lv139 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_8_self_attn_k_proj_weight, layer_norm16), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape65: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv139, R.shape([batch_size, 1500, 20, 64])) + lv641 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_8_self_attn_v_proj_weight, layer_norm16, model_encoder_layers_8_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape66: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv641, R.shape([batch_size, 1500, 20, 64])) + reshape67: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape64, R.shape([batch_size * 1500, 20, 64])) + reshape68: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape65, R.shape([batch_size * 1500, 20, 64])) + reshape69: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape66, R.shape([batch_size * 1500, 20, 64])) + lv12 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape67, reshape68, reshape69), 
out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape70: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv12, R.shape([batch_size, 1500, 20, 64])) + reshape71: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape70, R.shape([batch_size, 1500, 1280])) + lv642 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_8_self_attn_out_proj_weight, reshape71, model_encoder_layers_8_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add60: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum7, lv642) + layer_norm17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add60, model_encoder_layers_8_final_layer_norm_weight, model_encoder_layers_8_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv104 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_8_fc1_weight, layer_norm17, model_encoder_layers_8_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv643 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_8_fc2_weight, lv104, model_encoder_layers_8_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add60, lv643) + maximum8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add63, R.const(-65504, "float16")) + minimum8: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum8, R.const(65504, "float16")) + layer_norm18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum8, model_encoder_layers_9_self_attn_layer_norm_weight, model_encoder_layers_9_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv644 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_9_self_attn_q_proj_weight, layer_norm18, model_encoder_layers_9_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape72: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv644, R.shape([batch_size, 1500, 20, 64])) + lv140 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_9_self_attn_k_proj_weight, layer_norm18), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape73: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv140, R.shape([batch_size, 1500, 20, 64])) + lv645 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_9_self_attn_v_proj_weight, layer_norm18, model_encoder_layers_9_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape74: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv645, R.shape([batch_size, 1500, 20, 64])) + reshape75: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape72, R.shape([batch_size * 1500, 20, 64])) + reshape76: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape73, R.shape([batch_size * 1500, 20, 64])) + reshape77: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape74, R.shape([batch_size * 1500, 20, 64])) + lv13 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(9), 
R.prim_value(T.float32(1)), reshape75, reshape76, reshape77), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape78: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv13, R.shape([batch_size, 1500, 20, 64])) + reshape79: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape78, R.shape([batch_size, 1500, 1280])) + lv646 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_9_self_attn_out_proj_weight, reshape79, model_encoder_layers_9_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add67: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum8, lv646) + layer_norm19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add67, model_encoder_layers_9_final_layer_norm_weight, model_encoder_layers_9_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv105 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_9_fc1_weight, layer_norm19, model_encoder_layers_9_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv647 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_9_fc2_weight, lv105, model_encoder_layers_9_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add70: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add67, lv647) + maximum9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add70, R.const(-65504, "float16")) + minimum9: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum9, R.const(65504, "float16")) + layer_norm20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum9, model_encoder_layers_10_self_attn_layer_norm_weight, model_encoder_layers_10_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv648 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_10_self_attn_q_proj_weight, layer_norm20, model_encoder_layers_10_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape80: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv648, R.shape([batch_size, 1500, 20, 64])) + lv141 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_10_self_attn_k_proj_weight, layer_norm20), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape81: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv141, R.shape([batch_size, 1500, 20, 64])) + lv649 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_10_self_attn_v_proj_weight, layer_norm20, model_encoder_layers_10_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape82: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv649, R.shape([batch_size, 1500, 20, 64])) + reshape83: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape80, R.shape([batch_size * 1500, 20, 64])) + reshape84: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape81, R.shape([batch_size * 1500, 20, 64])) + reshape85: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape82, R.shape([batch_size * 1500, 20, 64])) + lv14 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape83, reshape84, reshape85), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape86: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv14, R.shape([batch_size, 1500, 20, 64])) + reshape87: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape86, R.shape([batch_size, 1500, 1280])) + lv650 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_10_self_attn_out_proj_weight, reshape87, model_encoder_layers_10_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add74: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum9, lv650) + layer_norm21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add74, model_encoder_layers_10_final_layer_norm_weight, model_encoder_layers_10_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv106 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_10_fc1_weight, layer_norm21, model_encoder_layers_10_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv651 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_10_fc2_weight, lv106, model_encoder_layers_10_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add77: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add74, lv651) + maximum10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add77, R.const(-65504, "float16")) + minimum10: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum10, R.const(65504, "float16")) + layer_norm22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum10, model_encoder_layers_11_self_attn_layer_norm_weight, model_encoder_layers_11_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv652 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_11_self_attn_q_proj_weight, layer_norm22, model_encoder_layers_11_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape88: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv652, R.shape([batch_size, 1500, 20, 64])) + lv142 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_11_self_attn_k_proj_weight, layer_norm22), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape89: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv142, R.shape([batch_size, 1500, 20, 64])) + lv653 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_11_self_attn_v_proj_weight, layer_norm22, model_encoder_layers_11_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape90: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv653, R.shape([batch_size, 1500, 20, 64])) + reshape91: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape88, R.shape([batch_size * 1500, 20, 64])) + reshape92: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape89, R.shape([batch_size * 1500, 20, 64])) + reshape93: R.Tensor((batch_size * 1500, 20, 64), 
dtype="float16") = R.reshape(reshape90, R.shape([batch_size * 1500, 20, 64])) + lv15 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape91, reshape92, reshape93), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape94: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv15, R.shape([batch_size, 1500, 20, 64])) + reshape95: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape94, R.shape([batch_size, 1500, 1280])) + lv654 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_11_self_attn_out_proj_weight, reshape95, model_encoder_layers_11_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add81: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum10, lv654) + layer_norm23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add81, model_encoder_layers_11_final_layer_norm_weight, model_encoder_layers_11_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv107 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_11_fc1_weight, layer_norm23, model_encoder_layers_11_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv655 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_11_fc2_weight, lv107, model_encoder_layers_11_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add84: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add81, lv655) + maximum11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add84, R.const(-65504, "float16")) + minimum11: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum11, R.const(65504, "float16")) + layer_norm24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum11, model_encoder_layers_12_self_attn_layer_norm_weight, model_encoder_layers_12_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv656 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_12_self_attn_q_proj_weight, layer_norm24, model_encoder_layers_12_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape96: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv656, R.shape([batch_size, 1500, 20, 64])) + lv143 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_12_self_attn_k_proj_weight, layer_norm24), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape97: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv143, R.shape([batch_size, 1500, 20, 64])) + lv657 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_12_self_attn_v_proj_weight, layer_norm24, model_encoder_layers_12_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape98: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv657, R.shape([batch_size, 1500, 20, 64])) + reshape99: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape96, R.shape([batch_size * 1500, 20, 64])) + reshape100: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape97, 
R.shape([batch_size * 1500, 20, 64])) + reshape101: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape98, R.shape([batch_size * 1500, 20, 64])) + lv16 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape99, reshape100, reshape101), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape102: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv16, R.shape([batch_size, 1500, 20, 64])) + reshape103: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape102, R.shape([batch_size, 1500, 1280])) + lv658 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_12_self_attn_out_proj_weight, reshape103, model_encoder_layers_12_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add88: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum11, lv658) + layer_norm25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add88, model_encoder_layers_12_final_layer_norm_weight, model_encoder_layers_12_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv108 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_12_fc1_weight, layer_norm25, model_encoder_layers_12_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv659 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_12_fc2_weight, lv108, model_encoder_layers_12_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add91: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add88, lv659) + maximum12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add91, R.const(-65504, "float16")) + minimum12: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum12, R.const(65504, "float16")) + layer_norm26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum12, model_encoder_layers_13_self_attn_layer_norm_weight, model_encoder_layers_13_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv660 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_13_self_attn_q_proj_weight, layer_norm26, model_encoder_layers_13_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape104: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv660, R.shape([batch_size, 1500, 20, 64])) + lv144 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_13_self_attn_k_proj_weight, layer_norm26), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape105: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv144, R.shape([batch_size, 1500, 20, 64])) + lv661 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_13_self_attn_v_proj_weight, layer_norm26, model_encoder_layers_13_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape106: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv661, R.shape([batch_size, 1500, 20, 64])) + reshape107: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape104, R.shape([batch_size * 1500, 20, 64])) 
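+ # NOTE (hand-added debug annotation): the R.maximum(..., R.const(-65504, "float16")) / R.minimum(..., R.const(65504, "float16")) pair after each MLP residual clamps activations to the finite float16 range, guarding this fp16 pipeline against overflow to inf.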
+ reshape108: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape105, R.shape([batch_size * 1500, 20, 64])) + reshape109: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape106, R.shape([batch_size * 1500, 20, 64])) + lv17 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape107, reshape108, reshape109), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape110: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv17, R.shape([batch_size, 1500, 20, 64])) + reshape111: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape110, R.shape([batch_size, 1500, 1280])) + lv662 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_13_self_attn_out_proj_weight, reshape111, model_encoder_layers_13_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add95: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum12, lv662) + layer_norm27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add95, model_encoder_layers_13_final_layer_norm_weight, model_encoder_layers_13_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv109 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_13_fc1_weight, layer_norm27, model_encoder_layers_13_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv663 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_13_fc2_weight, lv109, model_encoder_layers_13_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add98: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add95, lv663) + maximum13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add98, R.const(-65504, "float16")) + minimum13: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum13, R.const(65504, "float16")) + layer_norm28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum13, model_encoder_layers_14_self_attn_layer_norm_weight, model_encoder_layers_14_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv664 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_14_self_attn_q_proj_weight, layer_norm28, model_encoder_layers_14_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape112: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv664, R.shape([batch_size, 1500, 20, 64])) + lv145 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_14_self_attn_k_proj_weight, layer_norm28), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape113: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv145, R.shape([batch_size, 1500, 20, 64])) + lv665 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_14_self_attn_v_proj_weight, layer_norm28, model_encoder_layers_14_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape114: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv665, R.shape([batch_size, 1500, 20, 64])) + reshape115: R.Tensor((batch_size 
* 1500, 20, 64), dtype="float16") = R.reshape(reshape112, R.shape([batch_size * 1500, 20, 64])) + reshape116: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape113, R.shape([batch_size * 1500, 20, 64])) + reshape117: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape114, R.shape([batch_size * 1500, 20, 64])) + lv18 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape115, reshape116, reshape117), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape118: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv18, R.shape([batch_size, 1500, 20, 64])) + reshape119: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape118, R.shape([batch_size, 1500, 1280])) + lv666 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_14_self_attn_out_proj_weight, reshape119, model_encoder_layers_14_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add102: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum13, lv666) + layer_norm29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add102, model_encoder_layers_14_final_layer_norm_weight, model_encoder_layers_14_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv110 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_14_fc1_weight, layer_norm29, model_encoder_layers_14_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv667 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_14_fc2_weight, lv110, model_encoder_layers_14_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add105: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add102, lv667) + maximum14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add105, R.const(-65504, "float16")) + minimum14: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum14, R.const(65504, "float16")) + layer_norm30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum14, model_encoder_layers_15_self_attn_layer_norm_weight, model_encoder_layers_15_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv668 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_15_self_attn_q_proj_weight, layer_norm30, model_encoder_layers_15_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape120: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv668, R.shape([batch_size, 1500, 20, 64])) + lv146 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_15_self_attn_k_proj_weight, layer_norm30), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape121: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv146, R.shape([batch_size, 1500, 20, 64])) + lv669 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_15_self_attn_v_proj_weight, layer_norm30, model_encoder_layers_15_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape122: R.Tensor((batch_size, 1500, 20, 64), 
dtype="float16") = R.reshape(lv669, R.shape([batch_size, 1500, 20, 64])) + reshape123: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape120, R.shape([batch_size * 1500, 20, 64])) + reshape124: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape121, R.shape([batch_size * 1500, 20, 64])) + reshape125: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape122, R.shape([batch_size * 1500, 20, 64])) + lv19 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape123, reshape124, reshape125), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape126: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv19, R.shape([batch_size, 1500, 20, 64])) + reshape127: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape126, R.shape([batch_size, 1500, 1280])) + lv670 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_15_self_attn_out_proj_weight, reshape127, model_encoder_layers_15_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add109: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum14, lv670) + layer_norm31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add109, model_encoder_layers_15_final_layer_norm_weight, model_encoder_layers_15_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv111 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_15_fc1_weight, layer_norm31, model_encoder_layers_15_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv671 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_15_fc2_weight, lv111, model_encoder_layers_15_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add112: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add109, lv671) + maximum15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add112, R.const(-65504, "float16")) + minimum15: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum15, R.const(65504, "float16")) + layer_norm32: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum15, model_encoder_layers_16_self_attn_layer_norm_weight, model_encoder_layers_16_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv672 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_16_self_attn_q_proj_weight, layer_norm32, model_encoder_layers_16_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape128: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv672, R.shape([batch_size, 1500, 20, 64])) + lv147 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_16_self_attn_k_proj_weight, layer_norm32), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape129: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv147, R.shape([batch_size, 1500, 20, 64])) + lv673 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_16_self_attn_v_proj_weight, layer_norm32, model_encoder_layers_16_self_attn_v_proj_bias), 
out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape130: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv673, R.shape([batch_size, 1500, 20, 64])) + reshape131: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape128, R.shape([batch_size * 1500, 20, 64])) + reshape132: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape129, R.shape([batch_size * 1500, 20, 64])) + reshape133: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape130, R.shape([batch_size * 1500, 20, 64])) + lv20 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape131, reshape132, reshape133), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape134: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv20, R.shape([batch_size, 1500, 20, 64])) + reshape135: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape134, R.shape([batch_size, 1500, 1280])) + lv674 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_16_self_attn_out_proj_weight, reshape135, model_encoder_layers_16_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add116: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum15, lv674) + layer_norm33: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add116, model_encoder_layers_16_final_layer_norm_weight, model_encoder_layers_16_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv112 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_16_fc1_weight, layer_norm33, model_encoder_layers_16_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv675 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_16_fc2_weight, lv112, model_encoder_layers_16_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add119: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add116, lv675) + maximum16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add119, R.const(-65504, "float16")) + minimum16: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum16, R.const(65504, "float16")) + layer_norm34: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum16, model_encoder_layers_17_self_attn_layer_norm_weight, model_encoder_layers_17_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv676 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_17_self_attn_q_proj_weight, layer_norm34, model_encoder_layers_17_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape136: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv676, R.shape([batch_size, 1500, 20, 64])) + lv148 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_17_self_attn_k_proj_weight, layer_norm34), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape137: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv148, R.shape([batch_size, 1500, 20, 64])) + lv677 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", 
(model_encoder_layers_17_self_attn_v_proj_weight, layer_norm34, model_encoder_layers_17_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape138: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv677, R.shape([batch_size, 1500, 20, 64])) + reshape139: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape136, R.shape([batch_size * 1500, 20, 64])) + reshape140: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape137, R.shape([batch_size * 1500, 20, 64])) + reshape141: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape138, R.shape([batch_size * 1500, 20, 64])) + lv21 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape139, reshape140, reshape141), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape142: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv21, R.shape([batch_size, 1500, 20, 64])) + reshape143: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape142, R.shape([batch_size, 1500, 1280])) + lv678 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_17_self_attn_out_proj_weight, reshape143, model_encoder_layers_17_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add123: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum16, lv678) + layer_norm35: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add123, model_encoder_layers_17_final_layer_norm_weight, model_encoder_layers_17_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv113 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_17_fc1_weight, layer_norm35, model_encoder_layers_17_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv679 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_17_fc2_weight, lv113, model_encoder_layers_17_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add126: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add123, lv679) + maximum17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add126, R.const(-65504, "float16")) + minimum17: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum17, R.const(65504, "float16")) + layer_norm36: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum17, model_encoder_layers_18_self_attn_layer_norm_weight, model_encoder_layers_18_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv680 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_18_self_attn_q_proj_weight, layer_norm36, model_encoder_layers_18_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape144: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv680, R.shape([batch_size, 1500, 20, 64])) + lv149 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_18_self_attn_k_proj_weight, layer_norm36), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape145: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv149, R.shape([batch_size, 
1500, 20, 64])) + lv681 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_18_self_attn_v_proj_weight, layer_norm36, model_encoder_layers_18_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape146: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv681, R.shape([batch_size, 1500, 20, 64])) + reshape147: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape144, R.shape([batch_size * 1500, 20, 64])) + reshape148: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape145, R.shape([batch_size * 1500, 20, 64])) + reshape149: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape146, R.shape([batch_size * 1500, 20, 64])) + lv22 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape147, reshape148, reshape149), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape150: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv22, R.shape([batch_size, 1500, 20, 64])) + reshape151: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape150, R.shape([batch_size, 1500, 1280])) + lv682 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_18_self_attn_out_proj_weight, reshape151, model_encoder_layers_18_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add130: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum17, lv682) + layer_norm37: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add130, model_encoder_layers_18_final_layer_norm_weight, model_encoder_layers_18_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv114 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_18_fc1_weight, layer_norm37, model_encoder_layers_18_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv683 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_18_fc2_weight, lv114, model_encoder_layers_18_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add133: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add130, lv683) + maximum18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add133, R.const(-65504, "float16")) + minimum18: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum18, R.const(65504, "float16")) + layer_norm38: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum18, model_encoder_layers_19_self_attn_layer_norm_weight, model_encoder_layers_19_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv684 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_19_self_attn_q_proj_weight, layer_norm38, model_encoder_layers_19_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape152: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv684, R.shape([batch_size, 1500, 20, 64])) + lv150 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_19_self_attn_k_proj_weight, layer_norm38), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + 
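# NOTE: layers 15-31 below repeat one identical encoder block; only the layer
+# index passed to the paged-KV-cache builtin and the per-layer parameter names
+# change. Schematically, each block computes (an illustrative sketch in plain
+# Python, not part of the generated module; W.T reflects the permute_dims step
+# inside the fused cuBLAS kernels):
+#   h = layer_norm(x)                                # self_attn_layer_norm, eps=1e-05
+#   q = h @ Wq.T + bq; k = h @ Wk.T; v = h @ Wv.T    # k_proj carries no bias
+#   o = attention_no_append(q, k, v, layer_idx, 1.0) # on (batch_size*1500, 20, 64)
+#   x = x + (o @ Wo.T + bo)                          # out_proj + residual
+#   h = layer_norm(x)                                # final_layer_norm
+#   x = x + gelu(h @ W1.T + b1) @ W2.T + b2          # fc1 -> 5120 -> GELU -> fc2
+#   x = clip(x, -65504.0, 65504.0)                   # saturate to float16 finite range
+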
reshape153: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv150, R.shape([batch_size, 1500, 20, 64])) + lv685 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_19_self_attn_v_proj_weight, layer_norm38, model_encoder_layers_19_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape154: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv685, R.shape([batch_size, 1500, 20, 64])) + reshape155: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape152, R.shape([batch_size * 1500, 20, 64])) + reshape156: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape153, R.shape([batch_size * 1500, 20, 64])) + reshape157: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape154, R.shape([batch_size * 1500, 20, 64])) + lv23 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape155, reshape156, reshape157), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape158: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv23, R.shape([batch_size, 1500, 20, 64])) + reshape159: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape158, R.shape([batch_size, 1500, 1280])) + lv686 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_19_self_attn_out_proj_weight, reshape159, model_encoder_layers_19_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add137: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum18, lv686) + layer_norm39: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add137, model_encoder_layers_19_final_layer_norm_weight, model_encoder_layers_19_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv115 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_19_fc1_weight, layer_norm39, model_encoder_layers_19_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv687 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_19_fc2_weight, lv115, model_encoder_layers_19_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add140: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add137, lv687) + maximum19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add140, R.const(-65504, "float16")) + minimum19: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum19, R.const(65504, "float16")) + layer_norm40: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum19, model_encoder_layers_20_self_attn_layer_norm_weight, model_encoder_layers_20_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv688 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_20_self_attn_q_proj_weight, layer_norm40, model_encoder_layers_20_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape160: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv688, R.shape([batch_size, 1500, 20, 64])) + lv151 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", 
(model_encoder_layers_20_self_attn_k_proj_weight, layer_norm40), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape161: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv151, R.shape([batch_size, 1500, 20, 64])) + lv689 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_20_self_attn_v_proj_weight, layer_norm40, model_encoder_layers_20_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape162: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv689, R.shape([batch_size, 1500, 20, 64])) + reshape163: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape160, R.shape([batch_size * 1500, 20, 64])) + reshape164: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape161, R.shape([batch_size * 1500, 20, 64])) + reshape165: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape162, R.shape([batch_size * 1500, 20, 64])) + lv24 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape163, reshape164, reshape165), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape166: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv24, R.shape([batch_size, 1500, 20, 64])) + reshape167: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape166, R.shape([batch_size, 1500, 1280])) + lv690 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_20_self_attn_out_proj_weight, reshape167, model_encoder_layers_20_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add144: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum19, lv690) + layer_norm41: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add144, model_encoder_layers_20_final_layer_norm_weight, model_encoder_layers_20_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv116 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_20_fc1_weight, layer_norm41, model_encoder_layers_20_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv691 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_20_fc2_weight, lv116, model_encoder_layers_20_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add147: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add144, lv691) + maximum20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add147, R.const(-65504, "float16")) + minimum20: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum20, R.const(65504, "float16")) + layer_norm42: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum20, model_encoder_layers_21_self_attn_layer_norm_weight, model_encoder_layers_21_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv692 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_21_self_attn_q_proj_weight, layer_norm42, model_encoder_layers_21_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape168: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv692, 
R.shape([batch_size, 1500, 20, 64])) + lv152 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_21_self_attn_k_proj_weight, layer_norm42), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape169: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv152, R.shape([batch_size, 1500, 20, 64])) + lv693 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_21_self_attn_v_proj_weight, layer_norm42, model_encoder_layers_21_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape170: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv693, R.shape([batch_size, 1500, 20, 64])) + reshape171: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape168, R.shape([batch_size * 1500, 20, 64])) + reshape172: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape169, R.shape([batch_size * 1500, 20, 64])) + reshape173: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape170, R.shape([batch_size * 1500, 20, 64])) + lv25 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape171, reshape172, reshape173), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape174: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv25, R.shape([batch_size, 1500, 20, 64])) + reshape175: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape174, R.shape([batch_size, 1500, 1280])) + lv694 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_21_self_attn_out_proj_weight, reshape175, model_encoder_layers_21_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add151: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum20, lv694) + layer_norm43: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add151, model_encoder_layers_21_final_layer_norm_weight, model_encoder_layers_21_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv117 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_21_fc1_weight, layer_norm43, model_encoder_layers_21_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv695 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_21_fc2_weight, lv117, model_encoder_layers_21_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add154: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add151, lv695) + maximum21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add154, R.const(-65504, "float16")) + minimum21: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum21, R.const(65504, "float16")) + layer_norm44: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum21, model_encoder_layers_22_self_attn_layer_norm_weight, model_encoder_layers_22_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv696 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_22_self_attn_q_proj_weight, layer_norm44, model_encoder_layers_22_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), 
dtype="float16")) + reshape176: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv696, R.shape([batch_size, 1500, 20, 64])) + lv153 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_22_self_attn_k_proj_weight, layer_norm44), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape177: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv153, R.shape([batch_size, 1500, 20, 64])) + lv697 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_22_self_attn_v_proj_weight, layer_norm44, model_encoder_layers_22_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape178: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv697, R.shape([batch_size, 1500, 20, 64])) + reshape179: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape176, R.shape([batch_size * 1500, 20, 64])) + reshape180: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape177, R.shape([batch_size * 1500, 20, 64])) + reshape181: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape178, R.shape([batch_size * 1500, 20, 64])) + lv26 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape179, reshape180, reshape181), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape182: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv26, R.shape([batch_size, 1500, 20, 64])) + reshape183: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape182, R.shape([batch_size, 1500, 1280])) + lv698 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_22_self_attn_out_proj_weight, reshape183, model_encoder_layers_22_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add158: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum21, lv698) + layer_norm45: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add158, model_encoder_layers_22_final_layer_norm_weight, model_encoder_layers_22_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv118 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_22_fc1_weight, layer_norm45, model_encoder_layers_22_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv699 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_22_fc2_weight, lv118, model_encoder_layers_22_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add161: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add158, lv699) + maximum22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add161, R.const(-65504, "float16")) + minimum22: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum22, R.const(65504, "float16")) + layer_norm46: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum22, model_encoder_layers_23_self_attn_layer_norm_weight, model_encoder_layers_23_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv700 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_23_self_attn_q_proj_weight, 
layer_norm46, model_encoder_layers_23_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape184: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv700, R.shape([batch_size, 1500, 20, 64])) + lv154 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_23_self_attn_k_proj_weight, layer_norm46), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape185: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv154, R.shape([batch_size, 1500, 20, 64])) + lv701 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_23_self_attn_v_proj_weight, layer_norm46, model_encoder_layers_23_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape186: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv701, R.shape([batch_size, 1500, 20, 64])) + reshape187: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape184, R.shape([batch_size * 1500, 20, 64])) + reshape188: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape185, R.shape([batch_size * 1500, 20, 64])) + reshape189: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape186, R.shape([batch_size * 1500, 20, 64])) + lv27 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape187, reshape188, reshape189), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape190: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv27, R.shape([batch_size, 1500, 20, 64])) + reshape191: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape190, R.shape([batch_size, 1500, 1280])) + lv702 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_23_self_attn_out_proj_weight, reshape191, model_encoder_layers_23_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add165: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum22, lv702) + layer_norm47: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add165, model_encoder_layers_23_final_layer_norm_weight, model_encoder_layers_23_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv119 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_23_fc1_weight, layer_norm47, model_encoder_layers_23_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv703 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_23_fc2_weight, lv119, model_encoder_layers_23_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add168: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add165, lv703) + maximum23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add168, R.const(-65504, "float16")) + minimum23: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum23, R.const(65504, "float16")) + layer_norm48: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum23, model_encoder_layers_24_self_attn_layer_norm_weight, model_encoder_layers_24_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv704 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_24_self_attn_q_proj_weight, layer_norm48, model_encoder_layers_24_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape192: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv704, R.shape([batch_size, 1500, 20, 64])) + lv155 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_24_self_attn_k_proj_weight, layer_norm48), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape193: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv155, R.shape([batch_size, 1500, 20, 64])) + lv705 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_24_self_attn_v_proj_weight, layer_norm48, model_encoder_layers_24_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape194: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv705, R.shape([batch_size, 1500, 20, 64])) + reshape195: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape192, R.shape([batch_size * 1500, 20, 64])) + reshape196: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape193, R.shape([batch_size * 1500, 20, 64])) + reshape197: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape194, R.shape([batch_size * 1500, 20, 64])) + lv28 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape195, reshape196, reshape197), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape198: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv28, R.shape([batch_size, 1500, 20, 64])) + reshape199: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape198, R.shape([batch_size, 1500, 1280])) + lv706 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_24_self_attn_out_proj_weight, reshape199, model_encoder_layers_24_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add172: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum23, lv706) + layer_norm49: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add172, model_encoder_layers_24_final_layer_norm_weight, model_encoder_layers_24_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv120 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_24_fc1_weight, layer_norm49, model_encoder_layers_24_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv707 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_24_fc2_weight, lv120, model_encoder_layers_24_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add175: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add172, lv707) + maximum24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add175, R.const(-65504, "float16")) + minimum24: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum24, R.const(65504, "float16")) + layer_norm50: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum24, model_encoder_layers_25_self_attn_layer_norm_weight, 
model_encoder_layers_25_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv708 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_25_self_attn_q_proj_weight, layer_norm50, model_encoder_layers_25_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape200: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv708, R.shape([batch_size, 1500, 20, 64])) + lv156 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_25_self_attn_k_proj_weight, layer_norm50), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape201: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv156, R.shape([batch_size, 1500, 20, 64])) + lv709 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_25_self_attn_v_proj_weight, layer_norm50, model_encoder_layers_25_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape202: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv709, R.shape([batch_size, 1500, 20, 64])) + reshape203: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape200, R.shape([batch_size * 1500, 20, 64])) + reshape204: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape201, R.shape([batch_size * 1500, 20, 64])) + reshape205: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape202, R.shape([batch_size * 1500, 20, 64])) + lv29 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape203, reshape204, reshape205), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape206: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv29, R.shape([batch_size, 1500, 20, 64])) + reshape207: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape206, R.shape([batch_size, 1500, 1280])) + lv710 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_25_self_attn_out_proj_weight, reshape207, model_encoder_layers_25_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add179: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum24, lv710) + layer_norm51: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add179, model_encoder_layers_25_final_layer_norm_weight, model_encoder_layers_25_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv121 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_25_fc1_weight, layer_norm51, model_encoder_layers_25_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv711 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_25_fc2_weight, lv121, model_encoder_layers_25_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add182: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add179, lv711) + maximum25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add182, R.const(-65504, "float16")) + minimum25: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum25, R.const(65504, "float16")) + layer_norm52: R.Tensor((batch_size, 1500, 
1280), dtype="float16") = R.nn.layer_norm(minimum25, model_encoder_layers_26_self_attn_layer_norm_weight, model_encoder_layers_26_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv712 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_26_self_attn_q_proj_weight, layer_norm52, model_encoder_layers_26_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape208: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv712, R.shape([batch_size, 1500, 20, 64])) + lv157 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_26_self_attn_k_proj_weight, layer_norm52), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape209: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv157, R.shape([batch_size, 1500, 20, 64])) + lv713 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_26_self_attn_v_proj_weight, layer_norm52, model_encoder_layers_26_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape210: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv713, R.shape([batch_size, 1500, 20, 64])) + reshape211: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape208, R.shape([batch_size * 1500, 20, 64])) + reshape212: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape209, R.shape([batch_size * 1500, 20, 64])) + reshape213: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape210, R.shape([batch_size * 1500, 20, 64])) + lv30 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape211, reshape212, reshape213), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape214: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv30, R.shape([batch_size, 1500, 20, 64])) + reshape215: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape214, R.shape([batch_size, 1500, 1280])) + lv714 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_26_self_attn_out_proj_weight, reshape215, model_encoder_layers_26_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add186: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum25, lv714) + layer_norm53: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add186, model_encoder_layers_26_final_layer_norm_weight, model_encoder_layers_26_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv122 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_26_fc1_weight, layer_norm53, model_encoder_layers_26_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv715 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_26_fc2_weight, lv122, model_encoder_layers_26_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add189: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add186, lv715) + maximum26: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add189, R.const(-65504, "float16")) + minimum26: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.minimum(maximum26, R.const(65504, "float16")) + layer_norm54: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum26, model_encoder_layers_27_self_attn_layer_norm_weight, model_encoder_layers_27_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv716 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_27_self_attn_q_proj_weight, layer_norm54, model_encoder_layers_27_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape216: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv716, R.shape([batch_size, 1500, 20, 64])) + lv158 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_27_self_attn_k_proj_weight, layer_norm54), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape217: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv158, R.shape([batch_size, 1500, 20, 64])) + lv717 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_27_self_attn_v_proj_weight, layer_norm54, model_encoder_layers_27_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape218: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv717, R.shape([batch_size, 1500, 20, 64])) + reshape219: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape216, R.shape([batch_size * 1500, 20, 64])) + reshape220: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape217, R.shape([batch_size * 1500, 20, 64])) + reshape221: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape218, R.shape([batch_size * 1500, 20, 64])) + lv31 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape219, reshape220, reshape221), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape222: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv31, R.shape([batch_size, 1500, 20, 64])) + reshape223: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape222, R.shape([batch_size, 1500, 1280])) + lv718 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_27_self_attn_out_proj_weight, reshape223, model_encoder_layers_27_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add193: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum26, lv718) + layer_norm55: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add193, model_encoder_layers_27_final_layer_norm_weight, model_encoder_layers_27_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv123 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_27_fc1_weight, layer_norm55, model_encoder_layers_27_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv719 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_27_fc2_weight, lv123, model_encoder_layers_27_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add196: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add193, lv719) + maximum27: R.Tensor((batch_size, 1500, 1280), 
dtype="float16") = R.maximum(add196, R.const(-65504, "float16")) + minimum27: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum27, R.const(65504, "float16")) + layer_norm56: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum27, model_encoder_layers_28_self_attn_layer_norm_weight, model_encoder_layers_28_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv720 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_28_self_attn_q_proj_weight, layer_norm56, model_encoder_layers_28_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape224: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv720, R.shape([batch_size, 1500, 20, 64])) + lv159 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_28_self_attn_k_proj_weight, layer_norm56), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape225: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv159, R.shape([batch_size, 1500, 20, 64])) + lv721 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_28_self_attn_v_proj_weight, layer_norm56, model_encoder_layers_28_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape226: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv721, R.shape([batch_size, 1500, 20, 64])) + reshape227: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape224, R.shape([batch_size * 1500, 20, 64])) + reshape228: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape225, R.shape([batch_size * 1500, 20, 64])) + reshape229: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape226, R.shape([batch_size * 1500, 20, 64])) + lv32 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape227, reshape228, reshape229), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape230: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv32, R.shape([batch_size, 1500, 20, 64])) + reshape231: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape230, R.shape([batch_size, 1500, 1280])) + lv722 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_28_self_attn_out_proj_weight, reshape231, model_encoder_layers_28_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add200: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum27, lv722) + layer_norm57: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add200, model_encoder_layers_28_final_layer_norm_weight, model_encoder_layers_28_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv124 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_28_fc1_weight, layer_norm57, model_encoder_layers_28_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv723 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_28_fc2_weight, lv124, model_encoder_layers_28_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add203: R.Tensor((batch_size, 
1500, 1280), dtype="float16") = R.add(add200, lv723) + maximum28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add203, R.const(-65504, "float16")) + minimum28: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum28, R.const(65504, "float16")) + layer_norm58: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum28, model_encoder_layers_29_self_attn_layer_norm_weight, model_encoder_layers_29_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv724 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_29_self_attn_q_proj_weight, layer_norm58, model_encoder_layers_29_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape232: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv724, R.shape([batch_size, 1500, 20, 64])) + lv160 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_29_self_attn_k_proj_weight, layer_norm58), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape233: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv160, R.shape([batch_size, 1500, 20, 64])) + lv725 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_29_self_attn_v_proj_weight, layer_norm58, model_encoder_layers_29_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape234: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv725, R.shape([batch_size, 1500, 20, 64])) + reshape235: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape232, R.shape([batch_size * 1500, 20, 64])) + reshape236: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape233, R.shape([batch_size * 1500, 20, 64])) + reshape237: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape234, R.shape([batch_size * 1500, 20, 64])) + lv33 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape235, reshape236, reshape237), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape238: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv33, R.shape([batch_size, 1500, 20, 64])) + reshape239: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape238, R.shape([batch_size, 1500, 1280])) + lv726 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_29_self_attn_out_proj_weight, reshape239, model_encoder_layers_29_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add207: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum28, lv726) + layer_norm59: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add207, model_encoder_layers_29_final_layer_norm_weight, model_encoder_layers_29_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv125 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_29_fc1_weight, layer_norm59, model_encoder_layers_29_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv727 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_29_fc2_weight, lv125, 
model_encoder_layers_29_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add210: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add207, lv727) + maximum29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add210, R.const(-65504, "float16")) + minimum29: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum29, R.const(65504, "float16")) + layer_norm60: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum29, model_encoder_layers_30_self_attn_layer_norm_weight, model_encoder_layers_30_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv728 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_30_self_attn_q_proj_weight, layer_norm60, model_encoder_layers_30_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape240: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv728, R.shape([batch_size, 1500, 20, 64])) + lv161 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_30_self_attn_k_proj_weight, layer_norm60), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape241: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv161, R.shape([batch_size, 1500, 20, 64])) + lv729 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_30_self_attn_v_proj_weight, layer_norm60, model_encoder_layers_30_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape242: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv729, R.shape([batch_size, 1500, 20, 64])) + reshape243: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape240, R.shape([batch_size * 1500, 20, 64])) + reshape244: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape241, R.shape([batch_size * 1500, 20, 64])) + reshape245: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape242, R.shape([batch_size * 1500, 20, 64])) + lv34 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape243, reshape244, reshape245), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape246: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv34, R.shape([batch_size, 1500, 20, 64])) + reshape247: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape246, R.shape([batch_size, 1500, 1280])) + lv730 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_30_self_attn_out_proj_weight, reshape247, model_encoder_layers_30_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add214: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum29, lv730) + layer_norm61: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add214, model_encoder_layers_30_final_layer_norm_weight, model_encoder_layers_30_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv126 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_30_fc1_weight, layer_norm61, model_encoder_layers_30_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv731 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_30_fc2_weight, lv126, model_encoder_layers_30_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add217: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add214, lv731) + maximum30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add217, R.const(-65504, "float16")) + minimum30: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum30, R.const(65504, "float16")) + layer_norm62: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum30, model_encoder_layers_31_self_attn_layer_norm_weight, model_encoder_layers_31_self_attn_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv732 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_31_self_attn_q_proj_weight, layer_norm62, model_encoder_layers_31_self_attn_q_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape248: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv732, R.shape([batch_size, 1500, 20, 64])) + lv162 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_cublas", (model_encoder_layers_31_self_attn_k_proj_weight, layer_norm62), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape249: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv162, R.shape([batch_size, 1500, 20, 64])) + lv733 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_31_self_attn_v_proj_weight, layer_norm62, model_encoder_layers_31_self_attn_v_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + reshape250: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv733, R.shape([batch_size, 1500, 20, 64])) + reshape251: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape248, R.shape([batch_size * 1500, 20, 64])) + reshape252: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape249, R.shape([batch_size * 1500, 20, 64])) + reshape253: R.Tensor((batch_size * 1500, 20, 64), dtype="float16") = R.reshape(reshape250, R.shape([batch_size * 1500, 20, 64])) + lv35 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_no_append", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape251, reshape252, reshape253), out_sinfo=R.Tensor((batch_size * 1500, 20, 64), dtype="float16")) + reshape254: R.Tensor((batch_size, 1500, 20, 64), dtype="float16") = R.reshape(lv35, R.shape([batch_size, 1500, 20, 64])) + reshape255: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.reshape(reshape254, R.shape([batch_size, 1500, 1280])) + lv734 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_cublas", (model_encoder_layers_31_self_attn_out_proj_weight, reshape255, model_encoder_layers_31_self_attn_out_proj_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add221: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(minimum30, lv734) + layer_norm63: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(add221, model_encoder_layers_31_final_layer_norm_weight, model_encoder_layers_31_final_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv127 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu2_cublas", (model_encoder_layers_31_fc1_weight, layer_norm63, 
model_encoder_layers_31_fc1_bias), out_sinfo=R.Tensor((batch_size, 1500, 5120), dtype="float16")) + lv735 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add5_cublas", (model_encoder_layers_31_fc2_weight, lv127, model_encoder_layers_31_fc2_bias), out_sinfo=R.Tensor((batch_size, 1500, 1280), dtype="float16")) + add224: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.add(add221, lv735) + maximum31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.maximum(add224, R.const(-65504, "float16")) + minimum31: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.minimum(maximum31, R.const(65504, "float16")) + layer_norm64: R.Tensor((batch_size, 1500, 1280), dtype="float16") = R.nn.layer_norm(minimum31, model_encoder_layer_norm_weight, model_encoder_layer_norm_bias, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + gv: R.Tensor((batch_size, 1500, 1280), dtype="float16") = layer_norm64 + R.output(gv) + return gv + + @R.function + def batch_prefill(input_ids: R.Tensor((1, "seq_len"), dtype="int32"), logit_positions: R.Tensor(("batch_size",), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), 
dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor((1, "batch_size", 51866), dtype="float32"): + batch_size = T.int64() + seq_len = T.int64() + R.func_attr({"num_input": 3, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + with R.dataflow(): + model_decoder_embed_tokens_weight2: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] + model_decoder_embed_positions_weight2: R.Tensor((448, 1280), dtype="float16") = packed_params[488] + 
model_decoder_layers_0_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] + model_decoder_layers_0_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] + model_decoder_layers_0_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[491] + model_decoder_layers_0_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] + model_decoder_layers_0_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[493] + model_decoder_layers_0_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] + model_decoder_layers_0_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[495] + model_decoder_layers_0_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[496] + model_decoder_layers_0_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[497] + model_decoder_layers_0_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] + model_decoder_layers_0_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[502] + model_decoder_layers_0_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] + model_decoder_layers_0_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[504] + model_decoder_layers_0_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[505] + model_decoder_layers_0_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[506] + model_decoder_layers_0_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] + model_decoder_layers_0_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[508] + model_decoder_layers_0_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] + model_decoder_layers_0_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[510] + model_decoder_layers_0_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[511] + model_decoder_layers_0_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[512] + model_decoder_layers_1_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] + model_decoder_layers_1_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] + model_decoder_layers_1_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[515] + model_decoder_layers_1_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] + model_decoder_layers_1_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[517] + model_decoder_layers_1_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] + model_decoder_layers_1_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[519] + model_decoder_layers_1_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[520] + model_decoder_layers_1_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[521] + model_decoder_layers_1_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] + model_decoder_layers_1_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[526] + model_decoder_layers_1_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[527] + model_decoder_layers_1_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[528] + model_decoder_layers_1_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[529] + model_decoder_layers_1_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[530] + model_decoder_layers_1_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] + model_decoder_layers_1_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[532] + model_decoder_layers_1_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] + model_decoder_layers_1_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[534] + model_decoder_layers_1_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[535] + model_decoder_layers_1_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[536] + model_decoder_layers_2_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] + model_decoder_layers_2_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] + model_decoder_layers_2_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[539] + model_decoder_layers_2_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] + model_decoder_layers_2_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[541] + model_decoder_layers_2_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] + model_decoder_layers_2_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[543] + model_decoder_layers_2_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[544] + model_decoder_layers_2_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[545] + model_decoder_layers_2_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] + model_decoder_layers_2_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[550] + model_decoder_layers_2_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] + model_decoder_layers_2_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[552] + model_decoder_layers_2_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[553] + model_decoder_layers_2_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[554] + model_decoder_layers_2_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] + model_decoder_layers_2_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[556] + model_decoder_layers_2_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] + model_decoder_layers_2_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[558] + model_decoder_layers_2_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[559] + model_decoder_layers_2_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[560] + model_decoder_layers_3_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] + model_decoder_layers_3_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] + model_decoder_layers_3_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[563] 
+ model_decoder_layers_3_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] + model_decoder_layers_3_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[565] + model_decoder_layers_3_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] + model_decoder_layers_3_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[567] + model_decoder_layers_3_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[568] + model_decoder_layers_3_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[569] + model_decoder_layers_3_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] + model_decoder_layers_3_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[574] + model_decoder_layers_3_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] + model_decoder_layers_3_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[576] + model_decoder_layers_3_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[577] + model_decoder_layers_3_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[578] + model_decoder_layers_3_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] + model_decoder_layers_3_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[580] + model_decoder_layers_3_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] + model_decoder_layers_3_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[582] + model_decoder_layers_3_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[583] + model_decoder_layers_3_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[584] + model_decoder_layers_4_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] + model_decoder_layers_4_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] + model_decoder_layers_4_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[587] + model_decoder_layers_4_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] + model_decoder_layers_4_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[589] + model_decoder_layers_4_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] + model_decoder_layers_4_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[591] + model_decoder_layers_4_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[592] + model_decoder_layers_4_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[593] + model_decoder_layers_4_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] + model_decoder_layers_4_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[598] + model_decoder_layers_4_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] + model_decoder_layers_4_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[600] + model_decoder_layers_4_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[601] + model_decoder_layers_4_encoder_attn_layer_norm_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[602] + model_decoder_layers_4_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] + model_decoder_layers_4_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[604] + model_decoder_layers_4_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] + model_decoder_layers_4_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[606] + model_decoder_layers_4_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[607] + model_decoder_layers_4_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[608] + model_decoder_layers_5_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] + model_decoder_layers_5_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] + model_decoder_layers_5_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[611] + model_decoder_layers_5_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] + model_decoder_layers_5_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[613] + model_decoder_layers_5_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] + model_decoder_layers_5_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[615] + model_decoder_layers_5_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[616] + model_decoder_layers_5_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[617] + model_decoder_layers_5_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] + model_decoder_layers_5_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[622] + model_decoder_layers_5_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] + model_decoder_layers_5_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[624] + model_decoder_layers_5_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[625] + model_decoder_layers_5_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[626] + model_decoder_layers_5_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] + model_decoder_layers_5_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[628] + model_decoder_layers_5_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] + model_decoder_layers_5_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[630] + model_decoder_layers_5_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[631] + model_decoder_layers_5_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[632] + model_decoder_layers_6_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] + model_decoder_layers_6_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] + model_decoder_layers_6_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[635] + model_decoder_layers_6_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] + model_decoder_layers_6_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[637] + model_decoder_layers_6_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] + 
model_decoder_layers_6_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[639] + model_decoder_layers_6_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[640] + model_decoder_layers_6_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[641] + model_decoder_layers_6_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] + model_decoder_layers_6_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[646] + model_decoder_layers_6_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] + model_decoder_layers_6_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[648] + model_decoder_layers_6_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[649] + model_decoder_layers_6_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[650] + model_decoder_layers_6_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] + model_decoder_layers_6_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[652] + model_decoder_layers_6_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] + model_decoder_layers_6_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[654] + model_decoder_layers_6_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[655] + model_decoder_layers_6_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[656] + model_decoder_layers_7_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] + model_decoder_layers_7_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] + model_decoder_layers_7_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[659] + model_decoder_layers_7_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] + model_decoder_layers_7_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[661] + model_decoder_layers_7_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] + model_decoder_layers_7_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[663] + model_decoder_layers_7_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[664] + model_decoder_layers_7_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[665] + model_decoder_layers_7_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] + model_decoder_layers_7_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[670] + model_decoder_layers_7_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] + model_decoder_layers_7_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[672] + model_decoder_layers_7_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[673] + model_decoder_layers_7_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[674] + model_decoder_layers_7_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] + model_decoder_layers_7_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[676] + model_decoder_layers_7_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] + 
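# The fc1 (5120, 1280) and fc2 (1280, 5120) shapes give each decoder layer the usual 4x MLP expansion, 1280 -> 5120 -> 1280. +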
model_decoder_layers_7_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[678] + model_decoder_layers_7_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[679] + model_decoder_layers_7_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[680] + model_decoder_layers_8_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] + model_decoder_layers_8_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] + model_decoder_layers_8_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[683] + model_decoder_layers_8_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] + model_decoder_layers_8_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[685] + model_decoder_layers_8_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] + model_decoder_layers_8_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[687] + model_decoder_layers_8_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[688] + model_decoder_layers_8_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[689] + model_decoder_layers_8_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] + model_decoder_layers_8_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[694] + model_decoder_layers_8_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] + model_decoder_layers_8_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[696] + model_decoder_layers_8_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[697] + model_decoder_layers_8_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[698] + model_decoder_layers_8_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] + model_decoder_layers_8_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[700] + model_decoder_layers_8_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] + model_decoder_layers_8_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[702] + model_decoder_layers_8_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[703] + model_decoder_layers_8_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[704] + model_decoder_layers_9_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] + model_decoder_layers_9_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] + model_decoder_layers_9_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[707] + model_decoder_layers_9_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] + model_decoder_layers_9_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[709] + model_decoder_layers_9_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] + model_decoder_layers_9_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[711] + model_decoder_layers_9_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[712] + model_decoder_layers_9_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[713] + 
model_decoder_layers_9_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] + model_decoder_layers_9_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[718] + model_decoder_layers_9_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] + model_decoder_layers_9_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[720] + model_decoder_layers_9_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[721] + model_decoder_layers_9_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[722] + model_decoder_layers_9_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] + model_decoder_layers_9_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[724] + model_decoder_layers_9_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] + model_decoder_layers_9_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[726] + model_decoder_layers_9_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[727] + model_decoder_layers_9_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[728] + model_decoder_layers_10_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] + model_decoder_layers_10_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] + model_decoder_layers_10_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[731] + model_decoder_layers_10_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] + model_decoder_layers_10_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[733] + model_decoder_layers_10_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] + model_decoder_layers_10_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[735] + model_decoder_layers_10_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[736] + model_decoder_layers_10_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[737] + model_decoder_layers_10_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] + model_decoder_layers_10_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[742] + model_decoder_layers_10_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] + model_decoder_layers_10_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[744] + model_decoder_layers_10_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[745] + model_decoder_layers_10_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[746] + model_decoder_layers_10_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] + model_decoder_layers_10_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[748] + model_decoder_layers_10_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] + model_decoder_layers_10_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[750] + model_decoder_layers_10_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[751] + model_decoder_layers_10_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[752] + 
model_decoder_layers_11_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] + model_decoder_layers_11_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] + model_decoder_layers_11_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[755] + model_decoder_layers_11_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] + model_decoder_layers_11_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[757] + model_decoder_layers_11_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] + model_decoder_layers_11_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[759] + model_decoder_layers_11_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[760] + model_decoder_layers_11_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[761] + model_decoder_layers_11_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] + model_decoder_layers_11_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[766] + model_decoder_layers_11_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] + model_decoder_layers_11_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[768] + model_decoder_layers_11_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[769] + model_decoder_layers_11_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[770] + model_decoder_layers_11_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] + model_decoder_layers_11_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[772] + model_decoder_layers_11_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] + model_decoder_layers_11_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[774] + model_decoder_layers_11_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[775] + model_decoder_layers_11_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[776] + model_decoder_layers_12_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] + model_decoder_layers_12_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] + model_decoder_layers_12_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[779] + model_decoder_layers_12_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] + model_decoder_layers_12_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[781] + model_decoder_layers_12_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] + model_decoder_layers_12_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[783] + model_decoder_layers_12_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[784] + model_decoder_layers_12_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[785] + model_decoder_layers_12_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] + model_decoder_layers_12_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[790] + model_decoder_layers_12_encoder_attn_out_proj_weight2: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[791] + model_decoder_layers_12_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[792] + model_decoder_layers_12_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[793] + model_decoder_layers_12_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[794] + model_decoder_layers_12_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] + model_decoder_layers_12_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[796] + model_decoder_layers_12_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] + model_decoder_layers_12_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[798] + model_decoder_layers_12_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[799] + model_decoder_layers_12_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[800] + model_decoder_layers_13_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] + model_decoder_layers_13_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] + model_decoder_layers_13_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[803] + model_decoder_layers_13_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] + model_decoder_layers_13_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[805] + model_decoder_layers_13_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] + model_decoder_layers_13_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[807] + model_decoder_layers_13_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[808] + model_decoder_layers_13_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[809] + model_decoder_layers_13_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] + model_decoder_layers_13_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[814] + model_decoder_layers_13_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] + model_decoder_layers_13_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[816] + model_decoder_layers_13_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[817] + model_decoder_layers_13_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[818] + model_decoder_layers_13_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] + model_decoder_layers_13_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[820] + model_decoder_layers_13_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] + model_decoder_layers_13_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[822] + model_decoder_layers_13_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[823] + model_decoder_layers_13_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[824] + model_decoder_layers_14_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] + model_decoder_layers_14_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] + model_decoder_layers_14_self_attn_v_proj_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[827] + model_decoder_layers_14_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] + model_decoder_layers_14_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[829] + model_decoder_layers_14_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] + model_decoder_layers_14_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[831] + model_decoder_layers_14_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[832] + model_decoder_layers_14_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[833] + model_decoder_layers_14_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] + model_decoder_layers_14_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[838] + model_decoder_layers_14_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] + model_decoder_layers_14_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[840] + model_decoder_layers_14_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[841] + model_decoder_layers_14_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[842] + model_decoder_layers_14_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] + model_decoder_layers_14_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[844] + model_decoder_layers_14_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] + model_decoder_layers_14_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[846] + model_decoder_layers_14_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[847] + model_decoder_layers_14_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[848] + model_decoder_layers_15_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] + model_decoder_layers_15_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] + model_decoder_layers_15_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[851] + model_decoder_layers_15_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] + model_decoder_layers_15_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[853] + model_decoder_layers_15_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] + model_decoder_layers_15_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[855] + model_decoder_layers_15_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[856] + model_decoder_layers_15_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[857] + model_decoder_layers_15_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] + model_decoder_layers_15_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[862] + model_decoder_layers_15_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] + model_decoder_layers_15_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[864] + model_decoder_layers_15_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = 
packed_params[865] + model_decoder_layers_15_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[866] + model_decoder_layers_15_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] + model_decoder_layers_15_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[868] + model_decoder_layers_15_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] + model_decoder_layers_15_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[870] + model_decoder_layers_15_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[871] + model_decoder_layers_15_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[872] + model_decoder_layers_16_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] + model_decoder_layers_16_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] + model_decoder_layers_16_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[875] + model_decoder_layers_16_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] + model_decoder_layers_16_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[877] + model_decoder_layers_16_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] + model_decoder_layers_16_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[879] + model_decoder_layers_16_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[880] + model_decoder_layers_16_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[881] + model_decoder_layers_16_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] + model_decoder_layers_16_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[886] + model_decoder_layers_16_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] + model_decoder_layers_16_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[888] + model_decoder_layers_16_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[889] + model_decoder_layers_16_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[890] + model_decoder_layers_16_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] + model_decoder_layers_16_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[892] + model_decoder_layers_16_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] + model_decoder_layers_16_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[894] + model_decoder_layers_16_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[895] + model_decoder_layers_16_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[896] + model_decoder_layers_17_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] + model_decoder_layers_17_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] + model_decoder_layers_17_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[899] + model_decoder_layers_17_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] + model_decoder_layers_17_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[901] + model_decoder_layers_17_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] + model_decoder_layers_17_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[903] + model_decoder_layers_17_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[904] + model_decoder_layers_17_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[905] + model_decoder_layers_17_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] + model_decoder_layers_17_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[910] + model_decoder_layers_17_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] + model_decoder_layers_17_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[912] + model_decoder_layers_17_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[913] + model_decoder_layers_17_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[914] + model_decoder_layers_17_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] + model_decoder_layers_17_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[916] + model_decoder_layers_17_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] + model_decoder_layers_17_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[918] + model_decoder_layers_17_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[919] + model_decoder_layers_17_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[920] + model_decoder_layers_18_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] + model_decoder_layers_18_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] + model_decoder_layers_18_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[923] + model_decoder_layers_18_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] + model_decoder_layers_18_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[925] + model_decoder_layers_18_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] + model_decoder_layers_18_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[927] + model_decoder_layers_18_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[928] + model_decoder_layers_18_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[929] + model_decoder_layers_18_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] + model_decoder_layers_18_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[934] + model_decoder_layers_18_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] + model_decoder_layers_18_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[936] + model_decoder_layers_18_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[937] + model_decoder_layers_18_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[938] + model_decoder_layers_18_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] + 
model_decoder_layers_18_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[940] + model_decoder_layers_18_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] + model_decoder_layers_18_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[942] + model_decoder_layers_18_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[943] + model_decoder_layers_18_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[944] + model_decoder_layers_19_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] + model_decoder_layers_19_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] + model_decoder_layers_19_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[947] + model_decoder_layers_19_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] + model_decoder_layers_19_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[949] + model_decoder_layers_19_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] + model_decoder_layers_19_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[951] + model_decoder_layers_19_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[952] + model_decoder_layers_19_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[953] + model_decoder_layers_19_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] + model_decoder_layers_19_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[958] + model_decoder_layers_19_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] + model_decoder_layers_19_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[960] + model_decoder_layers_19_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[961] + model_decoder_layers_19_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[962] + model_decoder_layers_19_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] + model_decoder_layers_19_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[964] + model_decoder_layers_19_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] + model_decoder_layers_19_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[966] + model_decoder_layers_19_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[967] + model_decoder_layers_19_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[968] + model_decoder_layers_20_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] + model_decoder_layers_20_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] + model_decoder_layers_20_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[971] + model_decoder_layers_20_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] + model_decoder_layers_20_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[973] + model_decoder_layers_20_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] + model_decoder_layers_20_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[975] + 
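+ # NOTE (annotation): fc1 (5120, 1280) and fc2 (1280, 5120) give the usual 4x FFN
+ # expansion (5120 = 4 * 1280); the activation is GELU, applied by the fused
+ # *_relax_nn_gelu_cublas kernel in the forward section below.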
model_decoder_layers_20_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[976] + model_decoder_layers_20_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[977] + model_decoder_layers_20_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] + model_decoder_layers_20_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[982] + model_decoder_layers_20_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] + model_decoder_layers_20_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[984] + model_decoder_layers_20_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[985] + model_decoder_layers_20_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[986] + model_decoder_layers_20_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] + model_decoder_layers_20_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[988] + model_decoder_layers_20_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] + model_decoder_layers_20_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[990] + model_decoder_layers_20_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[991] + model_decoder_layers_20_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[992] + model_decoder_layers_21_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] + model_decoder_layers_21_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] + model_decoder_layers_21_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[995] + model_decoder_layers_21_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] + model_decoder_layers_21_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[997] + model_decoder_layers_21_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] + model_decoder_layers_21_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[999] + model_decoder_layers_21_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1000] + model_decoder_layers_21_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1001] + model_decoder_layers_21_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] + model_decoder_layers_21_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1006] + model_decoder_layers_21_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] + model_decoder_layers_21_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1008] + model_decoder_layers_21_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1009] + model_decoder_layers_21_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1010] + model_decoder_layers_21_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] + model_decoder_layers_21_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1012] + model_decoder_layers_21_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] + model_decoder_layers_21_fc2_bias2: R.Tensor((1280,), 
dtype="float16") = packed_params[1014] + model_decoder_layers_21_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1015] + model_decoder_layers_21_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1016] + model_decoder_layers_22_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] + model_decoder_layers_22_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] + model_decoder_layers_22_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1019] + model_decoder_layers_22_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] + model_decoder_layers_22_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1021] + model_decoder_layers_22_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] + model_decoder_layers_22_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1023] + model_decoder_layers_22_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1024] + model_decoder_layers_22_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1025] + model_decoder_layers_22_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] + model_decoder_layers_22_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1030] + model_decoder_layers_22_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] + model_decoder_layers_22_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1032] + model_decoder_layers_22_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1033] + model_decoder_layers_22_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1034] + model_decoder_layers_22_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] + model_decoder_layers_22_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1036] + model_decoder_layers_22_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] + model_decoder_layers_22_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1038] + model_decoder_layers_22_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1039] + model_decoder_layers_22_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1040] + model_decoder_layers_23_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] + model_decoder_layers_23_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] + model_decoder_layers_23_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1043] + model_decoder_layers_23_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] + model_decoder_layers_23_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1045] + model_decoder_layers_23_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] + model_decoder_layers_23_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1047] + model_decoder_layers_23_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1048] + model_decoder_layers_23_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = 
packed_params[1049] + model_decoder_layers_23_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] + model_decoder_layers_23_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1054] + model_decoder_layers_23_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] + model_decoder_layers_23_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1056] + model_decoder_layers_23_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1057] + model_decoder_layers_23_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1058] + model_decoder_layers_23_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] + model_decoder_layers_23_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1060] + model_decoder_layers_23_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] + model_decoder_layers_23_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1062] + model_decoder_layers_23_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1063] + model_decoder_layers_23_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1064] + model_decoder_layers_24_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] + model_decoder_layers_24_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] + model_decoder_layers_24_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1067] + model_decoder_layers_24_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] + model_decoder_layers_24_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1069] + model_decoder_layers_24_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] + model_decoder_layers_24_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1071] + model_decoder_layers_24_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1072] + model_decoder_layers_24_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1073] + model_decoder_layers_24_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] + model_decoder_layers_24_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1078] + model_decoder_layers_24_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] + model_decoder_layers_24_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1080] + model_decoder_layers_24_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1081] + model_decoder_layers_24_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1082] + model_decoder_layers_24_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] + model_decoder_layers_24_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1084] + model_decoder_layers_24_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] + model_decoder_layers_24_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1086] + model_decoder_layers_24_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1087] + 
model_decoder_layers_24_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1088] + model_decoder_layers_25_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] + model_decoder_layers_25_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] + model_decoder_layers_25_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1091] + model_decoder_layers_25_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] + model_decoder_layers_25_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1093] + model_decoder_layers_25_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] + model_decoder_layers_25_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1095] + model_decoder_layers_25_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1096] + model_decoder_layers_25_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1097] + model_decoder_layers_25_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] + model_decoder_layers_25_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1102] + model_decoder_layers_25_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] + model_decoder_layers_25_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1104] + model_decoder_layers_25_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1105] + model_decoder_layers_25_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1106] + model_decoder_layers_25_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] + model_decoder_layers_25_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1108] + model_decoder_layers_25_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] + model_decoder_layers_25_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1110] + model_decoder_layers_25_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1111] + model_decoder_layers_25_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1112] + model_decoder_layers_26_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] + model_decoder_layers_26_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] + model_decoder_layers_26_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1115] + model_decoder_layers_26_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] + model_decoder_layers_26_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1117] + model_decoder_layers_26_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] + model_decoder_layers_26_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1119] + model_decoder_layers_26_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1120] + model_decoder_layers_26_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1121] + model_decoder_layers_26_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] + 
model_decoder_layers_26_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1126] + model_decoder_layers_26_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] + model_decoder_layers_26_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1128] + model_decoder_layers_26_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1129] + model_decoder_layers_26_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1130] + model_decoder_layers_26_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] + model_decoder_layers_26_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1132] + model_decoder_layers_26_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] + model_decoder_layers_26_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1134] + model_decoder_layers_26_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1135] + model_decoder_layers_26_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1136] + model_decoder_layers_27_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] + model_decoder_layers_27_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] + model_decoder_layers_27_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1139] + model_decoder_layers_27_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] + model_decoder_layers_27_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1141] + model_decoder_layers_27_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] + model_decoder_layers_27_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1143] + model_decoder_layers_27_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1144] + model_decoder_layers_27_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1145] + model_decoder_layers_27_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] + model_decoder_layers_27_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1150] + model_decoder_layers_27_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] + model_decoder_layers_27_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1152] + model_decoder_layers_27_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1153] + model_decoder_layers_27_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1154] + model_decoder_layers_27_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] + model_decoder_layers_27_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1156] + model_decoder_layers_27_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] + model_decoder_layers_27_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1158] + model_decoder_layers_27_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1159] + model_decoder_layers_27_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1160] + model_decoder_layers_28_self_attn_k_proj_weight2: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1161] + model_decoder_layers_28_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] + model_decoder_layers_28_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1163] + model_decoder_layers_28_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] + model_decoder_layers_28_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1165] + model_decoder_layers_28_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] + model_decoder_layers_28_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1167] + model_decoder_layers_28_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1168] + model_decoder_layers_28_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1169] + model_decoder_layers_28_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] + model_decoder_layers_28_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1174] + model_decoder_layers_28_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] + model_decoder_layers_28_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1176] + model_decoder_layers_28_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1177] + model_decoder_layers_28_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1178] + model_decoder_layers_28_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] + model_decoder_layers_28_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1180] + model_decoder_layers_28_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] + model_decoder_layers_28_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1182] + model_decoder_layers_28_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1183] + model_decoder_layers_28_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1184] + model_decoder_layers_29_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] + model_decoder_layers_29_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] + model_decoder_layers_29_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1187] + model_decoder_layers_29_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] + model_decoder_layers_29_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1189] + model_decoder_layers_29_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] + model_decoder_layers_29_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1191] + model_decoder_layers_29_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1192] + model_decoder_layers_29_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1193] + model_decoder_layers_29_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] + model_decoder_layers_29_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1198] + model_decoder_layers_29_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[1199] + model_decoder_layers_29_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1200] + model_decoder_layers_29_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1201] + model_decoder_layers_29_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1202] + model_decoder_layers_29_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] + model_decoder_layers_29_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1204] + model_decoder_layers_29_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] + model_decoder_layers_29_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1206] + model_decoder_layers_29_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1207] + model_decoder_layers_29_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1208] + model_decoder_layers_30_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] + model_decoder_layers_30_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] + model_decoder_layers_30_self_attn_v_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1211] + model_decoder_layers_30_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] + model_decoder_layers_30_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1213] + model_decoder_layers_30_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] + model_decoder_layers_30_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1215] + model_decoder_layers_30_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1216] + model_decoder_layers_30_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1217] + model_decoder_layers_30_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] + model_decoder_layers_30_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1222] + model_decoder_layers_30_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] + model_decoder_layers_30_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1224] + model_decoder_layers_30_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1225] + model_decoder_layers_30_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1226] + model_decoder_layers_30_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] + model_decoder_layers_30_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1228] + model_decoder_layers_30_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] + model_decoder_layers_30_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1230] + model_decoder_layers_30_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1231] + model_decoder_layers_30_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1232] + model_decoder_layers_31_self_attn_k_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] + model_decoder_layers_31_self_attn_v_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] + model_decoder_layers_31_self_attn_v_proj_bias2: 
R.Tensor((1280,), dtype="float16") = packed_params[1235]
+ model_decoder_layers_31_self_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236]
+ model_decoder_layers_31_self_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1237]
+ model_decoder_layers_31_self_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238]
+ model_decoder_layers_31_self_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1239]
+ model_decoder_layers_31_self_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1240]
+ model_decoder_layers_31_self_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1241]
+ model_decoder_layers_31_encoder_attn_q_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245]
+ model_decoder_layers_31_encoder_attn_q_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1246]
+ model_decoder_layers_31_encoder_attn_out_proj_weight2: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247]
+ model_decoder_layers_31_encoder_attn_out_proj_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1248]
+ model_decoder_layers_31_encoder_attn_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1249]
+ model_decoder_layers_31_encoder_attn_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1250]
+ model_decoder_layers_31_fc1_weight2: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251]
+ model_decoder_layers_31_fc1_bias2: R.Tensor((5120,), dtype="float16") = packed_params[1252]
+ model_decoder_layers_31_fc2_weight2: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253]
+ model_decoder_layers_31_fc2_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1254]
+ model_decoder_layers_31_final_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1255]
+ model_decoder_layers_31_final_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1256]
+ model_decoder_layer_norm_weight2: R.Tensor((1280,), dtype="float16") = packed_params[1257]
+ model_decoder_layer_norm_bias2: R.Tensor((1280,), dtype="float16") = packed_params[1258]
+ reshape384: R.Tensor((seq_len,), dtype="int32") = R.reshape(input_ids, R.shape([seq_len]))
+ take: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight2, reshape384, axis=0)
+ reshape385: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take, R.shape([1, seq_len, 1280]))
+ lv68: R.Tensor((seq_len,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((seq_len,), dtype="int32"),))
+ take1: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight2, lv68, axis=0)
+ reshape386: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take1, R.shape([1, seq_len, 1280]))
+ add257: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(reshape385, reshape386)
+ layer_norm65: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add257, model_decoder_layers_0_self_attn_layer_norm_weight2, model_decoder_layers_0_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv416 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_q_proj_weight2, layer_norm65, model_decoder_layers_0_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape387: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv416, R.shape([1, seq_len, 20, 64]))
+ lv98 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_0_self_attn_k_proj_weight2, layer_norm65), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape388: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv98, R.shape([1, seq_len, 20, 64]))
+ lv417 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_v_proj_weight2, layer_norm65, model_decoder_layers_0_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape389: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv417, R.shape([1, seq_len, 20, 64]))
+ concat: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape387, reshape388, reshape389), axis=2)
+ reshape390: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat, R.shape([seq_len, 60, 64]))
+ lv69 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape390), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape391: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv69, R.shape([1, seq_len, 20, 64]))
+ reshape392: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape391, R.shape([1, seq_len, 1280]))
+ lv418 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_out_proj_weight2, reshape392, model_decoder_layers_0_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add261: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add257, lv418)
+ layer_norm66: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add261, model_decoder_layers_0_encoder_attn_layer_norm_weight2, model_decoder_layers_0_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv419 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_encoder_attn_q_proj_weight2, layer_norm66, model_decoder_layers_0_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape393: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv419, R.shape([1, seq_len, 20, 64]))
+ reshape394: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape393, R.shape([seq_len, 20, 64]))
+ lv70 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape394), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape395: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv70, R.shape([1, seq_len, 20, 64]))
+ reshape396: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape395, R.shape([1, seq_len, 1280]))
+ lv420 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_encoder_attn_out_proj_weight2, reshape396, model_decoder_layers_0_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add264: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add261, lv420)
+ layer_norm67: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add264, model_decoder_layers_0_final_layer_norm_weight2, model_decoder_layers_0_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
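+ # NOTE (annotation): the block above is the first of 32 identical decoder layers
+ # (layers 0-31, followed by model_decoder_layer_norm). Each layer is pre-LayerNorm
+ # self-attention (q/k/v fused into a (seq_len, 60, 64) = 3 x 20 heads x 64 tensor for
+ # vm.builtin.attention_kv_cache_attention_with_fused_qkv), then pre-LayerNorm
+ # cross-attention against the cached encoder states, then a pre-LayerNorm GELU MLP,
+ # with a residual R.add after each sub-block. The R.prim_value arguments to the KV
+ # cache builtins appear to be the layer index and a softmax scale of 1.0.
+ # Hedged shape sketch of the fused cuBLAS calls: with W: (out, in), x: (1, seq_len, in),
+ # b: (out,), "fused_relax_permute_dims_relax_matmul_relax_add1_cublas"(W, x, b) appears
+ # to compute x @ W.T + b, and the matmul1 variant the same product without the bias.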
+            lv64 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_0_fc1_weight2, layer_norm67, model_decoder_layers_0_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+            lv421 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_0_fc2_weight2, lv64, model_decoder_layers_0_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            add267: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add264, lv421)
+            layer_norm68: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add267, model_decoder_layers_1_self_attn_layer_norm_weight2, model_decoder_layers_1_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv422 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_q_proj_weight2, layer_norm68, model_decoder_layers_1_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            reshape397: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv422, R.shape([1, seq_len, 20, 64]))
+            lv99 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_1_self_attn_k_proj_weight2, layer_norm68), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            reshape398: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv99, R.shape([1, seq_len, 20, 64]))
+            lv423 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_v_proj_weight2, layer_norm68, model_decoder_layers_1_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            reshape399: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv423, R.shape([1, seq_len, 20, 64]))
+            concat1: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape397, reshape398, reshape399), axis=2)
+            reshape400: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat1, R.shape([seq_len, 60, 64]))
+            lv71 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape400), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+            reshape401: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv71, R.shape([1, seq_len, 20, 64]))
+            reshape402: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape401, R.shape([1, seq_len, 1280]))
+            lv424 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_out_proj_weight2, reshape402, model_decoder_layers_1_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            add271: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add267, lv424)
+            layer_norm69: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add271, model_decoder_layers_1_encoder_attn_layer_norm_weight2, model_decoder_layers_1_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv425 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_encoder_attn_q_proj_weight2, layer_norm69, model_decoder_layers_1_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+            reshape403: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv425, R.shape([1, seq_len, 20, 64]))
+            reshape404: R.Tensor((seq_len, 20, 64), dtype="float16") =
R.reshape(reshape403, R.shape([seq_len, 20, 64])) + lv72 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape404), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape405: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv72, R.shape([1, seq_len, 20, 64])) + reshape406: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape405, R.shape([1, seq_len, 1280])) + lv426 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_encoder_attn_out_proj_weight2, reshape406, model_decoder_layers_1_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add274: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add271, lv426) + layer_norm70: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add274, model_decoder_layers_1_final_layer_norm_weight2, model_decoder_layers_1_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv65 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_1_fc1_weight2, layer_norm70, model_decoder_layers_1_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv427 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_1_fc2_weight2, lv65, model_decoder_layers_1_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add277: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add274, lv427) + layer_norm71: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add277, model_decoder_layers_2_self_attn_layer_norm_weight2, model_decoder_layers_2_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv428 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_q_proj_weight2, layer_norm71, model_decoder_layers_2_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape407: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv428, R.shape([1, seq_len, 20, 64])) + lv100 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_2_self_attn_k_proj_weight2, layer_norm71), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape408: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv100, R.shape([1, seq_len, 20, 64])) + lv429 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_v_proj_weight2, layer_norm71, model_decoder_layers_2_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape409: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv429, R.shape([1, seq_len, 20, 64])) + concat2: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape407, reshape408, reshape409), axis=2) + reshape410: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat2, R.shape([seq_len, 60, 64])) + lv73 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape410), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape411: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv73, R.shape([1, seq_len, 20, 64])) + reshape412: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.reshape(reshape411, R.shape([1, seq_len, 1280])) + lv430 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_out_proj_weight2, reshape412, model_decoder_layers_2_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add281: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add277, lv430) + layer_norm72: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add281, model_decoder_layers_2_encoder_attn_layer_norm_weight2, model_decoder_layers_2_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv431 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_encoder_attn_q_proj_weight2, layer_norm72, model_decoder_layers_2_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape413: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv431, R.shape([1, seq_len, 20, 64])) + reshape414: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape413, R.shape([seq_len, 20, 64])) + lv74 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape414), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape415: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv74, R.shape([1, seq_len, 20, 64])) + reshape416: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape415, R.shape([1, seq_len, 1280])) + lv432 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_encoder_attn_out_proj_weight2, reshape416, model_decoder_layers_2_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add284: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add281, lv432) + layer_norm73: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add284, model_decoder_layers_2_final_layer_norm_weight2, model_decoder_layers_2_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv66 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_2_fc1_weight2, layer_norm73, model_decoder_layers_2_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv433 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_2_fc2_weight2, lv66, model_decoder_layers_2_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add287: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add284, lv433) + layer_norm74: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add287, model_decoder_layers_3_self_attn_layer_norm_weight2, model_decoder_layers_3_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv434 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_self_attn_q_proj_weight2, layer_norm74, model_decoder_layers_3_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape417: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv434, R.shape([1, seq_len, 20, 64])) + lv101 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_3_self_attn_k_proj_weight2, layer_norm74), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape418: 
R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv101, R.shape([1, seq_len, 20, 64])) + lv435 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_self_attn_v_proj_weight2, layer_norm74, model_decoder_layers_3_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape419: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv435, R.shape([1, seq_len, 20, 64])) + concat3: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape417, reshape418, reshape419), axis=2) + reshape420: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat3, R.shape([seq_len, 60, 64])) + lv75 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape420), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape421: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv75, R.shape([1, seq_len, 20, 64])) + reshape422: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape421, R.shape([1, seq_len, 1280])) + lv436 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_self_attn_out_proj_weight2, reshape422, model_decoder_layers_3_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add291: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add287, lv436) + layer_norm75: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add291, model_decoder_layers_3_encoder_attn_layer_norm_weight2, model_decoder_layers_3_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv437 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_encoder_attn_q_proj_weight2, layer_norm75, model_decoder_layers_3_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape423: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv437, R.shape([1, seq_len, 20, 64])) + reshape424: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape423, R.shape([seq_len, 20, 64])) + lv76 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape424), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape425: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv76, R.shape([1, seq_len, 20, 64])) + reshape426: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape425, R.shape([1, seq_len, 1280])) + lv438 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_encoder_attn_out_proj_weight2, reshape426, model_decoder_layers_3_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add294: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add291, lv438) + layer_norm76: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add294, model_decoder_layers_3_final_layer_norm_weight2, model_decoder_layers_3_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv67 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_3_fc1_weight2, layer_norm76, model_decoder_layers_3_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv439 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_3_fc2_weight2, lv67, model_decoder_layers_3_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add297: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add294, lv439) + layer_norm77: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add297, model_decoder_layers_4_self_attn_layer_norm_weight2, model_decoder_layers_4_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv440 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_q_proj_weight2, layer_norm77, model_decoder_layers_4_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape427: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv440, R.shape([1, seq_len, 20, 64])) + lv102 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_4_self_attn_k_proj_weight2, layer_norm77), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape428: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv102, R.shape([1, seq_len, 20, 64])) + lv441 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_v_proj_weight2, layer_norm77, model_decoder_layers_4_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape429: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv441, R.shape([1, seq_len, 20, 64])) + concat4: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape427, reshape428, reshape429), axis=2) + reshape430: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat4, R.shape([seq_len, 60, 64])) + lv77 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape430), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape431: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv77, R.shape([1, seq_len, 20, 64])) + reshape432: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape431, R.shape([1, seq_len, 1280])) + lv442 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_out_proj_weight2, reshape432, model_decoder_layers_4_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add301: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add297, lv442) + layer_norm78: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add301, model_decoder_layers_4_encoder_attn_layer_norm_weight2, model_decoder_layers_4_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv443 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_encoder_attn_q_proj_weight2, layer_norm78, model_decoder_layers_4_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape433: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv443, R.shape([1, seq_len, 20, 64])) + reshape434: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape433, R.shape([seq_len, 20, 64])) + lv78 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape434), out_sinfo=R.Tensor((seq_len, 20, 64), 
dtype="float16")) + reshape435: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv78, R.shape([1, seq_len, 20, 64])) + reshape436: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape435, R.shape([1, seq_len, 1280])) + lv444 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_encoder_attn_out_proj_weight2, reshape436, model_decoder_layers_4_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add304: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add301, lv444) + layer_norm79: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add304, model_decoder_layers_4_final_layer_norm_weight2, model_decoder_layers_4_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv68_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_4_fc1_weight2, layer_norm79, model_decoder_layers_4_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv445 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_4_fc2_weight2, lv68_1, model_decoder_layers_4_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add307: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add304, lv445) + layer_norm80: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add307, model_decoder_layers_5_self_attn_layer_norm_weight2, model_decoder_layers_5_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv446 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_q_proj_weight2, layer_norm80, model_decoder_layers_5_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape437: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv446, R.shape([1, seq_len, 20, 64])) + lv103 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_5_self_attn_k_proj_weight2, layer_norm80), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape438: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv103, R.shape([1, seq_len, 20, 64])) + lv447 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_v_proj_weight2, layer_norm80, model_decoder_layers_5_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape439: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv447, R.shape([1, seq_len, 20, 64])) + concat5: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape437, reshape438, reshape439), axis=2) + reshape440: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat5, R.shape([seq_len, 60, 64])) + lv79 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape440), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape441: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv79, R.shape([1, seq_len, 20, 64])) + reshape442: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape441, R.shape([1, seq_len, 1280])) + lv448 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_out_proj_weight2, reshape442, 
model_decoder_layers_5_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add311: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add307, lv448) + layer_norm81: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add311, model_decoder_layers_5_encoder_attn_layer_norm_weight2, model_decoder_layers_5_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv449 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_encoder_attn_q_proj_weight2, layer_norm81, model_decoder_layers_5_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape443: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv449, R.shape([1, seq_len, 20, 64])) + reshape444: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape443, R.shape([seq_len, 20, 64])) + lv80 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape444), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape445: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv80, R.shape([1, seq_len, 20, 64])) + reshape446: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape445, R.shape([1, seq_len, 1280])) + lv450 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_encoder_attn_out_proj_weight2, reshape446, model_decoder_layers_5_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add314: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add311, lv450) + layer_norm82: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add314, model_decoder_layers_5_final_layer_norm_weight2, model_decoder_layers_5_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv69_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_5_fc1_weight2, layer_norm82, model_decoder_layers_5_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv451 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_5_fc2_weight2, lv69_1, model_decoder_layers_5_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add317: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add314, lv451) + layer_norm83: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add317, model_decoder_layers_6_self_attn_layer_norm_weight2, model_decoder_layers_6_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv452 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_self_attn_q_proj_weight2, layer_norm83, model_decoder_layers_6_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape447: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv452, R.shape([1, seq_len, 20, 64])) + lv104 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_6_self_attn_k_proj_weight2, layer_norm83), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape448: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv104, R.shape([1, seq_len, 20, 64])) + lv453 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", 
(model_decoder_layers_6_self_attn_v_proj_weight2, layer_norm83, model_decoder_layers_6_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape449: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv453, R.shape([1, seq_len, 20, 64])) + concat6: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape447, reshape448, reshape449), axis=2) + reshape450: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat6, R.shape([seq_len, 60, 64])) + lv81 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape450), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape451: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv81, R.shape([1, seq_len, 20, 64])) + reshape452: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape451, R.shape([1, seq_len, 1280])) + lv454 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_self_attn_out_proj_weight2, reshape452, model_decoder_layers_6_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add321: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add317, lv454) + layer_norm84: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add321, model_decoder_layers_6_encoder_attn_layer_norm_weight2, model_decoder_layers_6_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv455 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_encoder_attn_q_proj_weight2, layer_norm84, model_decoder_layers_6_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape453: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv455, R.shape([1, seq_len, 20, 64])) + reshape454: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape453, R.shape([seq_len, 20, 64])) + lv82 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape454), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape455: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv82, R.shape([1, seq_len, 20, 64])) + reshape456: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape455, R.shape([1, seq_len, 1280])) + lv456 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_encoder_attn_out_proj_weight2, reshape456, model_decoder_layers_6_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add324: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add321, lv456) + layer_norm85: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add324, model_decoder_layers_6_final_layer_norm_weight2, model_decoder_layers_6_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv70_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_6_fc1_weight2, layer_norm85, model_decoder_layers_6_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv457 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_6_fc2_weight2, lv70_1, model_decoder_layers_6_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add327: R.Tensor((1, 
seq_len, 1280), dtype="float16") = R.add(add324, lv457) + layer_norm86: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add327, model_decoder_layers_7_self_attn_layer_norm_weight2, model_decoder_layers_7_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv458 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_q_proj_weight2, layer_norm86, model_decoder_layers_7_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape457: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv458, R.shape([1, seq_len, 20, 64])) + lv105 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_7_self_attn_k_proj_weight2, layer_norm86), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape458: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv105, R.shape([1, seq_len, 20, 64])) + lv459 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_v_proj_weight2, layer_norm86, model_decoder_layers_7_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape459: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv459, R.shape([1, seq_len, 20, 64])) + concat7: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape457, reshape458, reshape459), axis=2) + reshape460: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat7, R.shape([seq_len, 60, 64])) + lv83 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape460), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape461: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv83, R.shape([1, seq_len, 20, 64])) + reshape462: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape461, R.shape([1, seq_len, 1280])) + lv460 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_out_proj_weight2, reshape462, model_decoder_layers_7_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add331: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add327, lv460) + layer_norm87: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add331, model_decoder_layers_7_encoder_attn_layer_norm_weight2, model_decoder_layers_7_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv461 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_encoder_attn_q_proj_weight2, layer_norm87, model_decoder_layers_7_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape463: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv461, R.shape([1, seq_len, 20, 64])) + reshape464: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape463, R.shape([seq_len, 20, 64])) + lv84 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape464), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape465: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv84, R.shape([1, seq_len, 20, 64])) + reshape466: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape465, R.shape([1, seq_len, 1280])) + lv462 
= R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_encoder_attn_out_proj_weight2, reshape466, model_decoder_layers_7_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add334: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add331, lv462) + layer_norm88: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add334, model_decoder_layers_7_final_layer_norm_weight2, model_decoder_layers_7_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv71_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_7_fc1_weight2, layer_norm88, model_decoder_layers_7_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv463 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_7_fc2_weight2, lv71_1, model_decoder_layers_7_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add337: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add334, lv463) + layer_norm89: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add337, model_decoder_layers_8_self_attn_layer_norm_weight2, model_decoder_layers_8_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv464 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_q_proj_weight2, layer_norm89, model_decoder_layers_8_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape467: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv464, R.shape([1, seq_len, 20, 64])) + lv106 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_8_self_attn_k_proj_weight2, layer_norm89), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape468: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv106, R.shape([1, seq_len, 20, 64])) + lv465 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_v_proj_weight2, layer_norm89, model_decoder_layers_8_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape469: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv465, R.shape([1, seq_len, 20, 64])) + concat8: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape467, reshape468, reshape469), axis=2) + reshape470: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat8, R.shape([seq_len, 60, 64])) + lv85 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape470), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape471: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv85, R.shape([1, seq_len, 20, 64])) + reshape472: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape471, R.shape([1, seq_len, 1280])) + lv466 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_out_proj_weight2, reshape472, model_decoder_layers_8_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add341: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add337, lv466) + layer_norm90: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add341, 
model_decoder_layers_8_encoder_attn_layer_norm_weight2, model_decoder_layers_8_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv467 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_encoder_attn_q_proj_weight2, layer_norm90, model_decoder_layers_8_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape473: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv467, R.shape([1, seq_len, 20, 64])) + reshape474: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape473, R.shape([seq_len, 20, 64])) + lv86 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape474), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape475: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv86, R.shape([1, seq_len, 20, 64])) + reshape476: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape475, R.shape([1, seq_len, 1280])) + lv468 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_encoder_attn_out_proj_weight2, reshape476, model_decoder_layers_8_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add344: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add341, lv468) + layer_norm91: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add344, model_decoder_layers_8_final_layer_norm_weight2, model_decoder_layers_8_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv72_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_8_fc1_weight2, layer_norm91, model_decoder_layers_8_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv469 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_8_fc2_weight2, lv72_1, model_decoder_layers_8_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add347: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add344, lv469) + layer_norm92: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add347, model_decoder_layers_9_self_attn_layer_norm_weight2, model_decoder_layers_9_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv470 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_q_proj_weight2, layer_norm92, model_decoder_layers_9_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape477: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv470, R.shape([1, seq_len, 20, 64])) + lv107 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_9_self_attn_k_proj_weight2, layer_norm92), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape478: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv107, R.shape([1, seq_len, 20, 64])) + lv471 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_v_proj_weight2, layer_norm92, model_decoder_layers_9_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape479: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv471, R.shape([1, seq_len, 20, 64])) + concat9: 
R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape477, reshape478, reshape479), axis=2) + reshape480: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat9, R.shape([seq_len, 60, 64])) + lv87 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape480), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape481: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv87, R.shape([1, seq_len, 20, 64])) + reshape482: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape481, R.shape([1, seq_len, 1280])) + lv472 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_out_proj_weight2, reshape482, model_decoder_layers_9_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add351: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add347, lv472) + layer_norm93: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add351, model_decoder_layers_9_encoder_attn_layer_norm_weight2, model_decoder_layers_9_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv473 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_encoder_attn_q_proj_weight2, layer_norm93, model_decoder_layers_9_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape483: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv473, R.shape([1, seq_len, 20, 64])) + reshape484: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape483, R.shape([seq_len, 20, 64])) + lv88 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape484), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape485: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv88, R.shape([1, seq_len, 20, 64])) + reshape486: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape485, R.shape([1, seq_len, 1280])) + lv474 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_encoder_attn_out_proj_weight2, reshape486, model_decoder_layers_9_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add354: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add351, lv474) + layer_norm94: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add354, model_decoder_layers_9_final_layer_norm_weight2, model_decoder_layers_9_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv73_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_9_fc1_weight2, layer_norm94, model_decoder_layers_9_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv475 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_9_fc2_weight2, lv73_1, model_decoder_layers_9_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add357: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add354, lv475) + layer_norm95: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add357, model_decoder_layers_10_self_attn_layer_norm_weight2, model_decoder_layers_10_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, 
center=True, scale=True) + lv476 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_q_proj_weight2, layer_norm95, model_decoder_layers_10_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape487: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv476, R.shape([1, seq_len, 20, 64])) + lv108 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_10_self_attn_k_proj_weight2, layer_norm95), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape488: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv108, R.shape([1, seq_len, 20, 64])) + lv477 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_v_proj_weight2, layer_norm95, model_decoder_layers_10_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape489: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv477, R.shape([1, seq_len, 20, 64])) + concat10: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape487, reshape488, reshape489), axis=2) + reshape490: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat10, R.shape([seq_len, 60, 64])) + lv89 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape490), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape491: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv89, R.shape([1, seq_len, 20, 64])) + reshape492: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape491, R.shape([1, seq_len, 1280])) + lv478 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_out_proj_weight2, reshape492, model_decoder_layers_10_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add361: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add357, lv478) + layer_norm96: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add361, model_decoder_layers_10_encoder_attn_layer_norm_weight2, model_decoder_layers_10_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv479 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_encoder_attn_q_proj_weight2, layer_norm96, model_decoder_layers_10_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape493: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv479, R.shape([1, seq_len, 20, 64])) + reshape494: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape493, R.shape([seq_len, 20, 64])) + lv90 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape494), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape495: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv90, R.shape([1, seq_len, 20, 64])) + reshape496: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape495, R.shape([1, seq_len, 1280])) + lv480 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_encoder_attn_out_proj_weight2, reshape496, model_decoder_layers_10_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add364: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add361, lv480) + layer_norm97: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add364, model_decoder_layers_10_final_layer_norm_weight2, model_decoder_layers_10_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv74_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_10_fc1_weight2, layer_norm97, model_decoder_layers_10_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv481 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_10_fc2_weight2, lv74_1, model_decoder_layers_10_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add367: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add364, lv481) + layer_norm98: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add367, model_decoder_layers_11_self_attn_layer_norm_weight2, model_decoder_layers_11_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv482 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_self_attn_q_proj_weight2, layer_norm98, model_decoder_layers_11_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape497: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv482, R.shape([1, seq_len, 20, 64])) + lv109 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_11_self_attn_k_proj_weight2, layer_norm98), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape498: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv109, R.shape([1, seq_len, 20, 64])) + lv483 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_self_attn_v_proj_weight2, layer_norm98, model_decoder_layers_11_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape499: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv483, R.shape([1, seq_len, 20, 64])) + concat11: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape497, reshape498, reshape499), axis=2) + reshape500: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat11, R.shape([seq_len, 60, 64])) + lv91 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape500), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape501: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv91, R.shape([1, seq_len, 20, 64])) + reshape502: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape501, R.shape([1, seq_len, 1280])) + lv484 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_self_attn_out_proj_weight2, reshape502, model_decoder_layers_11_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add371: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add367, lv484) + layer_norm99: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add371, model_decoder_layers_11_encoder_attn_layer_norm_weight2, model_decoder_layers_11_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv485 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", 
(model_decoder_layers_11_encoder_attn_q_proj_weight2, layer_norm99, model_decoder_layers_11_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape503: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv485, R.shape([1, seq_len, 20, 64])) + reshape504: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape503, R.shape([seq_len, 20, 64])) + lv92 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape504), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape505: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv92, R.shape([1, seq_len, 20, 64])) + reshape506: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape505, R.shape([1, seq_len, 1280])) + lv486 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_encoder_attn_out_proj_weight2, reshape506, model_decoder_layers_11_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add374: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add371, lv486) + layer_norm100: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add374, model_decoder_layers_11_final_layer_norm_weight2, model_decoder_layers_11_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv75_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_11_fc1_weight2, layer_norm100, model_decoder_layers_11_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv487 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_11_fc2_weight2, lv75_1, model_decoder_layers_11_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add377: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add374, lv487) + layer_norm101: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add377, model_decoder_layers_12_self_attn_layer_norm_weight2, model_decoder_layers_12_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv488 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_q_proj_weight2, layer_norm101, model_decoder_layers_12_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape507: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv488, R.shape([1, seq_len, 20, 64])) + lv110 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_12_self_attn_k_proj_weight2, layer_norm101), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape508: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv110, R.shape([1, seq_len, 20, 64])) + lv489 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_v_proj_weight2, layer_norm101, model_decoder_layers_12_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape509: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv489, R.shape([1, seq_len, 20, 64])) + concat12: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape507, reshape508, reshape509), axis=2) + reshape510: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat12, R.shape([seq_len, 60, 64])) + lv93 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape510), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape511: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv93, R.shape([1, seq_len, 20, 64])) + reshape512: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape511, R.shape([1, seq_len, 1280])) + lv490 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_out_proj_weight2, reshape512, model_decoder_layers_12_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add381: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add377, lv490) + layer_norm102: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add381, model_decoder_layers_12_encoder_attn_layer_norm_weight2, model_decoder_layers_12_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv491 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_encoder_attn_q_proj_weight2, layer_norm102, model_decoder_layers_12_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape513: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv491, R.shape([1, seq_len, 20, 64])) + reshape514: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape513, R.shape([seq_len, 20, 64])) + lv94 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape514), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape515: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv94, R.shape([1, seq_len, 20, 64])) + reshape516: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape515, R.shape([1, seq_len, 1280])) + lv492 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_encoder_attn_out_proj_weight2, reshape516, model_decoder_layers_12_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add384: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add381, lv492) + layer_norm103: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add384, model_decoder_layers_12_final_layer_norm_weight2, model_decoder_layers_12_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv76_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_12_fc1_weight2, layer_norm103, model_decoder_layers_12_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv493 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_12_fc2_weight2, lv76_1, model_decoder_layers_12_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add387: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add384, lv493) + layer_norm104: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add387, model_decoder_layers_13_self_attn_layer_norm_weight2, model_decoder_layers_13_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv494 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_q_proj_weight2, layer_norm104, 
model_decoder_layers_13_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape517: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv494, R.shape([1, seq_len, 20, 64])) + lv111 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_13_self_attn_k_proj_weight2, layer_norm104), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape518: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv111, R.shape([1, seq_len, 20, 64])) + lv495 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_v_proj_weight2, layer_norm104, model_decoder_layers_13_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape519: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv495, R.shape([1, seq_len, 20, 64])) + concat13: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape517, reshape518, reshape519), axis=2) + reshape520: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat13, R.shape([seq_len, 60, 64])) + lv95 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape520), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape521: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv95, R.shape([1, seq_len, 20, 64])) + reshape522: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape521, R.shape([1, seq_len, 1280])) + lv496 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_out_proj_weight2, reshape522, model_decoder_layers_13_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add391: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add387, lv496) + layer_norm105: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add391, model_decoder_layers_13_encoder_attn_layer_norm_weight2, model_decoder_layers_13_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv497 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_encoder_attn_q_proj_weight2, layer_norm105, model_decoder_layers_13_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape523: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv497, R.shape([1, seq_len, 20, 64])) + reshape524: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape523, R.shape([seq_len, 20, 64])) + lv96 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape524), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape525: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv96, R.shape([1, seq_len, 20, 64])) + reshape526: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape525, R.shape([1, seq_len, 1280])) + lv498 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_encoder_attn_out_proj_weight2, reshape526, model_decoder_layers_13_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add394: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add391, lv498) + layer_norm106: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add394, 
model_decoder_layers_13_final_layer_norm_weight2, model_decoder_layers_13_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv77_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_13_fc1_weight2, layer_norm106, model_decoder_layers_13_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv499 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_13_fc2_weight2, lv77_1, model_decoder_layers_13_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add397: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add394, lv499) + layer_norm107: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add397, model_decoder_layers_14_self_attn_layer_norm_weight2, model_decoder_layers_14_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv500 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_q_proj_weight2, layer_norm107, model_decoder_layers_14_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape527: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv500, R.shape([1, seq_len, 20, 64])) + lv112 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_14_self_attn_k_proj_weight2, layer_norm107), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape528: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv112, R.shape([1, seq_len, 20, 64])) + lv501 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_v_proj_weight2, layer_norm107, model_decoder_layers_14_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape529: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv501, R.shape([1, seq_len, 20, 64])) + concat14: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape527, reshape528, reshape529), axis=2) + reshape530: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat14, R.shape([seq_len, 60, 64])) + lv97 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape530), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape531: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv97, R.shape([1, seq_len, 20, 64])) + reshape532: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape531, R.shape([1, seq_len, 1280])) + lv502 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_out_proj_weight2, reshape532, model_decoder_layers_14_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add401: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add397, lv502) + layer_norm108: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add401, model_decoder_layers_14_encoder_attn_layer_norm_weight2, model_decoder_layers_14_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv503 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_encoder_attn_q_proj_weight2, layer_norm108, model_decoder_layers_14_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 
1280), dtype="float16")) + reshape533: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv503, R.shape([1, seq_len, 20, 64])) + reshape534: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape533, R.shape([seq_len, 20, 64])) + lv98_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape534), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape535: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv98_1, R.shape([1, seq_len, 20, 64])) + reshape536: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape535, R.shape([1, seq_len, 1280])) + lv504 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_encoder_attn_out_proj_weight2, reshape536, model_decoder_layers_14_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add404: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add401, lv504) + layer_norm109: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add404, model_decoder_layers_14_final_layer_norm_weight2, model_decoder_layers_14_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv78_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_14_fc1_weight2, layer_norm109, model_decoder_layers_14_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv505 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_14_fc2_weight2, lv78_1, model_decoder_layers_14_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add407: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add404, lv505) + layer_norm110: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add407, model_decoder_layers_15_self_attn_layer_norm_weight2, model_decoder_layers_15_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv506 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_q_proj_weight2, layer_norm110, model_decoder_layers_15_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape537: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv506, R.shape([1, seq_len, 20, 64])) + lv113 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_15_self_attn_k_proj_weight2, layer_norm110), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape538: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv113, R.shape([1, seq_len, 20, 64])) + lv507 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_v_proj_weight2, layer_norm110, model_decoder_layers_15_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape539: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv507, R.shape([1, seq_len, 20, 64])) + concat15: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape537, reshape538, reshape539), axis=2) + reshape540: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat15, R.shape([seq_len, 60, 64])) + lv99_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape540), 
out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape541: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv99_1, R.shape([1, seq_len, 20, 64])) + reshape542: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape541, R.shape([1, seq_len, 1280])) + lv508 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_out_proj_weight2, reshape542, model_decoder_layers_15_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add411: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add407, lv508) + layer_norm111: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add411, model_decoder_layers_15_encoder_attn_layer_norm_weight2, model_decoder_layers_15_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv509 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_encoder_attn_q_proj_weight2, layer_norm111, model_decoder_layers_15_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape543: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv509, R.shape([1, seq_len, 20, 64])) + reshape544: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape543, R.shape([seq_len, 20, 64])) + lv100_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape544), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape545: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv100_1, R.shape([1, seq_len, 20, 64])) + reshape546: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape545, R.shape([1, seq_len, 1280])) + lv510 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_encoder_attn_out_proj_weight2, reshape546, model_decoder_layers_15_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add414: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add411, lv510) + layer_norm112: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add414, model_decoder_layers_15_final_layer_norm_weight2, model_decoder_layers_15_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv79_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_15_fc1_weight2, layer_norm112, model_decoder_layers_15_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv511 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_15_fc2_weight2, lv79_1, model_decoder_layers_15_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add417: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add414, lv511) + layer_norm113: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add417, model_decoder_layers_16_self_attn_layer_norm_weight2, model_decoder_layers_16_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv512 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_q_proj_weight2, layer_norm113, model_decoder_layers_16_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape547: R.Tensor((1, seq_len, 20, 64), dtype="float16") = 
R.reshape(lv512, R.shape([1, seq_len, 20, 64])) + lv114 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_16_self_attn_k_proj_weight2, layer_norm113), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape548: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv114, R.shape([1, seq_len, 20, 64])) + lv513 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_v_proj_weight2, layer_norm113, model_decoder_layers_16_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape549: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv513, R.shape([1, seq_len, 20, 64])) + concat16: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape547, reshape548, reshape549), axis=2) + reshape550: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat16, R.shape([seq_len, 60, 64])) + lv101_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape550), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape551: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv101_1, R.shape([1, seq_len, 20, 64])) + reshape552: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape551, R.shape([1, seq_len, 1280])) + lv514 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_out_proj_weight2, reshape552, model_decoder_layers_16_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add421: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add417, lv514) + layer_norm114: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add421, model_decoder_layers_16_encoder_attn_layer_norm_weight2, model_decoder_layers_16_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv515 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_encoder_attn_q_proj_weight2, layer_norm114, model_decoder_layers_16_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape553: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv515, R.shape([1, seq_len, 20, 64])) + reshape554: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape553, R.shape([seq_len, 20, 64])) + lv102_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape554), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape555: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv102_1, R.shape([1, seq_len, 20, 64])) + reshape556: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape555, R.shape([1, seq_len, 1280])) + lv516 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_encoder_attn_out_proj_weight2, reshape556, model_decoder_layers_16_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add424: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add421, lv516) + layer_norm115: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add424, model_decoder_layers_16_final_layer_norm_weight2, model_decoder_layers_16_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv80_1 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_16_fc1_weight2, layer_norm115, model_decoder_layers_16_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv517 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_16_fc2_weight2, lv80_1, model_decoder_layers_16_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add427: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add424, lv517) + layer_norm116: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add427, model_decoder_layers_17_self_attn_layer_norm_weight2, model_decoder_layers_17_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv518 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_q_proj_weight2, layer_norm116, model_decoder_layers_17_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape557: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv518, R.shape([1, seq_len, 20, 64])) + lv115 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_17_self_attn_k_proj_weight2, layer_norm116), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape558: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv115, R.shape([1, seq_len, 20, 64])) + lv519 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_v_proj_weight2, layer_norm116, model_decoder_layers_17_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape559: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv519, R.shape([1, seq_len, 20, 64])) + concat17: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape557, reshape558, reshape559), axis=2) + reshape560: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat17, R.shape([seq_len, 60, 64])) + lv103_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape560), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape561: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv103_1, R.shape([1, seq_len, 20, 64])) + reshape562: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape561, R.shape([1, seq_len, 1280])) + lv520 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_out_proj_weight2, reshape562, model_decoder_layers_17_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add431: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add427, lv520) + layer_norm117: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add431, model_decoder_layers_17_encoder_attn_layer_norm_weight2, model_decoder_layers_17_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv521 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_encoder_attn_q_proj_weight2, layer_norm117, model_decoder_layers_17_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape563: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv521, R.shape([1, seq_len, 20, 64])) + reshape564: R.Tensor((seq_len, 20, 
64), dtype="float16") = R.reshape(reshape563, R.shape([seq_len, 20, 64])) + lv104_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape564), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape565: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv104_1, R.shape([1, seq_len, 20, 64])) + reshape566: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape565, R.shape([1, seq_len, 1280])) + lv522 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_encoder_attn_out_proj_weight2, reshape566, model_decoder_layers_17_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add434: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add431, lv522) + layer_norm118: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add434, model_decoder_layers_17_final_layer_norm_weight2, model_decoder_layers_17_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv81_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_17_fc1_weight2, layer_norm118, model_decoder_layers_17_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv523 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_17_fc2_weight2, lv81_1, model_decoder_layers_17_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add437: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add434, lv523) + layer_norm119: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add437, model_decoder_layers_18_self_attn_layer_norm_weight2, model_decoder_layers_18_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv524 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_q_proj_weight2, layer_norm119, model_decoder_layers_18_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape567: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv524, R.shape([1, seq_len, 20, 64])) + lv116 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_18_self_attn_k_proj_weight2, layer_norm119), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape568: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv116, R.shape([1, seq_len, 20, 64])) + lv525 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_v_proj_weight2, layer_norm119, model_decoder_layers_18_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape569: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv525, R.shape([1, seq_len, 20, 64])) + concat18: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape567, reshape568, reshape569), axis=2) + reshape570: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat18, R.shape([seq_len, 60, 64])) + lv105_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape570), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape571: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv105_1, R.shape([1, seq_len, 20, 64])) + 
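# NOTE [editorial annotation; not emitted by the TVMScript printer]:
+ # every decoder layer traced in this stretch (layers 12 through 28) lowers to
+ # the same sequence of cuBLAS offloads and KV-cache builtins. As a hedged
+ # pseudo-Python sketch (helper names here are hypothetical, not TVM APIs):
+ #   h = layer_norm(x)                                  # *_self_attn_layer_norm
+ #   q, k, v = q_proj(h), k_proj(h), v_proj(h)          # k_proj carries no bias
+ #   qkv = reshape(concat(q, k, v), (seq_len, 60, 64))  # 3 projections x 20 heads x 64
+ #   o = attention_with_fused_qkv(paged_kv_cache, layer_id, 1.0, qkv)
+ #   x = x + out_proj(o)                                # residual add
+ # and the encoder cross-attention and GELU MLP blocks that follow repeat the
+ # same pre-LayerNorm + residual shape. +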
reshape572: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape571, R.shape([1, seq_len, 1280])) + lv526 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_out_proj_weight2, reshape572, model_decoder_layers_18_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add441: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add437, lv526) + layer_norm120: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add441, model_decoder_layers_18_encoder_attn_layer_norm_weight2, model_decoder_layers_18_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv527 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_encoder_attn_q_proj_weight2, layer_norm120, model_decoder_layers_18_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape573: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv527, R.shape([1, seq_len, 20, 64])) + reshape574: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape573, R.shape([seq_len, 20, 64])) + lv106_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape574), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape575: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv106_1, R.shape([1, seq_len, 20, 64])) + reshape576: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape575, R.shape([1, seq_len, 1280])) + lv528 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_encoder_attn_out_proj_weight2, reshape576, model_decoder_layers_18_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add444: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add441, lv528) + layer_norm121: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add444, model_decoder_layers_18_final_layer_norm_weight2, model_decoder_layers_18_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv82_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_18_fc1_weight2, layer_norm121, model_decoder_layers_18_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv529 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_18_fc2_weight2, lv82_1, model_decoder_layers_18_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add447: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add444, lv529) + layer_norm122: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add447, model_decoder_layers_19_self_attn_layer_norm_weight2, model_decoder_layers_19_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv530 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_q_proj_weight2, layer_norm122, model_decoder_layers_19_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape577: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv530, R.shape([1, seq_len, 20, 64])) + lv117 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", 
(model_decoder_layers_19_self_attn_k_proj_weight2, layer_norm122), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape578: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv117, R.shape([1, seq_len, 20, 64])) + lv531 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_v_proj_weight2, layer_norm122, model_decoder_layers_19_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape579: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv531, R.shape([1, seq_len, 20, 64])) + concat19: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape577, reshape578, reshape579), axis=2) + reshape580: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat19, R.shape([seq_len, 60, 64])) + lv107_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape580), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape581: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv107_1, R.shape([1, seq_len, 20, 64])) + reshape582: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape581, R.shape([1, seq_len, 1280])) + lv532 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_out_proj_weight2, reshape582, model_decoder_layers_19_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add451: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add447, lv532) + layer_norm123: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add451, model_decoder_layers_19_encoder_attn_layer_norm_weight2, model_decoder_layers_19_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv533 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_encoder_attn_q_proj_weight2, layer_norm123, model_decoder_layers_19_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape583: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv533, R.shape([1, seq_len, 20, 64])) + reshape584: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape583, R.shape([seq_len, 20, 64])) + lv108_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape584), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape585: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv108_1, R.shape([1, seq_len, 20, 64])) + reshape586: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape585, R.shape([1, seq_len, 1280])) + lv534 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_encoder_attn_out_proj_weight2, reshape586, model_decoder_layers_19_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add454: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add451, lv534) + layer_norm124: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add454, model_decoder_layers_19_final_layer_norm_weight2, model_decoder_layers_19_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv83_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_19_fc1_weight2, 
layer_norm124, model_decoder_layers_19_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv535 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_19_fc2_weight2, lv83_1, model_decoder_layers_19_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add457: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add454, lv535) + layer_norm125: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add457, model_decoder_layers_20_self_attn_layer_norm_weight2, model_decoder_layers_20_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv536 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_q_proj_weight2, layer_norm125, model_decoder_layers_20_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape587: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv536, R.shape([1, seq_len, 20, 64])) + lv118 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_20_self_attn_k_proj_weight2, layer_norm125), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape588: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv118, R.shape([1, seq_len, 20, 64])) + lv537 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_v_proj_weight2, layer_norm125, model_decoder_layers_20_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape589: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv537, R.shape([1, seq_len, 20, 64])) + concat20: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape587, reshape588, reshape589), axis=2) + reshape590: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat20, R.shape([seq_len, 60, 64])) + lv109_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape590), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape591: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv109_1, R.shape([1, seq_len, 20, 64])) + reshape592: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape591, R.shape([1, seq_len, 1280])) + lv538 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_out_proj_weight2, reshape592, model_decoder_layers_20_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add461: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add457, lv538) + layer_norm126: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add461, model_decoder_layers_20_encoder_attn_layer_norm_weight2, model_decoder_layers_20_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv539 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_encoder_attn_q_proj_weight2, layer_norm126, model_decoder_layers_20_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape593: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv539, R.shape([1, seq_len, 20, 64])) + reshape594: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape593, R.shape([seq_len, 20, 64])) + lv110_1 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape594), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape595: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv110_1, R.shape([1, seq_len, 20, 64])) + reshape596: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape595, R.shape([1, seq_len, 1280])) + lv540 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_encoder_attn_out_proj_weight2, reshape596, model_decoder_layers_20_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add464: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add461, lv540) + layer_norm127: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add464, model_decoder_layers_20_final_layer_norm_weight2, model_decoder_layers_20_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv84_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_20_fc1_weight2, layer_norm127, model_decoder_layers_20_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv541 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_20_fc2_weight2, lv84_1, model_decoder_layers_20_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add467: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add464, lv541) + layer_norm128: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add467, model_decoder_layers_21_self_attn_layer_norm_weight2, model_decoder_layers_21_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv542 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_q_proj_weight2, layer_norm128, model_decoder_layers_21_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape597: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv542, R.shape([1, seq_len, 20, 64])) + lv119 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_21_self_attn_k_proj_weight2, layer_norm128), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape598: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv119, R.shape([1, seq_len, 20, 64])) + lv543 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_v_proj_weight2, layer_norm128, model_decoder_layers_21_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape599: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv543, R.shape([1, seq_len, 20, 64])) + concat21: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape597, reshape598, reshape599), axis=2) + reshape600: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat21, R.shape([seq_len, 60, 64])) + lv111_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape600), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape601: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv111_1, R.shape([1, seq_len, 20, 64])) + reshape602: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape601, R.shape([1, 
seq_len, 1280])) + lv544 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_out_proj_weight2, reshape602, model_decoder_layers_21_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add471: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add467, lv544) + layer_norm129: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add471, model_decoder_layers_21_encoder_attn_layer_norm_weight2, model_decoder_layers_21_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv545 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_encoder_attn_q_proj_weight2, layer_norm129, model_decoder_layers_21_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape603: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv545, R.shape([1, seq_len, 20, 64])) + reshape604: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape603, R.shape([seq_len, 20, 64])) + lv112_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape604), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape605: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv112_1, R.shape([1, seq_len, 20, 64])) + reshape606: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape605, R.shape([1, seq_len, 1280])) + lv546 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_encoder_attn_out_proj_weight2, reshape606, model_decoder_layers_21_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add474: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add471, lv546) + layer_norm130: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add474, model_decoder_layers_21_final_layer_norm_weight2, model_decoder_layers_21_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv85_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_21_fc1_weight2, layer_norm130, model_decoder_layers_21_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv547 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_21_fc2_weight2, lv85_1, model_decoder_layers_21_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add477: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add474, lv547) + layer_norm131: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add477, model_decoder_layers_22_self_attn_layer_norm_weight2, model_decoder_layers_22_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv548 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_q_proj_weight2, layer_norm131, model_decoder_layers_22_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape607: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv548, R.shape([1, seq_len, 20, 64])) + lv120 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_22_self_attn_k_proj_weight2, layer_norm131), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape608: 
R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv120, R.shape([1, seq_len, 20, 64])) + lv549 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_v_proj_weight2, layer_norm131, model_decoder_layers_22_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape609: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv549, R.shape([1, seq_len, 20, 64])) + concat22: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape607, reshape608, reshape609), axis=2) + reshape610: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat22, R.shape([seq_len, 60, 64])) + lv113_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape610), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape611: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv113_1, R.shape([1, seq_len, 20, 64])) + reshape612: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape611, R.shape([1, seq_len, 1280])) + lv550 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_out_proj_weight2, reshape612, model_decoder_layers_22_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add481: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add477, lv550) + layer_norm132: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add481, model_decoder_layers_22_encoder_attn_layer_norm_weight2, model_decoder_layers_22_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv551 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_encoder_attn_q_proj_weight2, layer_norm132, model_decoder_layers_22_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape613: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv551, R.shape([1, seq_len, 20, 64])) + reshape614: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape613, R.shape([seq_len, 20, 64])) + lv114_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape614), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape615: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv114_1, R.shape([1, seq_len, 20, 64])) + reshape616: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape615, R.shape([1, seq_len, 1280])) + lv552 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_encoder_attn_out_proj_weight2, reshape616, model_decoder_layers_22_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add484: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add481, lv552) + layer_norm133: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add484, model_decoder_layers_22_final_layer_norm_weight2, model_decoder_layers_22_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv86_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_22_fc1_weight2, layer_norm133, model_decoder_layers_22_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv553 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_22_fc2_weight2, lv86_1, model_decoder_layers_22_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add487: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add484, lv553) + layer_norm134: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add487, model_decoder_layers_23_self_attn_layer_norm_weight2, model_decoder_layers_23_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv554 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_q_proj_weight2, layer_norm134, model_decoder_layers_23_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape617: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv554, R.shape([1, seq_len, 20, 64])) + lv121 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_23_self_attn_k_proj_weight2, layer_norm134), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape618: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv121, R.shape([1, seq_len, 20, 64])) + lv555 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_v_proj_weight2, layer_norm134, model_decoder_layers_23_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape619: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv555, R.shape([1, seq_len, 20, 64])) + concat23: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape617, reshape618, reshape619), axis=2) + reshape620: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat23, R.shape([seq_len, 60, 64])) + lv115_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape620), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape621: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv115_1, R.shape([1, seq_len, 20, 64])) + reshape622: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape621, R.shape([1, seq_len, 1280])) + lv556 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_out_proj_weight2, reshape622, model_decoder_layers_23_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add491: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add487, lv556) + layer_norm135: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add491, model_decoder_layers_23_encoder_attn_layer_norm_weight2, model_decoder_layers_23_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv557 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_encoder_attn_q_proj_weight2, layer_norm135, model_decoder_layers_23_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape623: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv557, R.shape([1, seq_len, 20, 64])) + reshape624: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape623, R.shape([seq_len, 20, 64])) + lv116_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape624), 
out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape625: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv116_1, R.shape([1, seq_len, 20, 64])) + reshape626: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape625, R.shape([1, seq_len, 1280])) + lv558 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_encoder_attn_out_proj_weight2, reshape626, model_decoder_layers_23_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add494: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add491, lv558) + layer_norm136: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add494, model_decoder_layers_23_final_layer_norm_weight2, model_decoder_layers_23_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv87_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_23_fc1_weight2, layer_norm136, model_decoder_layers_23_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv559 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_23_fc2_weight2, lv87_1, model_decoder_layers_23_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add497: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add494, lv559) + layer_norm137: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add497, model_decoder_layers_24_self_attn_layer_norm_weight2, model_decoder_layers_24_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv560 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_self_attn_q_proj_weight2, layer_norm137, model_decoder_layers_24_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape627: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv560, R.shape([1, seq_len, 20, 64])) + lv122 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_24_self_attn_k_proj_weight2, layer_norm137), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape628: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv122, R.shape([1, seq_len, 20, 64])) + lv561 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_self_attn_v_proj_weight2, layer_norm137, model_decoder_layers_24_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape629: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv561, R.shape([1, seq_len, 20, 64])) + concat24: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape627, reshape628, reshape629), axis=2) + reshape630: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat24, R.shape([seq_len, 60, 64])) + lv117_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape630), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape631: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv117_1, R.shape([1, seq_len, 20, 64])) + reshape632: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape631, R.shape([1, seq_len, 1280])) + lv562 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", 
(model_decoder_layers_24_self_attn_out_proj_weight2, reshape632, model_decoder_layers_24_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add501: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add497, lv562) + layer_norm138: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add501, model_decoder_layers_24_encoder_attn_layer_norm_weight2, model_decoder_layers_24_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv563 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_encoder_attn_q_proj_weight2, layer_norm138, model_decoder_layers_24_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape633: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv563, R.shape([1, seq_len, 20, 64])) + reshape634: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape633, R.shape([seq_len, 20, 64])) + lv118_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape634), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape635: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv118_1, R.shape([1, seq_len, 20, 64])) + reshape636: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape635, R.shape([1, seq_len, 1280])) + lv564 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_encoder_attn_out_proj_weight2, reshape636, model_decoder_layers_24_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add504: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add501, lv564) + layer_norm139: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add504, model_decoder_layers_24_final_layer_norm_weight2, model_decoder_layers_24_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv88_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_24_fc1_weight2, layer_norm139, model_decoder_layers_24_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv565 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_24_fc2_weight2, lv88_1, model_decoder_layers_24_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add507: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add504, lv565) + layer_norm140: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add507, model_decoder_layers_25_self_attn_layer_norm_weight2, model_decoder_layers_25_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv566 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_q_proj_weight2, layer_norm140, model_decoder_layers_25_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape637: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv566, R.shape([1, seq_len, 20, 64])) + lv123 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_25_self_attn_k_proj_weight2, layer_norm140), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape638: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv123, R.shape([1, seq_len, 20, 64])) + lv567 
= R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_v_proj_weight2, layer_norm140, model_decoder_layers_25_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape639: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv567, R.shape([1, seq_len, 20, 64])) + concat25: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape637, reshape638, reshape639), axis=2) + reshape640: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat25, R.shape([seq_len, 60, 64])) + lv119_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape640), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape641: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv119_1, R.shape([1, seq_len, 20, 64])) + reshape642: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape641, R.shape([1, seq_len, 1280])) + lv568 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_out_proj_weight2, reshape642, model_decoder_layers_25_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add511: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add507, lv568) + layer_norm141: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add511, model_decoder_layers_25_encoder_attn_layer_norm_weight2, model_decoder_layers_25_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv569 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_encoder_attn_q_proj_weight2, layer_norm141, model_decoder_layers_25_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape643: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv569, R.shape([1, seq_len, 20, 64])) + reshape644: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape643, R.shape([seq_len, 20, 64])) + lv120_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape644), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape645: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv120_1, R.shape([1, seq_len, 20, 64])) + reshape646: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape645, R.shape([1, seq_len, 1280])) + lv570 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_encoder_attn_out_proj_weight2, reshape646, model_decoder_layers_25_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add514: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add511, lv570) + layer_norm142: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add514, model_decoder_layers_25_final_layer_norm_weight2, model_decoder_layers_25_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv89_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_25_fc1_weight2, layer_norm142, model_decoder_layers_25_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv571 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_25_fc2_weight2, lv89_1, 
model_decoder_layers_25_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add517: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add514, lv571) + layer_norm143: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add517, model_decoder_layers_26_self_attn_layer_norm_weight2, model_decoder_layers_26_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv572 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_q_proj_weight2, layer_norm143, model_decoder_layers_26_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape647: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv572, R.shape([1, seq_len, 20, 64])) + lv124 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_26_self_attn_k_proj_weight2, layer_norm143), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape648: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv124, R.shape([1, seq_len, 20, 64])) + lv573 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_v_proj_weight2, layer_norm143, model_decoder_layers_26_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape649: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv573, R.shape([1, seq_len, 20, 64])) + concat26: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape647, reshape648, reshape649), axis=2) + reshape650: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat26, R.shape([seq_len, 60, 64])) + lv121_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape650), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape651: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv121_1, R.shape([1, seq_len, 20, 64])) + reshape652: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape651, R.shape([1, seq_len, 1280])) + lv574 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_out_proj_weight2, reshape652, model_decoder_layers_26_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add521: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add517, lv574) + layer_norm144: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add521, model_decoder_layers_26_encoder_attn_layer_norm_weight2, model_decoder_layers_26_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv575 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_encoder_attn_q_proj_weight2, layer_norm144, model_decoder_layers_26_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape653: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv575, R.shape([1, seq_len, 20, 64])) + reshape654: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape653, R.shape([seq_len, 20, 64])) + lv122_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape654), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape655: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv122_1, 
R.shape([1, seq_len, 20, 64])) + reshape656: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape655, R.shape([1, seq_len, 1280])) + lv576 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_encoder_attn_out_proj_weight2, reshape656, model_decoder_layers_26_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add524: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add521, lv576) + layer_norm145: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add524, model_decoder_layers_26_final_layer_norm_weight2, model_decoder_layers_26_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv90_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_26_fc1_weight2, layer_norm145, model_decoder_layers_26_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv577 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_26_fc2_weight2, lv90_1, model_decoder_layers_26_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add527: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add524, lv577) + layer_norm146: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add527, model_decoder_layers_27_self_attn_layer_norm_weight2, model_decoder_layers_27_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv578 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_q_proj_weight2, layer_norm146, model_decoder_layers_27_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape657: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv578, R.shape([1, seq_len, 20, 64])) + lv125 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_27_self_attn_k_proj_weight2, layer_norm146), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape658: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv125, R.shape([1, seq_len, 20, 64])) + lv579 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_v_proj_weight2, layer_norm146, model_decoder_layers_27_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape659: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv579, R.shape([1, seq_len, 20, 64])) + concat27: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape657, reshape658, reshape659), axis=2) + reshape660: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat27, R.shape([seq_len, 60, 64])) + lv123_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape660), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape661: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv123_1, R.shape([1, seq_len, 20, 64])) + reshape662: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape661, R.shape([1, seq_len, 1280])) + lv580 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_out_proj_weight2, reshape662, model_decoder_layers_27_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + 
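+ # Decoder layers 27-31 below repeat the same pre-LN block as layers 25-26 above:
+ #   h = h + OutProj(paged_kv_attention(concat(Q, K, V)))  # self-attention through the paged KV cache
+ #   h = h + OutProj(cross_attention(Q))                   # cross-attention; encoder K/V are read from the cache
+ #   h = h + FC2(GELU(FC1(h)))                             # feed-forward via fused permute_dims+matmul+bias cuBLAS kernels
+ # with a dedicated layer_norm in front of each of the three sub-blocks.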
+ layer_norm146: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add527, model_decoder_layers_27_self_attn_layer_norm_weight2, model_decoder_layers_27_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv578 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_q_proj_weight2, layer_norm146, model_decoder_layers_27_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape657: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv578, R.shape([1, seq_len, 20, 64]))
+ lv125 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_27_self_attn_k_proj_weight2, layer_norm146), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape658: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv125, R.shape([1, seq_len, 20, 64]))
+ lv579 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_v_proj_weight2, layer_norm146, model_decoder_layers_27_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape659: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv579, R.shape([1, seq_len, 20, 64]))
+ concat27: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape657, reshape658, reshape659), axis=2)
+ reshape660: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat27, R.shape([seq_len, 60, 64]))
+ lv123_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape660), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape661: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv123_1, R.shape([1, seq_len, 20, 64]))
+ reshape662: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape661, R.shape([1, seq_len, 1280]))
+ lv580 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_out_proj_weight2, reshape662, model_decoder_layers_27_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add531: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add527, lv580)
+ layer_norm147: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add531, model_decoder_layers_27_encoder_attn_layer_norm_weight2, model_decoder_layers_27_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv581 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_encoder_attn_q_proj_weight2, layer_norm147, model_decoder_layers_27_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape663: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv581, R.shape([1, seq_len, 20, 64]))
+ reshape664: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape663, R.shape([seq_len, 20, 64]))
+ lv124_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape664), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape665: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv124_1, R.shape([1, seq_len, 20, 64]))
+ reshape666: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape665, R.shape([1, seq_len, 1280]))
+ lv582 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_encoder_attn_out_proj_weight2, reshape666, model_decoder_layers_27_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add534: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add531, lv582)
+ layer_norm148: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add534, model_decoder_layers_27_final_layer_norm_weight2, model_decoder_layers_27_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv91_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_27_fc1_weight2, layer_norm148, model_decoder_layers_27_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv583 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_27_fc2_weight2, lv91_1, model_decoder_layers_27_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add537: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add534, lv583)
+ layer_norm149: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add537, model_decoder_layers_28_self_attn_layer_norm_weight2, model_decoder_layers_28_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv584 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_q_proj_weight2, layer_norm149, model_decoder_layers_28_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape667: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv584, R.shape([1, seq_len, 20, 64]))
+ lv126 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_28_self_attn_k_proj_weight2, layer_norm149), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape668: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv126, R.shape([1, seq_len, 20, 64]))
+ lv585 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_v_proj_weight2, layer_norm149, model_decoder_layers_28_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape669: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv585, R.shape([1, seq_len, 20, 64]))
+ concat28: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape667, reshape668, reshape669), axis=2)
+ reshape670: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat28, R.shape([seq_len, 60, 64]))
+ lv125_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape670), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape671: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv125_1, R.shape([1, seq_len, 20, 64]))
+ reshape672: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape671, R.shape([1, seq_len, 1280]))
+ lv586 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_out_proj_weight2, reshape672, model_decoder_layers_28_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add541: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add537, lv586)
+ layer_norm150: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add541, model_decoder_layers_28_encoder_attn_layer_norm_weight2, model_decoder_layers_28_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv587 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_encoder_attn_q_proj_weight2, layer_norm150, model_decoder_layers_28_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape673: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv587, R.shape([1, seq_len, 20, 64]))
+ reshape674: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape673, R.shape([seq_len, 20, 64]))
+ lv126_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape674), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape675: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv126_1, R.shape([1, seq_len, 20, 64]))
+ reshape676: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape675, R.shape([1, seq_len, 1280]))
+ lv588 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_encoder_attn_out_proj_weight2, reshape676, model_decoder_layers_28_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add544: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add541, lv588)
+ layer_norm151: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add544, model_decoder_layers_28_final_layer_norm_weight2, model_decoder_layers_28_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv92_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_28_fc1_weight2, layer_norm151, model_decoder_layers_28_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv589 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_28_fc2_weight2, lv92_1, model_decoder_layers_28_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add547: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add544, lv589)
+ layer_norm152: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add547, model_decoder_layers_29_self_attn_layer_norm_weight2, model_decoder_layers_29_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv590 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_q_proj_weight2, layer_norm152, model_decoder_layers_29_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape677: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv590, R.shape([1, seq_len, 20, 64]))
+ lv127 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_29_self_attn_k_proj_weight2, layer_norm152), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape678: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv127, R.shape([1, seq_len, 20, 64]))
+ lv591 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_v_proj_weight2, layer_norm152, model_decoder_layers_29_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape679: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv591, R.shape([1, seq_len, 20, 64]))
+ concat29: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape677, reshape678, reshape679), axis=2)
+ reshape680: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat29, R.shape([seq_len, 60, 64]))
+ lv127_1 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape680), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape681: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv127_1, R.shape([1, seq_len, 20, 64]))
+ reshape682: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape681, R.shape([1, seq_len, 1280]))
+ lv592 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_out_proj_weight2, reshape682, model_decoder_layers_29_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add551: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add547, lv592)
+ layer_norm153: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add551, model_decoder_layers_29_encoder_attn_layer_norm_weight2, model_decoder_layers_29_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv593 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_encoder_attn_q_proj_weight2, layer_norm153, model_decoder_layers_29_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape683: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv593, R.shape([1, seq_len, 20, 64]))
+ reshape684: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape683, R.shape([seq_len, 20, 64]))
+ lv128 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape684), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape685: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv128, R.shape([1, seq_len, 20, 64]))
+ reshape686: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape685, R.shape([1, seq_len, 1280]))
+ lv594 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_encoder_attn_out_proj_weight2, reshape686, model_decoder_layers_29_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add554: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add551, lv594)
+ layer_norm154: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add554, model_decoder_layers_29_final_layer_norm_weight2, model_decoder_layers_29_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv93_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_29_fc1_weight2, layer_norm154, model_decoder_layers_29_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv595 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_29_fc2_weight2, lv93_1, model_decoder_layers_29_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add557: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add554, lv595)
+ layer_norm155: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add557, model_decoder_layers_30_self_attn_layer_norm_weight2, model_decoder_layers_30_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv596 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_q_proj_weight2, layer_norm155, model_decoder_layers_30_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape687: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv596, R.shape([1, seq_len, 20, 64]))
+ lv128_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_30_self_attn_k_proj_weight2, layer_norm155), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape688: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv128_1, R.shape([1, seq_len, 20, 64]))
+ lv597 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_v_proj_weight2, layer_norm155, model_decoder_layers_30_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape689: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv597, R.shape([1, seq_len, 20, 64]))
+ concat30: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape687, reshape688, reshape689), axis=2)
+ reshape690: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat30, R.shape([seq_len, 60, 64]))
+ lv129 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape690), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape691: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv129, R.shape([1, seq_len, 20, 64]))
+ reshape692: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape691, R.shape([1, seq_len, 1280]))
+ lv598 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_out_proj_weight2, reshape692, model_decoder_layers_30_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add561: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add557, lv598)
+ layer_norm156: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add561, model_decoder_layers_30_encoder_attn_layer_norm_weight2, model_decoder_layers_30_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv599 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_encoder_attn_q_proj_weight2, layer_norm156, model_decoder_layers_30_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape693: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv599, R.shape([1, seq_len, 20, 64]))
+ reshape694: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape693, R.shape([seq_len, 20, 64]))
+ lv130 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape694), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape695: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv130, R.shape([1, seq_len, 20, 64]))
+ reshape696: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape695, R.shape([1, seq_len, 1280]))
+ lv600 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_encoder_attn_out_proj_weight2, reshape696, model_decoder_layers_30_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add564: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add561, lv600)
+ layer_norm157: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add564, model_decoder_layers_30_final_layer_norm_weight2, model_decoder_layers_30_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv94_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_30_fc1_weight2, layer_norm157, model_decoder_layers_30_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv601 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_30_fc2_weight2, lv94_1, model_decoder_layers_30_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add567: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add564, lv601)
+ layer_norm158: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add567, model_decoder_layers_31_self_attn_layer_norm_weight2, model_decoder_layers_31_self_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv602 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_q_proj_weight2, layer_norm158, model_decoder_layers_31_self_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape697: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv602, R.shape([1, seq_len, 20, 64]))
+ lv129_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_31_self_attn_k_proj_weight2, layer_norm158), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape698: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv129_1, R.shape([1, seq_len, 20, 64]))
+ lv603 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_v_proj_weight2, layer_norm158, model_decoder_layers_31_self_attn_v_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape699: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv603, R.shape([1, seq_len, 20, 64]))
+ concat31: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape697, reshape698, reshape699), axis=2)
+ reshape700: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat31, R.shape([seq_len, 60, 64]))
+ lv131 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape700), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape701: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv131, R.shape([1, seq_len, 20, 64]))
+ reshape702: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape701, R.shape([1, seq_len, 1280]))
+ lv604 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_out_proj_weight2, reshape702, model_decoder_layers_31_self_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add571: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add567, lv604)
+ layer_norm159: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add571, model_decoder_layers_31_encoder_attn_layer_norm_weight2, model_decoder_layers_31_encoder_attn_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv605 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_encoder_attn_q_proj_weight2, layer_norm159, model_decoder_layers_31_encoder_attn_q_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape703: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv605, R.shape([1, seq_len, 20, 64]))
+ reshape704: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape703, R.shape([seq_len, 20, 64]))
+ lv132 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape704), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape705: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv132, R.shape([1, seq_len, 20, 64]))
+ reshape706: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape705, R.shape([1, seq_len, 1280]))
+ lv606 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_encoder_attn_out_proj_weight2, reshape706, model_decoder_layers_31_encoder_attn_out_proj_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add574: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add571, lv606)
+ layer_norm160: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add574, model_decoder_layers_31_final_layer_norm_weight2, model_decoder_layers_31_final_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv95_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_31_fc1_weight2, layer_norm160, model_decoder_layers_31_fc1_bias2), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv607 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_31_fc2_weight2, lv95_1, model_decoder_layers_31_fc2_bias2), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add577: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add574, lv607)
+ layer_norm161: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add577, model_decoder_layer_norm_weight2, model_decoder_layer_norm_bias2, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ take2: R.Tensor((1, batch_size, 1280), dtype="float16") = R.take(layer_norm161, logit_positions, axis=1)
+ lv130_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul5_cublas", (model_decoder_embed_tokens_weight2, take2), out_sinfo=R.Tensor((1, batch_size, 51866), dtype="float32"))
+ gv2: R.Tensor((1, batch_size, 51866), dtype="float32") = lv130_1
+ R.output(gv2)
+ return gv2
+
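+ # create_tir_paged_kv_cache below assembles the paged KV cache from the module's TIR kernels
+ # (transpose_append, batched prefill/decode attention and their sliding-window variants, ragged
+ # prefill, state merging, fused rope, page copy and compaction, tree attention); the decode entry
+ # point that follows consumes that cache one token per step.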
+ @R.function
+ def create_tir_paged_kv_cache(max_batch_size_: R.Shape(["max_batch_size"]), max_total_seq_len_: R.Shape(["max_total_seq_len"]), prefill_chunk_size_: R.Shape(["prefill_chunk_size"]), page_size_: R.Shape(["page_size"]), support_sliding_window_: R.Shape(["support_sliding_window"])) -> R.Object:
+ max_batch_size = T.int64()
+ max_total_seq_len = T.int64()
+ prefill_chunk_size = T.int64()
+ page_size = T.int64()
+ support_sliding_window = T.int64()
+ R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}})
+ cls = Module
+ gv: R.Tensor((), dtype="float16") = R.zeros(R.shape([]), dtype="float16")
+ paged_kv_cache: R.Object = R.call_pure_packed("vm.builtin.paged_attention_kv_cache_create_reduced", R.shape([max_batch_size, max_total_seq_len, prefill_chunk_size, page_size, support_sliding_window]), R.prim_value(32), R.prim_value(20), R.prim_value(20), R.prim_value(64), R.prim_value(0), R.prim_value(1), R.prim_value(1), gv, cls.tir_kv_cache_transpose_append, cls.batch_prefill_paged_kv, cls.batch_decode_paged_kv, cls.batch_prefill_paged_kv_sliding_window, cls.batch_decode_paged_kv_sliding_window, cls.batch_prefill_ragged_kv, cls.merge_state_inplace, cls.fused_rope, cls.copy_single_page, cls.tir_kv_cache_debug_get_kv, cls.compact_kv_copy, cls.batch_tree_attn, sinfo_args=(R.Object,))
+ return paged_kv_cache
+
+ @R.function
+ def decode(input_ids: R.Tensor((1, 1), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,),
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 
5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 
1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor((1, 1, 51866), dtype="float32"): + R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + cls = Module + with R.dataflow(): + model_decoder_embed_tokens_weight5: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] + model_decoder_embed_positions_weight5: R.Tensor((448, 1280), dtype="float16") = packed_params[488] + model_decoder_layers_0_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] + model_decoder_layers_0_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] + model_decoder_layers_0_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[491] + model_decoder_layers_0_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] + model_decoder_layers_0_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[493] + model_decoder_layers_0_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] + model_decoder_layers_0_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[495] + model_decoder_layers_0_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[496] + model_decoder_layers_0_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[497] + model_decoder_layers_0_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] + model_decoder_layers_0_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[502] + model_decoder_layers_0_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] + model_decoder_layers_0_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[504] + model_decoder_layers_0_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[505] + model_decoder_layers_0_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[506] + model_decoder_layers_0_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] + model_decoder_layers_0_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[508] + model_decoder_layers_0_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] + model_decoder_layers_0_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[510] + model_decoder_layers_0_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[511] + model_decoder_layers_0_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[512] + model_decoder_layers_1_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] + model_decoder_layers_1_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] + model_decoder_layers_1_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[515] + model_decoder_layers_1_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] + model_decoder_layers_1_self_attn_q_proj_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[517] + model_decoder_layers_1_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] + model_decoder_layers_1_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[519] + model_decoder_layers_1_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[520] + model_decoder_layers_1_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[521] + model_decoder_layers_1_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] + model_decoder_layers_1_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[526] + model_decoder_layers_1_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] + model_decoder_layers_1_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[528] + model_decoder_layers_1_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[529] + model_decoder_layers_1_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[530] + model_decoder_layers_1_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] + model_decoder_layers_1_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[532] + model_decoder_layers_1_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] + model_decoder_layers_1_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[534] + model_decoder_layers_1_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[535] + model_decoder_layers_1_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[536] + model_decoder_layers_2_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] + model_decoder_layers_2_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] + model_decoder_layers_2_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[539] + model_decoder_layers_2_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] + model_decoder_layers_2_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[541] + model_decoder_layers_2_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] + model_decoder_layers_2_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[543] + model_decoder_layers_2_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[544] + model_decoder_layers_2_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[545] + model_decoder_layers_2_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] + model_decoder_layers_2_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[550] + model_decoder_layers_2_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] + model_decoder_layers_2_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[552] + model_decoder_layers_2_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[553] + model_decoder_layers_2_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[554] + model_decoder_layers_2_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] + 
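# packed_params layout: each decoder layer occupies 24 consecutive slots, so self_attn_k_proj_weight for layer n sits at 489 + 24 * n (489, 513, 537, ...); the bindings below follow that stride. + 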
model_decoder_layers_2_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[556] + model_decoder_layers_2_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] + model_decoder_layers_2_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[558] + model_decoder_layers_2_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[559] + model_decoder_layers_2_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[560] + model_decoder_layers_3_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] + model_decoder_layers_3_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] + model_decoder_layers_3_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[563] + model_decoder_layers_3_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] + model_decoder_layers_3_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[565] + model_decoder_layers_3_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] + model_decoder_layers_3_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[567] + model_decoder_layers_3_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[568] + model_decoder_layers_3_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[569] + model_decoder_layers_3_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] + model_decoder_layers_3_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[574] + model_decoder_layers_3_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] + model_decoder_layers_3_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[576] + model_decoder_layers_3_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[577] + model_decoder_layers_3_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[578] + model_decoder_layers_3_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] + model_decoder_layers_3_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[580] + model_decoder_layers_3_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] + model_decoder_layers_3_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[582] + model_decoder_layers_3_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[583] + model_decoder_layers_3_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[584] + model_decoder_layers_4_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] + model_decoder_layers_4_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] + model_decoder_layers_4_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[587] + model_decoder_layers_4_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] + model_decoder_layers_4_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[589] + model_decoder_layers_4_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] + model_decoder_layers_4_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[591] + 
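# three slots per layer are never bound in this function (e.g. 498-500 for layer 0, 546-548 for layer 2); presumably these hold the encoder_attn k/v projection parameters, which this decode path reads from the cross-attention cache rather than re-binding here. + 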
model_decoder_layers_4_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[592] + model_decoder_layers_4_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[593] + model_decoder_layers_4_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] + model_decoder_layers_4_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[598] + model_decoder_layers_4_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] + model_decoder_layers_4_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[600] + model_decoder_layers_4_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[601] + model_decoder_layers_4_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[602] + model_decoder_layers_4_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] + model_decoder_layers_4_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[604] + model_decoder_layers_4_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] + model_decoder_layers_4_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[606] + model_decoder_layers_4_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[607] + model_decoder_layers_4_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[608] + model_decoder_layers_5_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] + model_decoder_layers_5_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] + model_decoder_layers_5_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[611] + model_decoder_layers_5_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] + model_decoder_layers_5_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[613] + model_decoder_layers_5_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] + model_decoder_layers_5_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[615] + model_decoder_layers_5_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[616] + model_decoder_layers_5_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[617] + model_decoder_layers_5_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] + model_decoder_layers_5_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[622] + model_decoder_layers_5_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] + model_decoder_layers_5_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[624] + model_decoder_layers_5_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[625] + model_decoder_layers_5_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[626] + model_decoder_layers_5_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] + model_decoder_layers_5_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[628] + model_decoder_layers_5_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] + model_decoder_layers_5_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[630] + 
model_decoder_layers_5_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[631] + model_decoder_layers_5_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[632] + model_decoder_layers_6_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] + model_decoder_layers_6_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] + model_decoder_layers_6_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[635] + model_decoder_layers_6_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] + model_decoder_layers_6_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[637] + model_decoder_layers_6_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] + model_decoder_layers_6_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[639] + model_decoder_layers_6_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[640] + model_decoder_layers_6_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[641] + model_decoder_layers_6_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] + model_decoder_layers_6_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[646] + model_decoder_layers_6_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] + model_decoder_layers_6_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[648] + model_decoder_layers_6_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[649] + model_decoder_layers_6_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[650] + model_decoder_layers_6_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] + model_decoder_layers_6_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[652] + model_decoder_layers_6_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] + model_decoder_layers_6_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[654] + model_decoder_layers_6_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[655] + model_decoder_layers_6_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[656] + model_decoder_layers_7_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] + model_decoder_layers_7_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] + model_decoder_layers_7_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[659] + model_decoder_layers_7_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] + model_decoder_layers_7_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[661] + model_decoder_layers_7_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] + model_decoder_layers_7_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[663] + model_decoder_layers_7_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[664] + model_decoder_layers_7_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[665] + model_decoder_layers_7_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[669] + model_decoder_layers_7_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[670] + model_decoder_layers_7_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] + model_decoder_layers_7_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[672] + model_decoder_layers_7_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[673] + model_decoder_layers_7_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[674] + model_decoder_layers_7_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] + model_decoder_layers_7_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[676] + model_decoder_layers_7_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] + model_decoder_layers_7_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[678] + model_decoder_layers_7_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[679] + model_decoder_layers_7_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[680] + model_decoder_layers_8_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] + model_decoder_layers_8_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] + model_decoder_layers_8_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[683] + model_decoder_layers_8_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] + model_decoder_layers_8_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[685] + model_decoder_layers_8_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] + model_decoder_layers_8_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[687] + model_decoder_layers_8_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[688] + model_decoder_layers_8_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[689] + model_decoder_layers_8_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] + model_decoder_layers_8_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[694] + model_decoder_layers_8_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[695] + model_decoder_layers_8_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[696] + model_decoder_layers_8_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[697] + model_decoder_layers_8_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[698] + model_decoder_layers_8_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] + model_decoder_layers_8_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[700] + model_decoder_layers_8_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] + model_decoder_layers_8_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[702] + model_decoder_layers_8_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[703] + model_decoder_layers_8_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[704] + model_decoder_layers_9_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] + 
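# note: self_attn k_proj binds only a weight (slot 489 is immediately followed by v_proj_weight at 490, with no k_proj bias slot), consistent with Whisper's bias-free key projection. + 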
model_decoder_layers_9_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] + model_decoder_layers_9_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[707] + model_decoder_layers_9_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] + model_decoder_layers_9_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[709] + model_decoder_layers_9_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] + model_decoder_layers_9_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[711] + model_decoder_layers_9_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[712] + model_decoder_layers_9_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[713] + model_decoder_layers_9_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] + model_decoder_layers_9_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[718] + model_decoder_layers_9_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] + model_decoder_layers_9_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[720] + model_decoder_layers_9_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[721] + model_decoder_layers_9_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[722] + model_decoder_layers_9_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] + model_decoder_layers_9_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[724] + model_decoder_layers_9_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] + model_decoder_layers_9_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[726] + model_decoder_layers_9_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[727] + model_decoder_layers_9_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[728] + model_decoder_layers_10_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] + model_decoder_layers_10_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] + model_decoder_layers_10_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[731] + model_decoder_layers_10_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] + model_decoder_layers_10_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[733] + model_decoder_layers_10_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] + model_decoder_layers_10_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[735] + model_decoder_layers_10_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[736] + model_decoder_layers_10_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[737] + model_decoder_layers_10_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] + model_decoder_layers_10_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[742] + model_decoder_layers_10_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] + model_decoder_layers_10_encoder_attn_out_proj_bias5: R.Tensor((1280,), 
dtype="float16") = packed_params[744] + model_decoder_layers_10_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[745] + model_decoder_layers_10_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[746] + model_decoder_layers_10_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] + model_decoder_layers_10_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[748] + model_decoder_layers_10_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] + model_decoder_layers_10_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[750] + model_decoder_layers_10_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[751] + model_decoder_layers_10_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[752] + model_decoder_layers_11_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] + model_decoder_layers_11_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] + model_decoder_layers_11_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[755] + model_decoder_layers_11_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] + model_decoder_layers_11_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[757] + model_decoder_layers_11_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] + model_decoder_layers_11_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[759] + model_decoder_layers_11_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[760] + model_decoder_layers_11_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[761] + model_decoder_layers_11_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] + model_decoder_layers_11_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[766] + model_decoder_layers_11_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] + model_decoder_layers_11_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[768] + model_decoder_layers_11_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[769] + model_decoder_layers_11_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[770] + model_decoder_layers_11_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] + model_decoder_layers_11_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[772] + model_decoder_layers_11_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] + model_decoder_layers_11_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[774] + model_decoder_layers_11_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[775] + model_decoder_layers_11_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[776] + model_decoder_layers_12_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] + model_decoder_layers_12_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] + model_decoder_layers_12_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[779] + model_decoder_layers_12_self_attn_q_proj_weight5: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[780] + model_decoder_layers_12_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[781] + model_decoder_layers_12_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] + model_decoder_layers_12_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[783] + model_decoder_layers_12_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[784] + model_decoder_layers_12_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[785] + model_decoder_layers_12_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] + model_decoder_layers_12_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[790] + model_decoder_layers_12_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] + model_decoder_layers_12_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[792] + model_decoder_layers_12_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[793] + model_decoder_layers_12_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[794] + model_decoder_layers_12_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] + model_decoder_layers_12_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[796] + model_decoder_layers_12_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] + model_decoder_layers_12_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[798] + model_decoder_layers_12_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[799] + model_decoder_layers_12_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[800] + model_decoder_layers_13_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] + model_decoder_layers_13_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] + model_decoder_layers_13_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[803] + model_decoder_layers_13_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] + model_decoder_layers_13_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[805] + model_decoder_layers_13_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] + model_decoder_layers_13_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[807] + model_decoder_layers_13_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[808] + model_decoder_layers_13_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[809] + model_decoder_layers_13_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] + model_decoder_layers_13_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[814] + model_decoder_layers_13_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] + model_decoder_layers_13_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[816] + model_decoder_layers_13_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[817] + model_decoder_layers_13_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[818] + 
model_decoder_layers_13_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] + model_decoder_layers_13_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[820] + model_decoder_layers_13_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] + model_decoder_layers_13_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[822] + model_decoder_layers_13_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[823] + model_decoder_layers_13_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[824] + model_decoder_layers_14_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] + model_decoder_layers_14_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] + model_decoder_layers_14_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[827] + model_decoder_layers_14_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] + model_decoder_layers_14_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[829] + model_decoder_layers_14_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] + model_decoder_layers_14_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[831] + model_decoder_layers_14_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[832] + model_decoder_layers_14_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[833] + model_decoder_layers_14_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] + model_decoder_layers_14_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[838] + model_decoder_layers_14_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] + model_decoder_layers_14_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[840] + model_decoder_layers_14_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[841] + model_decoder_layers_14_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[842] + model_decoder_layers_14_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] + model_decoder_layers_14_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[844] + model_decoder_layers_14_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] + model_decoder_layers_14_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[846] + model_decoder_layers_14_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[847] + model_decoder_layers_14_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[848] + model_decoder_layers_15_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] + model_decoder_layers_15_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] + model_decoder_layers_15_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[851] + model_decoder_layers_15_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] + model_decoder_layers_15_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[853] + model_decoder_layers_15_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] + 
model_decoder_layers_15_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[855] + model_decoder_layers_15_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[856] + model_decoder_layers_15_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[857] + model_decoder_layers_15_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] + model_decoder_layers_15_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[862] + model_decoder_layers_15_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] + model_decoder_layers_15_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[864] + model_decoder_layers_15_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[865] + model_decoder_layers_15_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[866] + model_decoder_layers_15_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] + model_decoder_layers_15_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[868] + model_decoder_layers_15_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] + model_decoder_layers_15_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[870] + model_decoder_layers_15_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[871] + model_decoder_layers_15_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[872] + model_decoder_layers_16_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] + model_decoder_layers_16_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] + model_decoder_layers_16_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[875] + model_decoder_layers_16_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] + model_decoder_layers_16_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[877] + model_decoder_layers_16_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] + model_decoder_layers_16_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[879] + model_decoder_layers_16_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[880] + model_decoder_layers_16_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[881] + model_decoder_layers_16_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] + model_decoder_layers_16_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[886] + model_decoder_layers_16_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] + model_decoder_layers_16_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[888] + model_decoder_layers_16_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[889] + model_decoder_layers_16_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[890] + model_decoder_layers_16_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] + model_decoder_layers_16_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[892] + model_decoder_layers_16_fc2_weight5: R.Tensor((1280, 5120), 
dtype="float16") = packed_params[893] + model_decoder_layers_16_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[894] + model_decoder_layers_16_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[895] + model_decoder_layers_16_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[896] + model_decoder_layers_17_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] + model_decoder_layers_17_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] + model_decoder_layers_17_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[899] + model_decoder_layers_17_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] + model_decoder_layers_17_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[901] + model_decoder_layers_17_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] + model_decoder_layers_17_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[903] + model_decoder_layers_17_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[904] + model_decoder_layers_17_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[905] + model_decoder_layers_17_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] + model_decoder_layers_17_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[910] + model_decoder_layers_17_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] + model_decoder_layers_17_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[912] + model_decoder_layers_17_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[913] + model_decoder_layers_17_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[914] + model_decoder_layers_17_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] + model_decoder_layers_17_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[916] + model_decoder_layers_17_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] + model_decoder_layers_17_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[918] + model_decoder_layers_17_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[919] + model_decoder_layers_17_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[920] + model_decoder_layers_18_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] + model_decoder_layers_18_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] + model_decoder_layers_18_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[923] + model_decoder_layers_18_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] + model_decoder_layers_18_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[925] + model_decoder_layers_18_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] + model_decoder_layers_18_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[927] + model_decoder_layers_18_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[928] + 
model_decoder_layers_18_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[929] + model_decoder_layers_18_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] + model_decoder_layers_18_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[934] + model_decoder_layers_18_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] + model_decoder_layers_18_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[936] + model_decoder_layers_18_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[937] + model_decoder_layers_18_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[938] + model_decoder_layers_18_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] + model_decoder_layers_18_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[940] + model_decoder_layers_18_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] + model_decoder_layers_18_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[942] + model_decoder_layers_18_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[943] + model_decoder_layers_18_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[944] + model_decoder_layers_19_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] + model_decoder_layers_19_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] + model_decoder_layers_19_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[947] + model_decoder_layers_19_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] + model_decoder_layers_19_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[949] + model_decoder_layers_19_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] + model_decoder_layers_19_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[951] + model_decoder_layers_19_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[952] + model_decoder_layers_19_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[953] + model_decoder_layers_19_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] + model_decoder_layers_19_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[958] + model_decoder_layers_19_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] + model_decoder_layers_19_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[960] + model_decoder_layers_19_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[961] + model_decoder_layers_19_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[962] + model_decoder_layers_19_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] + model_decoder_layers_19_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[964] + model_decoder_layers_19_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] + model_decoder_layers_19_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[966] + model_decoder_layers_19_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = 
packed_params[967] + model_decoder_layers_19_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[968] + model_decoder_layers_20_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] + model_decoder_layers_20_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] + model_decoder_layers_20_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[971] + model_decoder_layers_20_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] + model_decoder_layers_20_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[973] + model_decoder_layers_20_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] + model_decoder_layers_20_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[975] + model_decoder_layers_20_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[976] + model_decoder_layers_20_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[977] + model_decoder_layers_20_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] + model_decoder_layers_20_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[982] + model_decoder_layers_20_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] + model_decoder_layers_20_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[984] + model_decoder_layers_20_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[985] + model_decoder_layers_20_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[986] + model_decoder_layers_20_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] + model_decoder_layers_20_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[988] + model_decoder_layers_20_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] + model_decoder_layers_20_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[990] + model_decoder_layers_20_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[991] + model_decoder_layers_20_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[992] + model_decoder_layers_21_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] + model_decoder_layers_21_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] + model_decoder_layers_21_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[995] + model_decoder_layers_21_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] + model_decoder_layers_21_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[997] + model_decoder_layers_21_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] + model_decoder_layers_21_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[999] + model_decoder_layers_21_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1000] + model_decoder_layers_21_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1001] + model_decoder_layers_21_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] + 
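+ # Sketch of the apparent index layout (an inference from the bindings above, not from
+ # the exporter): with a 24-wide stride per layer, the index for decoder layer L and
+ # slot s appears to be 489 + 24 * L + s, e.g. layer 17 starts at 489 + 24 * 17 = 897
+ # and layer 18 at 921; slots 9-11 are the skipped encoder-attn k/v entries.
+ #
+ #     def decoder_param_index(layer: int, slot: int) -> int:  # hypothetical helper
+ #         assert 0 <= slot < 24
+ #         return 489 + 24 * layer + slot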
model_decoder_layers_21_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1006] + model_decoder_layers_21_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] + model_decoder_layers_21_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1008] + model_decoder_layers_21_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1009] + model_decoder_layers_21_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1010] + model_decoder_layers_21_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] + model_decoder_layers_21_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1012] + model_decoder_layers_21_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] + model_decoder_layers_21_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1014] + model_decoder_layers_21_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1015] + model_decoder_layers_21_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1016] + model_decoder_layers_22_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] + model_decoder_layers_22_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] + model_decoder_layers_22_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1019] + model_decoder_layers_22_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] + model_decoder_layers_22_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1021] + model_decoder_layers_22_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] + model_decoder_layers_22_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1023] + model_decoder_layers_22_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1024] + model_decoder_layers_22_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1025] + model_decoder_layers_22_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] + model_decoder_layers_22_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1030] + model_decoder_layers_22_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] + model_decoder_layers_22_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1032] + model_decoder_layers_22_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1033] + model_decoder_layers_22_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1034] + model_decoder_layers_22_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] + model_decoder_layers_22_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1036] + model_decoder_layers_22_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] + model_decoder_layers_22_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1038] + model_decoder_layers_22_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1039] + model_decoder_layers_22_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1040] + model_decoder_layers_23_self_attn_k_proj_weight5: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1041] + model_decoder_layers_23_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] + model_decoder_layers_23_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1043] + model_decoder_layers_23_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] + model_decoder_layers_23_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1045] + model_decoder_layers_23_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] + model_decoder_layers_23_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1047] + model_decoder_layers_23_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1048] + model_decoder_layers_23_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1049] + model_decoder_layers_23_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] + model_decoder_layers_23_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1054] + model_decoder_layers_23_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] + model_decoder_layers_23_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1056] + model_decoder_layers_23_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1057] + model_decoder_layers_23_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1058] + model_decoder_layers_23_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] + model_decoder_layers_23_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1060] + model_decoder_layers_23_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] + model_decoder_layers_23_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1062] + model_decoder_layers_23_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1063] + model_decoder_layers_23_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1064] + model_decoder_layers_24_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] + model_decoder_layers_24_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1066] + model_decoder_layers_24_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1067] + model_decoder_layers_24_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] + model_decoder_layers_24_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1069] + model_decoder_layers_24_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] + model_decoder_layers_24_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1071] + model_decoder_layers_24_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1072] + model_decoder_layers_24_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1073] + model_decoder_layers_24_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] + model_decoder_layers_24_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1078] + model_decoder_layers_24_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = 
packed_params[1079] + model_decoder_layers_24_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1080] + model_decoder_layers_24_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1081] + model_decoder_layers_24_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1082] + model_decoder_layers_24_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] + model_decoder_layers_24_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1084] + model_decoder_layers_24_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] + model_decoder_layers_24_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1086] + model_decoder_layers_24_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1087] + model_decoder_layers_24_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1088] + model_decoder_layers_25_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] + model_decoder_layers_25_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] + model_decoder_layers_25_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1091] + model_decoder_layers_25_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] + model_decoder_layers_25_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1093] + model_decoder_layers_25_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] + model_decoder_layers_25_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1095] + model_decoder_layers_25_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1096] + model_decoder_layers_25_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1097] + model_decoder_layers_25_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] + model_decoder_layers_25_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1102] + model_decoder_layers_25_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] + model_decoder_layers_25_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1104] + model_decoder_layers_25_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1105] + model_decoder_layers_25_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1106] + model_decoder_layers_25_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] + model_decoder_layers_25_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1108] + model_decoder_layers_25_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] + model_decoder_layers_25_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1110] + model_decoder_layers_25_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1111] + model_decoder_layers_25_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1112] + model_decoder_layers_26_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] + model_decoder_layers_26_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] + model_decoder_layers_26_self_attn_v_proj_bias5: 
R.Tensor((1280,), dtype="float16") = packed_params[1115] + model_decoder_layers_26_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] + model_decoder_layers_26_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1117] + model_decoder_layers_26_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] + model_decoder_layers_26_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1119] + model_decoder_layers_26_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1120] + model_decoder_layers_26_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1121] + model_decoder_layers_26_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] + model_decoder_layers_26_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1126] + model_decoder_layers_26_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] + model_decoder_layers_26_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1128] + model_decoder_layers_26_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1129] + model_decoder_layers_26_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1130] + model_decoder_layers_26_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] + model_decoder_layers_26_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1132] + model_decoder_layers_26_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] + model_decoder_layers_26_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1134] + model_decoder_layers_26_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1135] + model_decoder_layers_26_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1136] + model_decoder_layers_27_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] + model_decoder_layers_27_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] + model_decoder_layers_27_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1139] + model_decoder_layers_27_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] + model_decoder_layers_27_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1141] + model_decoder_layers_27_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] + model_decoder_layers_27_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1143] + model_decoder_layers_27_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1144] + model_decoder_layers_27_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1145] + model_decoder_layers_27_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] + model_decoder_layers_27_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1150] + model_decoder_layers_27_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] + model_decoder_layers_27_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1152] + model_decoder_layers_27_encoder_attn_layer_norm_weight5: 
R.Tensor((1280,), dtype="float16") = packed_params[1153] + model_decoder_layers_27_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1154] + model_decoder_layers_27_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] + model_decoder_layers_27_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1156] + model_decoder_layers_27_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] + model_decoder_layers_27_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1158] + model_decoder_layers_27_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1159] + model_decoder_layers_27_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1160] + model_decoder_layers_28_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] + model_decoder_layers_28_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] + model_decoder_layers_28_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1163] + model_decoder_layers_28_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] + model_decoder_layers_28_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1165] + model_decoder_layers_28_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] + model_decoder_layers_28_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1167] + model_decoder_layers_28_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1168] + model_decoder_layers_28_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1169] + model_decoder_layers_28_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] + model_decoder_layers_28_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1174] + model_decoder_layers_28_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] + model_decoder_layers_28_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1176] + model_decoder_layers_28_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1177] + model_decoder_layers_28_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1178] + model_decoder_layers_28_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] + model_decoder_layers_28_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1180] + model_decoder_layers_28_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] + model_decoder_layers_28_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1182] + model_decoder_layers_28_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1183] + model_decoder_layers_28_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1184] + model_decoder_layers_29_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] + model_decoder_layers_29_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] + model_decoder_layers_29_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1187] + model_decoder_layers_29_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] + 
model_decoder_layers_29_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1189] + model_decoder_layers_29_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] + model_decoder_layers_29_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1191] + model_decoder_layers_29_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1192] + model_decoder_layers_29_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1193] + model_decoder_layers_29_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] + model_decoder_layers_29_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1198] + model_decoder_layers_29_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] + model_decoder_layers_29_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1200] + model_decoder_layers_29_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1201] + model_decoder_layers_29_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1202] + model_decoder_layers_29_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] + model_decoder_layers_29_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1204] + model_decoder_layers_29_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] + model_decoder_layers_29_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1206] + model_decoder_layers_29_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1207] + model_decoder_layers_29_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1208] + model_decoder_layers_30_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] + model_decoder_layers_30_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] + model_decoder_layers_30_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1211] + model_decoder_layers_30_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] + model_decoder_layers_30_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1213] + model_decoder_layers_30_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] + model_decoder_layers_30_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1215] + model_decoder_layers_30_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1216] + model_decoder_layers_30_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1217] + model_decoder_layers_30_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] + model_decoder_layers_30_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1222] + model_decoder_layers_30_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] + model_decoder_layers_30_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1224] + model_decoder_layers_30_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1225] + model_decoder_layers_30_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1226] + 
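+ # The same stride continues through layer 31 (ending at packed_params[1256]); the
+ # top-level model_decoder_layer_norm weight/bias follow at indices 1257/1258, after
+ # which the single-token decode computation begins.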
model_decoder_layers_30_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] + model_decoder_layers_30_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1228] + model_decoder_layers_30_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] + model_decoder_layers_30_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1230] + model_decoder_layers_30_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1231] + model_decoder_layers_30_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1232] + model_decoder_layers_31_self_attn_k_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] + model_decoder_layers_31_self_attn_v_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] + model_decoder_layers_31_self_attn_v_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1235] + model_decoder_layers_31_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] + model_decoder_layers_31_self_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1237] + model_decoder_layers_31_self_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] + model_decoder_layers_31_self_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1239] + model_decoder_layers_31_self_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1240] + model_decoder_layers_31_self_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1241] + model_decoder_layers_31_encoder_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] + model_decoder_layers_31_encoder_attn_q_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1246] + model_decoder_layers_31_encoder_attn_out_proj_weight5: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] + model_decoder_layers_31_encoder_attn_out_proj_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1248] + model_decoder_layers_31_encoder_attn_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1249] + model_decoder_layers_31_encoder_attn_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1250] + model_decoder_layers_31_fc1_weight5: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] + model_decoder_layers_31_fc1_bias5: R.Tensor((5120,), dtype="float16") = packed_params[1252] + model_decoder_layers_31_fc2_weight5: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] + model_decoder_layers_31_fc2_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1254] + model_decoder_layers_31_final_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1255] + model_decoder_layers_31_final_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1256] + model_decoder_layer_norm_weight5: R.Tensor((1280,), dtype="float16") = packed_params[1257] + model_decoder_layer_norm_bias5: R.Tensor((1280,), dtype="float16") = packed_params[1258] + reshape1353: R.Tensor((1,), dtype="int32") = R.reshape(input_ids, R.shape([1])) + take7: R.Tensor((1, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight5, reshape1353, axis=0) + reshape1354: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(take7, R.shape([1, 1, 1280])) + lv264: R.Tensor((1,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((1,), dtype="int32"),)) + 
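+ # Decode step for one token: the gather above embeds input_ids from
+ # model_decoder_embed_tokens_weight5, and lv264 reads the current query position out of
+ # the paged KV cache so the matching row of the learned position embedding can be taken
+ # next. Each of the 32 unrolled layers below then computes, in pre-norm order:
+ #   h  = x + out_proj(self_attn(LN_sa(x)))     # fused q/k/v through the paged KV cache
+ #   h2 = h + out_proj(cross_attn(LN_ca(h)))    # k/v reused from the encoder run
+ #   y  = h2 + fc2(gelu(fc1(LN_fc(h2))))
+ # q/k/v are (1, 1, 1280) projections reshaped into (1, 1, 20, 64) heads and concatenated
+ # into one (1, 1, 60, 64) buffer for vm.builtin.attention_kv_cache_attention_with_fused_qkv,
+ # whose other arguments are the layer index and an extra score-scaling factor of 1.0
+ # (the usual 1/sqrt(64) is presumably applied inside the cache kernel itself).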
take8: R.Tensor((1, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight5, lv264, axis=0) + reshape1355: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(take8, R.shape([1, 1, 1280])) + add1220: R.Tensor((1, 1, 1280), dtype="float16") = R.add(reshape1354, reshape1355) + layer_norm356: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1220, model_decoder_layers_0_self_attn_layer_norm_weight5, model_decoder_layers_0_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv = R.call_tir(cls.NT_matmul, (layer_norm356, model_decoder_layers_0_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1221: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv, model_decoder_layers_0_self_attn_q_proj_bias5) + reshape1356: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1221, R.shape([1, 1, 20, 64])) + lv1 = R.call_tir(cls.NT_matmul, (layer_norm356, model_decoder_layers_0_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1357: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv1, R.shape([1, 1, 20, 64])) + lv2 = R.call_tir(cls.NT_matmul, (layer_norm356, model_decoder_layers_0_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1222: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv2, model_decoder_layers_0_self_attn_v_proj_bias5) + reshape1358: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1222, R.shape([1, 1, 20, 64])) + concat96: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1356, reshape1357, reshape1358), axis=2) + reshape1359: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat96, R.shape([1, 60, 64])) + lv265 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1359), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1360: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv265, R.shape([1, 1, 20, 64])) + reshape1361: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1360, R.shape([1, 1, 1280])) + lv3 = R.call_tir(cls.NT_matmul, (reshape1361, model_decoder_layers_0_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1223: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv3, model_decoder_layers_0_self_attn_out_proj_bias5) + add1224: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1220, add1223) + layer_norm357: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1224, model_decoder_layers_0_encoder_attn_layer_norm_weight5, model_decoder_layers_0_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv4 = R.call_tir(cls.NT_matmul, (layer_norm357, model_decoder_layers_0_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1225: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv4, model_decoder_layers_0_encoder_attn_q_proj_bias5) + reshape1362: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1225, R.shape([1, 1, 20, 64])) + reshape1363: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1362, R.shape([1, 20, 64])) + lv266 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1363), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1364: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv266, R.shape([1, 1, 20, 64])) + reshape1365: 
R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1364, R.shape([1, 1, 1280])) + lv5 = R.call_tir(cls.NT_matmul, (reshape1365, model_decoder_layers_0_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1226: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv5, model_decoder_layers_0_encoder_attn_out_proj_bias5) + add1227: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1224, add1226) + layer_norm358: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1227, model_decoder_layers_0_final_layer_norm_weight5, model_decoder_layers_0_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv6 = R.call_tir(cls.NT_matmul1, (layer_norm358, model_decoder_layers_0_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1228: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv6, model_decoder_layers_0_fc1_bias5) + gelu130: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1228) + lv7 = R.call_tir(cls.NT_matmul2, (gelu130, model_decoder_layers_0_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1229: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv7, model_decoder_layers_0_fc2_bias5) + add1230: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1227, add1229) + layer_norm359: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1230, model_decoder_layers_1_self_attn_layer_norm_weight5, model_decoder_layers_1_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv8 = R.call_tir(cls.NT_matmul, (layer_norm359, model_decoder_layers_1_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1231: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv8, model_decoder_layers_1_self_attn_q_proj_bias5) + reshape1366: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1231, R.shape([1, 1, 20, 64])) + lv9 = R.call_tir(cls.NT_matmul, (layer_norm359, model_decoder_layers_1_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1367: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv9, R.shape([1, 1, 20, 64])) + lv10 = R.call_tir(cls.NT_matmul, (layer_norm359, model_decoder_layers_1_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1232: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv10, model_decoder_layers_1_self_attn_v_proj_bias5) + reshape1368: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1232, R.shape([1, 1, 20, 64])) + concat97: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1366, reshape1367, reshape1368), axis=2) + reshape1369: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat97, R.shape([1, 60, 64])) + lv267 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1369), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1370: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv267, R.shape([1, 1, 20, 64])) + reshape1371: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1370, R.shape([1, 1, 1280])) + lv11 = R.call_tir(cls.NT_matmul, (reshape1371, model_decoder_layers_1_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1233: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv11, model_decoder_layers_1_self_attn_out_proj_bias5) + add1234: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1230, add1233) + layer_norm360: R.Tensor((1, 1, 1280), 
dtype="float16") = R.nn.layer_norm(add1234, model_decoder_layers_1_encoder_attn_layer_norm_weight5, model_decoder_layers_1_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv12 = R.call_tir(cls.NT_matmul, (layer_norm360, model_decoder_layers_1_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1235: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv12, model_decoder_layers_1_encoder_attn_q_proj_bias5) + reshape1372: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1235, R.shape([1, 1, 20, 64])) + reshape1373: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1372, R.shape([1, 20, 64])) + lv268 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1373), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1374: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv268, R.shape([1, 1, 20, 64])) + reshape1375: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1374, R.shape([1, 1, 1280])) + lv13 = R.call_tir(cls.NT_matmul, (reshape1375, model_decoder_layers_1_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1236: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv13, model_decoder_layers_1_encoder_attn_out_proj_bias5) + add1237: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1234, add1236) + layer_norm361: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1237, model_decoder_layers_1_final_layer_norm_weight5, model_decoder_layers_1_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv14 = R.call_tir(cls.NT_matmul1, (layer_norm361, model_decoder_layers_1_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1238: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv14, model_decoder_layers_1_fc1_bias5) + gelu131: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1238) + lv15 = R.call_tir(cls.NT_matmul2, (gelu131, model_decoder_layers_1_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1239: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv15, model_decoder_layers_1_fc2_bias5) + add1240: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1237, add1239) + layer_norm362: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1240, model_decoder_layers_2_self_attn_layer_norm_weight5, model_decoder_layers_2_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv16 = R.call_tir(cls.NT_matmul, (layer_norm362, model_decoder_layers_2_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1241: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv16, model_decoder_layers_2_self_attn_q_proj_bias5) + reshape1376: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1241, R.shape([1, 1, 20, 64])) + lv17 = R.call_tir(cls.NT_matmul, (layer_norm362, model_decoder_layers_2_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1377: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv17, R.shape([1, 1, 20, 64])) + lv18 = R.call_tir(cls.NT_matmul, (layer_norm362, model_decoder_layers_2_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1242: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv18, model_decoder_layers_2_self_attn_v_proj_bias5) + reshape1378: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1242, R.shape([1, 1, 
20, 64])) + concat98: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1376, reshape1377, reshape1378), axis=2) + reshape1379: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat98, R.shape([1, 60, 64])) + lv269 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1379), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1380: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv269, R.shape([1, 1, 20, 64])) + reshape1381: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1380, R.shape([1, 1, 1280])) + lv19 = R.call_tir(cls.NT_matmul, (reshape1381, model_decoder_layers_2_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1243: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv19, model_decoder_layers_2_self_attn_out_proj_bias5) + add1244: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1240, add1243) + layer_norm363: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1244, model_decoder_layers_2_encoder_attn_layer_norm_weight5, model_decoder_layers_2_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv20 = R.call_tir(cls.NT_matmul, (layer_norm363, model_decoder_layers_2_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1245: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv20, model_decoder_layers_2_encoder_attn_q_proj_bias5) + reshape1382: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1245, R.shape([1, 1, 20, 64])) + reshape1383: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1382, R.shape([1, 20, 64])) + lv270 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1383), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1384: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv270, R.shape([1, 1, 20, 64])) + reshape1385: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1384, R.shape([1, 1, 1280])) + lv21 = R.call_tir(cls.NT_matmul, (reshape1385, model_decoder_layers_2_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1246: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv21, model_decoder_layers_2_encoder_attn_out_proj_bias5) + add1247: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1244, add1246) + layer_norm364: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1247, model_decoder_layers_2_final_layer_norm_weight5, model_decoder_layers_2_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv22 = R.call_tir(cls.NT_matmul1, (layer_norm364, model_decoder_layers_2_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1248: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv22, model_decoder_layers_2_fc1_bias5) + gelu132: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1248) + lv23 = R.call_tir(cls.NT_matmul2, (gelu132, model_decoder_layers_2_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1249: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv23, model_decoder_layers_2_fc2_bias5) + add1250: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1247, add1249) + layer_norm365: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1250, model_decoder_layers_3_self_attn_layer_norm_weight5, model_decoder_layers_3_self_attn_layer_norm_bias5, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) + lv24 = R.call_tir(cls.NT_matmul, (layer_norm365, model_decoder_layers_3_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1251: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv24, model_decoder_layers_3_self_attn_q_proj_bias5) + reshape1386: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1251, R.shape([1, 1, 20, 64])) + lv25 = R.call_tir(cls.NT_matmul, (layer_norm365, model_decoder_layers_3_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1387: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv25, R.shape([1, 1, 20, 64])) + lv26 = R.call_tir(cls.NT_matmul, (layer_norm365, model_decoder_layers_3_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1252: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv26, model_decoder_layers_3_self_attn_v_proj_bias5) + reshape1388: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1252, R.shape([1, 1, 20, 64])) + concat99: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1386, reshape1387, reshape1388), axis=2) + reshape1389: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat99, R.shape([1, 60, 64])) + lv271 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1389), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1390: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv271, R.shape([1, 1, 20, 64])) + reshape1391: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1390, R.shape([1, 1, 1280])) + lv27 = R.call_tir(cls.NT_matmul, (reshape1391, model_decoder_layers_3_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1253: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv27, model_decoder_layers_3_self_attn_out_proj_bias5) + add1254: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1250, add1253) + layer_norm366: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1254, model_decoder_layers_3_encoder_attn_layer_norm_weight5, model_decoder_layers_3_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv28 = R.call_tir(cls.NT_matmul, (layer_norm366, model_decoder_layers_3_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1255: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv28, model_decoder_layers_3_encoder_attn_q_proj_bias5) + reshape1392: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1255, R.shape([1, 1, 20, 64])) + reshape1393: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1392, R.shape([1, 20, 64])) + lv272 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1393), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1394: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv272, R.shape([1, 1, 20, 64])) + reshape1395: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1394, R.shape([1, 1, 1280])) + lv29 = R.call_tir(cls.NT_matmul, (reshape1395, model_decoder_layers_3_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1256: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv29, model_decoder_layers_3_encoder_attn_out_proj_bias5) + add1257: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1254, add1256) + layer_norm367: R.Tensor((1, 1, 1280), 
dtype="float16") = R.nn.layer_norm(add1257, model_decoder_layers_3_final_layer_norm_weight5, model_decoder_layers_3_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv30 = R.call_tir(cls.NT_matmul1, (layer_norm367, model_decoder_layers_3_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1258: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv30, model_decoder_layers_3_fc1_bias5) + gelu133: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1258) + lv31 = R.call_tir(cls.NT_matmul2, (gelu133, model_decoder_layers_3_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1259: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv31, model_decoder_layers_3_fc2_bias5) + add1260: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1257, add1259) + layer_norm368: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1260, model_decoder_layers_4_self_attn_layer_norm_weight5, model_decoder_layers_4_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv32 = R.call_tir(cls.NT_matmul, (layer_norm368, model_decoder_layers_4_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1261: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv32, model_decoder_layers_4_self_attn_q_proj_bias5) + reshape1396: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1261, R.shape([1, 1, 20, 64])) + lv33 = R.call_tir(cls.NT_matmul, (layer_norm368, model_decoder_layers_4_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1397: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv33, R.shape([1, 1, 20, 64])) + lv34 = R.call_tir(cls.NT_matmul, (layer_norm368, model_decoder_layers_4_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1262: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv34, model_decoder_layers_4_self_attn_v_proj_bias5) + reshape1398: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1262, R.shape([1, 1, 20, 64])) + concat100: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1396, reshape1397, reshape1398), axis=2) + reshape1399: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat100, R.shape([1, 60, 64])) + lv273 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1399), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1400: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv273, R.shape([1, 1, 20, 64])) + reshape1401: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1400, R.shape([1, 1, 1280])) + lv35 = R.call_tir(cls.NT_matmul, (reshape1401, model_decoder_layers_4_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1263: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv35, model_decoder_layers_4_self_attn_out_proj_bias5) + add1264: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1260, add1263) + layer_norm369: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1264, model_decoder_layers_4_encoder_attn_layer_norm_weight5, model_decoder_layers_4_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv36 = R.call_tir(cls.NT_matmul, (layer_norm369, model_decoder_layers_4_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1265: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv36, 
model_decoder_layers_4_encoder_attn_q_proj_bias5) + reshape1402: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1265, R.shape([1, 1, 20, 64])) + reshape1403: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1402, R.shape([1, 20, 64])) + lv274 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1403), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1404: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv274, R.shape([1, 1, 20, 64])) + reshape1405: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1404, R.shape([1, 1, 1280])) + lv37 = R.call_tir(cls.NT_matmul, (reshape1405, model_decoder_layers_4_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1266: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv37, model_decoder_layers_4_encoder_attn_out_proj_bias5) + add1267: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1264, add1266) + layer_norm370: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1267, model_decoder_layers_4_final_layer_norm_weight5, model_decoder_layers_4_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv38 = R.call_tir(cls.NT_matmul1, (layer_norm370, model_decoder_layers_4_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1268: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv38, model_decoder_layers_4_fc1_bias5) + gelu134: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1268) + lv39 = R.call_tir(cls.NT_matmul2, (gelu134, model_decoder_layers_4_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1269: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv39, model_decoder_layers_4_fc2_bias5) + add1270: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1267, add1269) + layer_norm371: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1270, model_decoder_layers_5_self_attn_layer_norm_weight5, model_decoder_layers_5_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv40 = R.call_tir(cls.NT_matmul, (layer_norm371, model_decoder_layers_5_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1271: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv40, model_decoder_layers_5_self_attn_q_proj_bias5) + reshape1406: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1271, R.shape([1, 1, 20, 64])) + lv41 = R.call_tir(cls.NT_matmul, (layer_norm371, model_decoder_layers_5_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1407: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv41, R.shape([1, 1, 20, 64])) + lv42 = R.call_tir(cls.NT_matmul, (layer_norm371, model_decoder_layers_5_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1272: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv42, model_decoder_layers_5_self_attn_v_proj_bias5) + reshape1408: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1272, R.shape([1, 1, 20, 64])) + concat101: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1406, reshape1407, reshape1408), axis=2) + reshape1409: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat101, R.shape([1, 60, 64])) + lv275 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1409), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + 
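+ # From layer 5 onward nothing new appears: the compiler fully unrolled the layer loop,
+ # so layers 6-31 repeat the block above with only the R.prim_value layer index and the
+ # intermediate names advancing, which makes this dump straightforward to diff against
+ # later lowering phases.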
reshape1410: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv275, R.shape([1, 1, 20, 64])) + reshape1411: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1410, R.shape([1, 1, 1280])) + lv43 = R.call_tir(cls.NT_matmul, (reshape1411, model_decoder_layers_5_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1273: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv43, model_decoder_layers_5_self_attn_out_proj_bias5) + add1274: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1270, add1273) + layer_norm372: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1274, model_decoder_layers_5_encoder_attn_layer_norm_weight5, model_decoder_layers_5_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv44 = R.call_tir(cls.NT_matmul, (layer_norm372, model_decoder_layers_5_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1275: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv44, model_decoder_layers_5_encoder_attn_q_proj_bias5) + reshape1412: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1275, R.shape([1, 1, 20, 64])) + reshape1413: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1412, R.shape([1, 20, 64])) + lv276 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1413), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1414: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv276, R.shape([1, 1, 20, 64])) + reshape1415: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1414, R.shape([1, 1, 1280])) + lv45 = R.call_tir(cls.NT_matmul, (reshape1415, model_decoder_layers_5_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1276: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv45, model_decoder_layers_5_encoder_attn_out_proj_bias5) + add1277: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1274, add1276) + layer_norm373: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1277, model_decoder_layers_5_final_layer_norm_weight5, model_decoder_layers_5_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv46 = R.call_tir(cls.NT_matmul1, (layer_norm373, model_decoder_layers_5_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1278: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv46, model_decoder_layers_5_fc1_bias5) + gelu135: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1278) + lv47 = R.call_tir(cls.NT_matmul2, (gelu135, model_decoder_layers_5_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1279: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv47, model_decoder_layers_5_fc2_bias5) + add1280: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1277, add1279) + layer_norm374: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1280, model_decoder_layers_6_self_attn_layer_norm_weight5, model_decoder_layers_6_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv48 = R.call_tir(cls.NT_matmul, (layer_norm374, model_decoder_layers_6_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1281: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv48, model_decoder_layers_6_self_attn_q_proj_bias5) + reshape1416: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1281, R.shape([1, 1, 20, 64])) + lv49 = 
+            layer_norm377: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1290, model_decoder_layers_7_self_attn_layer_norm_weight5, model_decoder_layers_7_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv56 = R.call_tir(cls.NT_matmul, (layer_norm377, model_decoder_layers_7_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1291: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv56, model_decoder_layers_7_self_attn_q_proj_bias5)
+            reshape1426: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1291, R.shape([1, 1, 20, 64]))
+            lv57 = R.call_tir(cls.NT_matmul, (layer_norm377, model_decoder_layers_7_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1427: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv57, R.shape([1, 1, 20, 64]))
+            lv58 = R.call_tir(cls.NT_matmul, (layer_norm377, model_decoder_layers_7_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1292: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv58, model_decoder_layers_7_self_attn_v_proj_bias5)
+            reshape1428: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1292, R.shape([1, 1, 20, 64]))
+            concat103: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1426, reshape1427, reshape1428), axis=2)
+            reshape1429: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat103, R.shape([1, 60, 64]))
+            lv279 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1429), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1430: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv279, R.shape([1, 1, 20, 64]))
+            reshape1431: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1430, R.shape([1, 1, 1280]))
+            lv59 = R.call_tir(cls.NT_matmul, (reshape1431, model_decoder_layers_7_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1293: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv59, model_decoder_layers_7_self_attn_out_proj_bias5)
+            add1294: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1290, add1293)
+            layer_norm378: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1294, model_decoder_layers_7_encoder_attn_layer_norm_weight5, model_decoder_layers_7_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv60 = R.call_tir(cls.NT_matmul, (layer_norm378, model_decoder_layers_7_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1295: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv60, model_decoder_layers_7_encoder_attn_q_proj_bias5)
+            reshape1432: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1295, R.shape([1, 1, 20, 64]))
+            reshape1433: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1432, R.shape([1, 20, 64]))
+            lv280 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1433), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1434: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv280, R.shape([1, 1, 20, 64]))
+            reshape1435: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1434, R.shape([1, 1, 1280]))
+            lv61 = R.call_tir(cls.NT_matmul, (reshape1435, model_decoder_layers_7_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1296: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv61, model_decoder_layers_7_encoder_attn_out_proj_bias5)
+            add1297: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1294, add1296)
+            layer_norm379: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1297, model_decoder_layers_7_final_layer_norm_weight5, model_decoder_layers_7_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv62 = R.call_tir(cls.NT_matmul1, (layer_norm379, model_decoder_layers_7_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1298: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv62, model_decoder_layers_7_fc1_bias5)
+            gelu137: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1298)
+            lv63 = R.call_tir(cls.NT_matmul2, (gelu137, model_decoder_layers_7_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1299: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv63, model_decoder_layers_7_fc2_bias5)
+            add1300: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1297, add1299)
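+            # --- decoder layer 8 (self-attn -> cross-attn -> FFN) ---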
dtype="float16")) + reshape1434: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv280, R.shape([1, 1, 20, 64])) + reshape1435: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1434, R.shape([1, 1, 1280])) + lv61 = R.call_tir(cls.NT_matmul, (reshape1435, model_decoder_layers_7_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1296: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv61, model_decoder_layers_7_encoder_attn_out_proj_bias5) + add1297: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1294, add1296) + layer_norm379: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1297, model_decoder_layers_7_final_layer_norm_weight5, model_decoder_layers_7_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv62 = R.call_tir(cls.NT_matmul1, (layer_norm379, model_decoder_layers_7_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1298: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv62, model_decoder_layers_7_fc1_bias5) + gelu137: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1298) + lv63 = R.call_tir(cls.NT_matmul2, (gelu137, model_decoder_layers_7_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1299: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv63, model_decoder_layers_7_fc2_bias5) + add1300: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1297, add1299) + layer_norm380: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1300, model_decoder_layers_8_self_attn_layer_norm_weight5, model_decoder_layers_8_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv64 = R.call_tir(cls.NT_matmul, (layer_norm380, model_decoder_layers_8_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1301: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv64, model_decoder_layers_8_self_attn_q_proj_bias5) + reshape1436: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1301, R.shape([1, 1, 20, 64])) + lv65 = R.call_tir(cls.NT_matmul, (layer_norm380, model_decoder_layers_8_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1437: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv65, R.shape([1, 1, 20, 64])) + lv66 = R.call_tir(cls.NT_matmul, (layer_norm380, model_decoder_layers_8_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1302: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv66, model_decoder_layers_8_self_attn_v_proj_bias5) + reshape1438: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1302, R.shape([1, 1, 20, 64])) + concat104: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1436, reshape1437, reshape1438), axis=2) + reshape1439: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat104, R.shape([1, 60, 64])) + lv281 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1439), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1440: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv281, R.shape([1, 1, 20, 64])) + reshape1441: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1440, R.shape([1, 1, 1280])) + lv67 = R.call_tir(cls.NT_matmul, (reshape1441, model_decoder_layers_8_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1303: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv67, 
+            layer_norm383: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1310, model_decoder_layers_9_self_attn_layer_norm_weight5, model_decoder_layers_9_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv72 = R.call_tir(cls.NT_matmul, (layer_norm383, model_decoder_layers_9_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1311: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv72, model_decoder_layers_9_self_attn_q_proj_bias5)
+            reshape1446: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1311, R.shape([1, 1, 20, 64]))
+            lv73 = R.call_tir(cls.NT_matmul, (layer_norm383, model_decoder_layers_9_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1447: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv73, R.shape([1, 1, 20, 64]))
+            lv74 = R.call_tir(cls.NT_matmul, (layer_norm383, model_decoder_layers_9_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1312: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv74, model_decoder_layers_9_self_attn_v_proj_bias5)
+            reshape1448: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1312, R.shape([1, 1, 20, 64]))
+            concat105: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1446, reshape1447, reshape1448), axis=2)
+            reshape1449: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat105, R.shape([1, 60, 64]))
+            lv283 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1449), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1450: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv283, R.shape([1, 1, 20, 64]))
+            reshape1451: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1450, R.shape([1, 1, 1280]))
+            lv75 = R.call_tir(cls.NT_matmul, (reshape1451, model_decoder_layers_9_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1313: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv75, model_decoder_layers_9_self_attn_out_proj_bias5)
+            add1314: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1310, add1313)
+            layer_norm384: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1314, model_decoder_layers_9_encoder_attn_layer_norm_weight5, model_decoder_layers_9_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv76 = R.call_tir(cls.NT_matmul, (layer_norm384, model_decoder_layers_9_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1315: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv76, model_decoder_layers_9_encoder_attn_q_proj_bias5)
+            reshape1452: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1315, R.shape([1, 1, 20, 64]))
+            reshape1453: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1452, R.shape([1, 20, 64]))
+            lv284 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1453), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1454: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv284, R.shape([1, 1, 20, 64]))
+            reshape1455: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1454, R.shape([1, 1, 1280]))
+            lv77 = R.call_tir(cls.NT_matmul, (reshape1455, model_decoder_layers_9_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1316: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv77, model_decoder_layers_9_encoder_attn_out_proj_bias5)
+            add1317: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1314, add1316)
+            layer_norm385: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1317, model_decoder_layers_9_final_layer_norm_weight5, model_decoder_layers_9_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv78 = R.call_tir(cls.NT_matmul1, (layer_norm385, model_decoder_layers_9_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1318: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv78, model_decoder_layers_9_fc1_bias5)
+            gelu139: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1318)
+            lv79 = R.call_tir(cls.NT_matmul2, (gelu139, model_decoder_layers_9_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1319: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv79, model_decoder_layers_9_fc2_bias5)
+            add1320: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1317, add1319)
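+            # --- decoder layer 10 (self-attn -> cross-attn -> FFN) ---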
dtype="float16") = R.add(lv74, model_decoder_layers_9_self_attn_v_proj_bias5) + reshape1448: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1312, R.shape([1, 1, 20, 64])) + concat105: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1446, reshape1447, reshape1448), axis=2) + reshape1449: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat105, R.shape([1, 60, 64])) + lv283 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1449), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1450: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv283, R.shape([1, 1, 20, 64])) + reshape1451: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1450, R.shape([1, 1, 1280])) + lv75 = R.call_tir(cls.NT_matmul, (reshape1451, model_decoder_layers_9_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1313: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv75, model_decoder_layers_9_self_attn_out_proj_bias5) + add1314: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1310, add1313) + layer_norm384: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1314, model_decoder_layers_9_encoder_attn_layer_norm_weight5, model_decoder_layers_9_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv76 = R.call_tir(cls.NT_matmul, (layer_norm384, model_decoder_layers_9_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1315: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv76, model_decoder_layers_9_encoder_attn_q_proj_bias5) + reshape1452: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1315, R.shape([1, 1, 20, 64])) + reshape1453: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1452, R.shape([1, 20, 64])) + lv284 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1453), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1454: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv284, R.shape([1, 1, 20, 64])) + reshape1455: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1454, R.shape([1, 1, 1280])) + lv77 = R.call_tir(cls.NT_matmul, (reshape1455, model_decoder_layers_9_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1316: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv77, model_decoder_layers_9_encoder_attn_out_proj_bias5) + add1317: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1314, add1316) + layer_norm385: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1317, model_decoder_layers_9_final_layer_norm_weight5, model_decoder_layers_9_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv78 = R.call_tir(cls.NT_matmul1, (layer_norm385, model_decoder_layers_9_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1318: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv78, model_decoder_layers_9_fc1_bias5) + gelu139: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1318) + lv79 = R.call_tir(cls.NT_matmul2, (gelu139, model_decoder_layers_9_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1319: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv79, model_decoder_layers_9_fc2_bias5) + add1320: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1317, add1319) + layer_norm386: R.Tensor((1, 1, 1280), 
dtype="float16") = R.nn.layer_norm(add1320, model_decoder_layers_10_self_attn_layer_norm_weight5, model_decoder_layers_10_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv80 = R.call_tir(cls.NT_matmul, (layer_norm386, model_decoder_layers_10_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1321: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv80, model_decoder_layers_10_self_attn_q_proj_bias5) + reshape1456: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1321, R.shape([1, 1, 20, 64])) + lv81 = R.call_tir(cls.NT_matmul, (layer_norm386, model_decoder_layers_10_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1457: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv81, R.shape([1, 1, 20, 64])) + lv82 = R.call_tir(cls.NT_matmul, (layer_norm386, model_decoder_layers_10_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1322: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv82, model_decoder_layers_10_self_attn_v_proj_bias5) + reshape1458: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1322, R.shape([1, 1, 20, 64])) + concat106: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1456, reshape1457, reshape1458), axis=2) + reshape1459: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat106, R.shape([1, 60, 64])) + lv285 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1459), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1460: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv285, R.shape([1, 1, 20, 64])) + reshape1461: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1460, R.shape([1, 1, 1280])) + lv83 = R.call_tir(cls.NT_matmul, (reshape1461, model_decoder_layers_10_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1323: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv83, model_decoder_layers_10_self_attn_out_proj_bias5) + add1324: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1320, add1323) + layer_norm387: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1324, model_decoder_layers_10_encoder_attn_layer_norm_weight5, model_decoder_layers_10_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv84 = R.call_tir(cls.NT_matmul, (layer_norm387, model_decoder_layers_10_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1325: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv84, model_decoder_layers_10_encoder_attn_q_proj_bias5) + reshape1462: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1325, R.shape([1, 1, 20, 64])) + reshape1463: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1462, R.shape([1, 20, 64])) + lv286 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1463), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1464: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv286, R.shape([1, 1, 20, 64])) + reshape1465: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1464, R.shape([1, 1, 1280])) + lv85 = R.call_tir(cls.NT_matmul, (reshape1465, model_decoder_layers_10_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1326: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv85, 
+            layer_norm389: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1330, model_decoder_layers_11_self_attn_layer_norm_weight5, model_decoder_layers_11_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv88 = R.call_tir(cls.NT_matmul, (layer_norm389, model_decoder_layers_11_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1331: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv88, model_decoder_layers_11_self_attn_q_proj_bias5)
+            reshape1466: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1331, R.shape([1, 1, 20, 64]))
+            lv89 = R.call_tir(cls.NT_matmul, (layer_norm389, model_decoder_layers_11_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1467: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv89, R.shape([1, 1, 20, 64]))
+            lv90 = R.call_tir(cls.NT_matmul, (layer_norm389, model_decoder_layers_11_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1332: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv90, model_decoder_layers_11_self_attn_v_proj_bias5)
+            reshape1468: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1332, R.shape([1, 1, 20, 64]))
+            concat107: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1466, reshape1467, reshape1468), axis=2)
+            reshape1469: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat107, R.shape([1, 60, 64]))
+            lv287 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1469), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1470: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv287, R.shape([1, 1, 20, 64]))
+            reshape1471: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1470, R.shape([1, 1, 1280]))
+            lv91 = R.call_tir(cls.NT_matmul, (reshape1471, model_decoder_layers_11_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1333: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv91, model_decoder_layers_11_self_attn_out_proj_bias5)
+            add1334: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1330, add1333)
+            layer_norm390: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1334, model_decoder_layers_11_encoder_attn_layer_norm_weight5, model_decoder_layers_11_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv92 = R.call_tir(cls.NT_matmul, (layer_norm390, model_decoder_layers_11_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1335: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv92, model_decoder_layers_11_encoder_attn_q_proj_bias5)
+            reshape1472: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1335, R.shape([1, 1, 20, 64]))
+            reshape1473: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1472, R.shape([1, 20, 64]))
+            lv288 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1473), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1474: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv288, R.shape([1, 1, 20, 64]))
+            reshape1475: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1474, R.shape([1, 1, 1280]))
+            lv93 = R.call_tir(cls.NT_matmul, (reshape1475, model_decoder_layers_11_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1336: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv93, model_decoder_layers_11_encoder_attn_out_proj_bias5)
+            add1337: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1334, add1336)
+            layer_norm391: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1337, model_decoder_layers_11_final_layer_norm_weight5, model_decoder_layers_11_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv94 = R.call_tir(cls.NT_matmul1, (layer_norm391, model_decoder_layers_11_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1338: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv94, model_decoder_layers_11_fc1_bias5)
+            gelu141: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1338)
+            lv95 = R.call_tir(cls.NT_matmul2, (gelu141, model_decoder_layers_11_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1339: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv95, model_decoder_layers_11_fc2_bias5)
+            add1340: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1337, add1339)
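+            # --- decoder layer 12 (self-attn -> cross-attn -> FFN) ---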
+            layer_norm392: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1340, model_decoder_layers_12_self_attn_layer_norm_weight5, model_decoder_layers_12_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv96 = R.call_tir(cls.NT_matmul, (layer_norm392, model_decoder_layers_12_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1341: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv96, model_decoder_layers_12_self_attn_q_proj_bias5)
+            reshape1476: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1341, R.shape([1, 1, 20, 64]))
+            lv97 = R.call_tir(cls.NT_matmul, (layer_norm392, model_decoder_layers_12_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1477: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv97, R.shape([1, 1, 20, 64]))
+            lv98 = R.call_tir(cls.NT_matmul, (layer_norm392, model_decoder_layers_12_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1342: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv98, model_decoder_layers_12_self_attn_v_proj_bias5)
+            reshape1478: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1342, R.shape([1, 1, 20, 64]))
+            concat108: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1476, reshape1477, reshape1478), axis=2)
+            reshape1479: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat108, R.shape([1, 60, 64]))
+            lv289 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1479), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1480: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv289, R.shape([1, 1, 20, 64]))
+            reshape1481: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1480, R.shape([1, 1, 1280]))
+            lv99 = R.call_tir(cls.NT_matmul, (reshape1481, model_decoder_layers_12_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1343: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv99, model_decoder_layers_12_self_attn_out_proj_bias5)
+            add1344: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1340, add1343)
+            layer_norm393: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1344, model_decoder_layers_12_encoder_attn_layer_norm_weight5, model_decoder_layers_12_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv100 = R.call_tir(cls.NT_matmul, (layer_norm393, model_decoder_layers_12_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1345: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv100, model_decoder_layers_12_encoder_attn_q_proj_bias5)
+            reshape1482: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1345, R.shape([1, 1, 20, 64]))
+            reshape1483: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1482, R.shape([1, 20, 64]))
+            lv290 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1483), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1484: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv290, R.shape([1, 1, 20, 64]))
+            reshape1485: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1484, R.shape([1, 1, 1280]))
+            lv101 = R.call_tir(cls.NT_matmul, (reshape1485, model_decoder_layers_12_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1346: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv101, model_decoder_layers_12_encoder_attn_out_proj_bias5)
+            add1347: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1344, add1346)
+            layer_norm394: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1347, model_decoder_layers_12_final_layer_norm_weight5, model_decoder_layers_12_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv102 = R.call_tir(cls.NT_matmul1, (layer_norm394, model_decoder_layers_12_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1348: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv102, model_decoder_layers_12_fc1_bias5)
+            gelu142: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1348)
+            lv103 = R.call_tir(cls.NT_matmul2, (gelu142, model_decoder_layers_12_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1349: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv103, model_decoder_layers_12_fc2_bias5)
+            add1350: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1347, add1349)
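+            # --- decoder layer 13 (self-attn -> cross-attn -> FFN) ---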
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1479), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1480: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv289, R.shape([1, 1, 20, 64])) + reshape1481: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1480, R.shape([1, 1, 1280])) + lv99 = R.call_tir(cls.NT_matmul, (reshape1481, model_decoder_layers_12_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1343: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv99, model_decoder_layers_12_self_attn_out_proj_bias5) + add1344: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1340, add1343) + layer_norm393: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1344, model_decoder_layers_12_encoder_attn_layer_norm_weight5, model_decoder_layers_12_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv100 = R.call_tir(cls.NT_matmul, (layer_norm393, model_decoder_layers_12_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1345: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv100, model_decoder_layers_12_encoder_attn_q_proj_bias5) + reshape1482: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1345, R.shape([1, 1, 20, 64])) + reshape1483: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1482, R.shape([1, 20, 64])) + lv290 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1483), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1484: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv290, R.shape([1, 1, 20, 64])) + reshape1485: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1484, R.shape([1, 1, 1280])) + lv101 = R.call_tir(cls.NT_matmul, (reshape1485, model_decoder_layers_12_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1346: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv101, model_decoder_layers_12_encoder_attn_out_proj_bias5) + add1347: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1344, add1346) + layer_norm394: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1347, model_decoder_layers_12_final_layer_norm_weight5, model_decoder_layers_12_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv102 = R.call_tir(cls.NT_matmul1, (layer_norm394, model_decoder_layers_12_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1348: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv102, model_decoder_layers_12_fc1_bias5) + gelu142: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1348) + lv103 = R.call_tir(cls.NT_matmul2, (gelu142, model_decoder_layers_12_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1349: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv103, model_decoder_layers_12_fc2_bias5) + add1350: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1347, add1349) + layer_norm395: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1350, model_decoder_layers_13_self_attn_layer_norm_weight5, model_decoder_layers_13_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv104 = R.call_tir(cls.NT_matmul, (layer_norm395, model_decoder_layers_13_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + 
add1351: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv104, model_decoder_layers_13_self_attn_q_proj_bias5) + reshape1486: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1351, R.shape([1, 1, 20, 64])) + lv105 = R.call_tir(cls.NT_matmul, (layer_norm395, model_decoder_layers_13_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1487: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv105, R.shape([1, 1, 20, 64])) + lv106 = R.call_tir(cls.NT_matmul, (layer_norm395, model_decoder_layers_13_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1352: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv106, model_decoder_layers_13_self_attn_v_proj_bias5) + reshape1488: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1352, R.shape([1, 1, 20, 64])) + concat109: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1486, reshape1487, reshape1488), axis=2) + reshape1489: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat109, R.shape([1, 60, 64])) + lv291 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1489), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1490: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv291, R.shape([1, 1, 20, 64])) + reshape1491: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1490, R.shape([1, 1, 1280])) + lv107 = R.call_tir(cls.NT_matmul, (reshape1491, model_decoder_layers_13_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1353: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv107, model_decoder_layers_13_self_attn_out_proj_bias5) + add1354: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1350, add1353) + layer_norm396: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1354, model_decoder_layers_13_encoder_attn_layer_norm_weight5, model_decoder_layers_13_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv108 = R.call_tir(cls.NT_matmul, (layer_norm396, model_decoder_layers_13_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1355: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv108, model_decoder_layers_13_encoder_attn_q_proj_bias5) + reshape1492: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1355, R.shape([1, 1, 20, 64])) + reshape1493: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1492, R.shape([1, 20, 64])) + lv292 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1493), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1494: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv292, R.shape([1, 1, 20, 64])) + reshape1495: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1494, R.shape([1, 1, 1280])) + lv109 = R.call_tir(cls.NT_matmul, (reshape1495, model_decoder_layers_13_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1356: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv109, model_decoder_layers_13_encoder_attn_out_proj_bias5) + add1357: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1354, add1356) + layer_norm397: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1357, model_decoder_layers_13_final_layer_norm_weight5, model_decoder_layers_13_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, 
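+            # --- decoder layer 14 (self-attn -> cross-attn -> FFN) ---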
+            layer_norm398: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1360, model_decoder_layers_14_self_attn_layer_norm_weight5, model_decoder_layers_14_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv112 = R.call_tir(cls.NT_matmul, (layer_norm398, model_decoder_layers_14_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1361: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv112, model_decoder_layers_14_self_attn_q_proj_bias5)
+            reshape1496: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1361, R.shape([1, 1, 20, 64]))
+            lv113 = R.call_tir(cls.NT_matmul, (layer_norm398, model_decoder_layers_14_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1497: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv113, R.shape([1, 1, 20, 64]))
+            lv114 = R.call_tir(cls.NT_matmul, (layer_norm398, model_decoder_layers_14_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1362: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv114, model_decoder_layers_14_self_attn_v_proj_bias5)
+            reshape1498: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1362, R.shape([1, 1, 20, 64]))
+            concat110: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1496, reshape1497, reshape1498), axis=2)
+            reshape1499: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat110, R.shape([1, 60, 64]))
+            lv293 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1499), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1500: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv293, R.shape([1, 1, 20, 64]))
+            reshape1501: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1500, R.shape([1, 1, 1280]))
+            lv115 = R.call_tir(cls.NT_matmul, (reshape1501, model_decoder_layers_14_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1363: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv115, model_decoder_layers_14_self_attn_out_proj_bias5)
+            add1364: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1360, add1363)
+            layer_norm399: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1364, model_decoder_layers_14_encoder_attn_layer_norm_weight5, model_decoder_layers_14_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv116 = R.call_tir(cls.NT_matmul, (layer_norm399, model_decoder_layers_14_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1365: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv116, model_decoder_layers_14_encoder_attn_q_proj_bias5)
+            reshape1502: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1365, R.shape([1, 1, 20, 64]))
+            reshape1503: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1502, R.shape([1, 20, 64]))
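+            # (cross-attention and FFN of layer 14 continue below)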
+            lv294 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1503), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1504: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv294, R.shape([1, 1, 20, 64]))
+            reshape1505: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1504, R.shape([1, 1, 1280]))
+            lv117 = R.call_tir(cls.NT_matmul, (reshape1505, model_decoder_layers_14_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1366: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv117, model_decoder_layers_14_encoder_attn_out_proj_bias5)
+            add1367: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1364, add1366)
+            layer_norm400: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1367, model_decoder_layers_14_final_layer_norm_weight5, model_decoder_layers_14_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv118 = R.call_tir(cls.NT_matmul1, (layer_norm400, model_decoder_layers_14_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1368: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv118, model_decoder_layers_14_fc1_bias5)
+            gelu144: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1368)
+            lv119 = R.call_tir(cls.NT_matmul2, (gelu144, model_decoder_layers_14_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1369: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv119, model_decoder_layers_14_fc2_bias5)
+            add1370: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1367, add1369)
+            layer_norm401: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1370, model_decoder_layers_15_self_attn_layer_norm_weight5, model_decoder_layers_15_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv120 = R.call_tir(cls.NT_matmul, (layer_norm401, model_decoder_layers_15_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1371: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv120, model_decoder_layers_15_self_attn_q_proj_bias5)
+            reshape1506: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1371, R.shape([1, 1, 20, 64]))
+            lv121 = R.call_tir(cls.NT_matmul, (layer_norm401, model_decoder_layers_15_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1507: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv121, R.shape([1, 1, 20, 64]))
+            lv122 = R.call_tir(cls.NT_matmul, (layer_norm401, model_decoder_layers_15_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1372: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv122, model_decoder_layers_15_self_attn_v_proj_bias5)
+            reshape1508: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1372, R.shape([1, 1, 20, 64]))
+            concat111: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1506, reshape1507, reshape1508), axis=2)
+            reshape1509: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat111, R.shape([1, 60, 64]))
+            lv295 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1509), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1510: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv295, R.shape([1, 1, 20, 64]))
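+            # (output projection, cross-attention, and FFN of layer 15 continue below)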
dtype="float16") = R.reshape(reshape1510, R.shape([1, 1, 1280])) + lv123 = R.call_tir(cls.NT_matmul, (reshape1511, model_decoder_layers_15_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1373: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv123, model_decoder_layers_15_self_attn_out_proj_bias5) + add1374: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1370, add1373) + layer_norm402: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1374, model_decoder_layers_15_encoder_attn_layer_norm_weight5, model_decoder_layers_15_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv124 = R.call_tir(cls.NT_matmul, (layer_norm402, model_decoder_layers_15_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1375: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv124, model_decoder_layers_15_encoder_attn_q_proj_bias5) + reshape1512: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1375, R.shape([1, 1, 20, 64])) + reshape1513: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1512, R.shape([1, 20, 64])) + lv296 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1513), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1514: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv296, R.shape([1, 1, 20, 64])) + reshape1515: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1514, R.shape([1, 1, 1280])) + lv125 = R.call_tir(cls.NT_matmul, (reshape1515, model_decoder_layers_15_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1376: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv125, model_decoder_layers_15_encoder_attn_out_proj_bias5) + add1377: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1374, add1376) + layer_norm403: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1377, model_decoder_layers_15_final_layer_norm_weight5, model_decoder_layers_15_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv126 = R.call_tir(cls.NT_matmul1, (layer_norm403, model_decoder_layers_15_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1378: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv126, model_decoder_layers_15_fc1_bias5) + gelu145: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1378) + lv127 = R.call_tir(cls.NT_matmul2, (gelu145, model_decoder_layers_15_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1379: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv127, model_decoder_layers_15_fc2_bias5) + add1380: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1377, add1379) + layer_norm404: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1380, model_decoder_layers_16_self_attn_layer_norm_weight5, model_decoder_layers_16_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv128 = R.call_tir(cls.NT_matmul, (layer_norm404, model_decoder_layers_16_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1381: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv128, model_decoder_layers_16_self_attn_q_proj_bias5) + reshape1516: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1381, R.shape([1, 1, 20, 64])) + lv129 = R.call_tir(cls.NT_matmul, (layer_norm404, model_decoder_layers_16_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 
1280), dtype="float16")) + reshape1517: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv129, R.shape([1, 1, 20, 64])) + lv130 = R.call_tir(cls.NT_matmul, (layer_norm404, model_decoder_layers_16_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1382: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv130, model_decoder_layers_16_self_attn_v_proj_bias5) + reshape1518: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1382, R.shape([1, 1, 20, 64])) + concat112: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1516, reshape1517, reshape1518), axis=2) + reshape1519: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat112, R.shape([1, 60, 64])) + lv297 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1519), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1520: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv297, R.shape([1, 1, 20, 64])) + reshape1521: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1520, R.shape([1, 1, 1280])) + lv131 = R.call_tir(cls.NT_matmul, (reshape1521, model_decoder_layers_16_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1383: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv131, model_decoder_layers_16_self_attn_out_proj_bias5) + add1384: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1380, add1383) + layer_norm405: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1384, model_decoder_layers_16_encoder_attn_layer_norm_weight5, model_decoder_layers_16_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv132 = R.call_tir(cls.NT_matmul, (layer_norm405, model_decoder_layers_16_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1385: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv132, model_decoder_layers_16_encoder_attn_q_proj_bias5) + reshape1522: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1385, R.shape([1, 1, 20, 64])) + reshape1523: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1522, R.shape([1, 20, 64])) + lv298 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1523), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1524: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv298, R.shape([1, 1, 20, 64])) + reshape1525: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1524, R.shape([1, 1, 1280])) + lv133 = R.call_tir(cls.NT_matmul, (reshape1525, model_decoder_layers_16_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1386: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv133, model_decoder_layers_16_encoder_attn_out_proj_bias5) + add1387: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1384, add1386) + layer_norm406: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1387, model_decoder_layers_16_final_layer_norm_weight5, model_decoder_layers_16_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv134 = R.call_tir(cls.NT_matmul1, (layer_norm406, model_decoder_layers_16_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1388: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv134, model_decoder_layers_16_fc1_bias5) + gelu146: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1388) + lv135 = 
+            lv135 = R.call_tir(cls.NT_matmul2, (gelu146, model_decoder_layers_16_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1389: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv135, model_decoder_layers_16_fc2_bias5)
+            add1390: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1387, add1389)
+            layer_norm407: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1390, model_decoder_layers_17_self_attn_layer_norm_weight5, model_decoder_layers_17_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv136 = R.call_tir(cls.NT_matmul, (layer_norm407, model_decoder_layers_17_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1391: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv136, model_decoder_layers_17_self_attn_q_proj_bias5)
+            reshape1526: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1391, R.shape([1, 1, 20, 64]))
+            lv137 = R.call_tir(cls.NT_matmul, (layer_norm407, model_decoder_layers_17_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1527: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv137, R.shape([1, 1, 20, 64]))
+            lv138 = R.call_tir(cls.NT_matmul, (layer_norm407, model_decoder_layers_17_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1392: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv138, model_decoder_layers_17_self_attn_v_proj_bias5)
+            reshape1528: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1392, R.shape([1, 1, 20, 64]))
+            concat113: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1526, reshape1527, reshape1528), axis=2)
+            reshape1529: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat113, R.shape([1, 60, 64]))
+            lv299 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1529), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1530: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv299, R.shape([1, 1, 20, 64]))
+            reshape1531: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1530, R.shape([1, 1, 1280]))
+            lv139 = R.call_tir(cls.NT_matmul, (reshape1531, model_decoder_layers_17_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1393: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv139, model_decoder_layers_17_self_attn_out_proj_bias5)
+            add1394: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1390, add1393)
+            layer_norm408: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1394, model_decoder_layers_17_encoder_attn_layer_norm_weight5, model_decoder_layers_17_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv140 = R.call_tir(cls.NT_matmul, (layer_norm408, model_decoder_layers_17_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1395: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv140, model_decoder_layers_17_encoder_attn_q_proj_bias5)
+            reshape1532: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1395, R.shape([1, 1, 20, 64]))
+            reshape1533: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1532, R.shape([1, 20, 64]))
+            lv300 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1533), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
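+            # (cross-attention output projection and FFN of layer 17 continue below)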
+            reshape1534: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv300, R.shape([1, 1, 20, 64]))
+            reshape1535: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1534, R.shape([1, 1, 1280]))
+            lv141 = R.call_tir(cls.NT_matmul, (reshape1535, model_decoder_layers_17_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1396: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv141, model_decoder_layers_17_encoder_attn_out_proj_bias5)
+            add1397: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1394, add1396)
+            layer_norm409: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1397, model_decoder_layers_17_final_layer_norm_weight5, model_decoder_layers_17_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv142 = R.call_tir(cls.NT_matmul1, (layer_norm409, model_decoder_layers_17_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1398: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv142, model_decoder_layers_17_fc1_bias5)
+            gelu147: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1398)
+            lv143 = R.call_tir(cls.NT_matmul2, (gelu147, model_decoder_layers_17_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1399: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv143, model_decoder_layers_17_fc2_bias5)
+            add1400: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1397, add1399)
+            layer_norm410: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1400, model_decoder_layers_18_self_attn_layer_norm_weight5, model_decoder_layers_18_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv144 = R.call_tir(cls.NT_matmul, (layer_norm410, model_decoder_layers_18_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1401: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv144, model_decoder_layers_18_self_attn_q_proj_bias5)
+            reshape1536: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1401, R.shape([1, 1, 20, 64]))
+            lv145 = R.call_tir(cls.NT_matmul, (layer_norm410, model_decoder_layers_18_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1537: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv145, R.shape([1, 1, 20, 64]))
+            lv146 = R.call_tir(cls.NT_matmul, (layer_norm410, model_decoder_layers_18_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1402: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv146, model_decoder_layers_18_self_attn_v_proj_bias5)
+            reshape1538: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1402, R.shape([1, 1, 20, 64]))
+            concat114: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1536, reshape1537, reshape1538), axis=2)
+            reshape1539: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat114, R.shape([1, 60, 64]))
+            lv301 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1539), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1540: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv301, R.shape([1, 1, 20, 64]))
+            reshape1541: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1540, R.shape([1, 1, 1280]))
+            lv147 = R.call_tir(cls.NT_matmul, (reshape1541, model_decoder_layers_18_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1403: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv147, model_decoder_layers_18_self_attn_out_proj_bias5)
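+            # (residual add, cross-attention, and FFN of layer 18 continue below)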
R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1400, add1403) + layer_norm411: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1404, model_decoder_layers_18_encoder_attn_layer_norm_weight5, model_decoder_layers_18_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv148 = R.call_tir(cls.NT_matmul, (layer_norm411, model_decoder_layers_18_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1405: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv148, model_decoder_layers_18_encoder_attn_q_proj_bias5) + reshape1542: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1405, R.shape([1, 1, 20, 64])) + reshape1543: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1542, R.shape([1, 20, 64])) + lv302 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1543), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1544: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv302, R.shape([1, 1, 20, 64])) + reshape1545: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1544, R.shape([1, 1, 1280])) + lv149 = R.call_tir(cls.NT_matmul, (reshape1545, model_decoder_layers_18_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1406: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv149, model_decoder_layers_18_encoder_attn_out_proj_bias5) + add1407: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1404, add1406) + layer_norm412: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1407, model_decoder_layers_18_final_layer_norm_weight5, model_decoder_layers_18_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv150 = R.call_tir(cls.NT_matmul1, (layer_norm412, model_decoder_layers_18_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1408: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv150, model_decoder_layers_18_fc1_bias5) + gelu148: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1408) + lv151 = R.call_tir(cls.NT_matmul2, (gelu148, model_decoder_layers_18_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1409: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv151, model_decoder_layers_18_fc2_bias5) + add1410: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1407, add1409) + layer_norm413: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1410, model_decoder_layers_19_self_attn_layer_norm_weight5, model_decoder_layers_19_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv152 = R.call_tir(cls.NT_matmul, (layer_norm413, model_decoder_layers_19_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1411: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv152, model_decoder_layers_19_self_attn_q_proj_bias5) + reshape1546: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1411, R.shape([1, 1, 20, 64])) + lv153 = R.call_tir(cls.NT_matmul, (layer_norm413, model_decoder_layers_19_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1547: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv153, R.shape([1, 1, 20, 64])) + lv154 = R.call_tir(cls.NT_matmul, (layer_norm413, model_decoder_layers_19_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1412: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv154, 
+            reshape1548: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1412, R.shape([1, 1, 20, 64]))
+            concat115: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1546, reshape1547, reshape1548), axis=2)
+            reshape1549: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat115, R.shape([1, 60, 64]))
+            lv303 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1549), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1550: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv303, R.shape([1, 1, 20, 64]))
+            reshape1551: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1550, R.shape([1, 1, 1280]))
+            lv155 = R.call_tir(cls.NT_matmul, (reshape1551, model_decoder_layers_19_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1413: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv155, model_decoder_layers_19_self_attn_out_proj_bias5)
+            add1414: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1410, add1413)
+            layer_norm414: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1414, model_decoder_layers_19_encoder_attn_layer_norm_weight5, model_decoder_layers_19_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv156 = R.call_tir(cls.NT_matmul, (layer_norm414, model_decoder_layers_19_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1415: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv156, model_decoder_layers_19_encoder_attn_q_proj_bias5)
+            reshape1552: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1415, R.shape([1, 1, 20, 64]))
+            reshape1553: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1552, R.shape([1, 20, 64]))
+            lv304 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1553), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1554: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv304, R.shape([1, 1, 20, 64]))
+            reshape1555: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1554, R.shape([1, 1, 1280]))
+            lv157 = R.call_tir(cls.NT_matmul, (reshape1555, model_decoder_layers_19_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1416: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv157, model_decoder_layers_19_encoder_attn_out_proj_bias5)
+            add1417: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1414, add1416)
+            layer_norm415: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1417, model_decoder_layers_19_final_layer_norm_weight5, model_decoder_layers_19_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv158 = R.call_tir(cls.NT_matmul1, (layer_norm415, model_decoder_layers_19_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16"))
+            add1418: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv158, model_decoder_layers_19_fc1_bias5)
+            gelu149: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1418)
+            lv159 = R.call_tir(cls.NT_matmul2, (gelu149, model_decoder_layers_19_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1419: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv159, model_decoder_layers_19_fc2_bias5)
+            add1420: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1417, add1419)
+            layer_norm416: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1420, model_decoder_layers_20_self_attn_layer_norm_weight5, model_decoder_layers_20_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv160 = R.call_tir(cls.NT_matmul, (layer_norm416, model_decoder_layers_20_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1421: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv160, model_decoder_layers_20_self_attn_q_proj_bias5)
+            reshape1556: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1421, R.shape([1, 1, 20, 64]))
+            lv161 = R.call_tir(cls.NT_matmul, (layer_norm416, model_decoder_layers_20_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            reshape1557: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv161, R.shape([1, 1, 20, 64]))
+            lv162 = R.call_tir(cls.NT_matmul, (layer_norm416, model_decoder_layers_20_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1422: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv162, model_decoder_layers_20_self_attn_v_proj_bias5)
+            reshape1558: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1422, R.shape([1, 1, 20, 64]))
+            concat116: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1556, reshape1557, reshape1558), axis=2)
+            reshape1559: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat116, R.shape([1, 60, 64]))
+            lv305 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1559), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1560: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv305, R.shape([1, 1, 20, 64]))
+            reshape1561: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1560, R.shape([1, 1, 1280]))
+            lv163 = R.call_tir(cls.NT_matmul, (reshape1561, model_decoder_layers_20_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1423: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv163, model_decoder_layers_20_self_attn_out_proj_bias5)
+            add1424: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1420, add1423)
+            layer_norm417: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1424, model_decoder_layers_20_encoder_attn_layer_norm_weight5, model_decoder_layers_20_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+            lv164 = R.call_tir(cls.NT_matmul, (layer_norm417, model_decoder_layers_20_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
+            add1425: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv164, model_decoder_layers_20_encoder_attn_q_proj_bias5)
+            reshape1562: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1425, R.shape([1, 1, 20, 64]))
+            reshape1563: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1562, R.shape([1, 20, 64]))
+            lv306 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1563), out_sinfo=R.Tensor((1, 20, 64), dtype="float16"))
+            reshape1564: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv306, R.shape([1, 1, 20, 64]))
+            reshape1565: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1564, R.shape([1, 1, 1280]))
+            lv165 = R.call_tir(cls.NT_matmul, (reshape1565, model_decoder_layers_20_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16"))
R.add(lv165, model_decoder_layers_20_encoder_attn_out_proj_bias5) + add1427: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1424, add1426) + layer_norm418: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1427, model_decoder_layers_20_final_layer_norm_weight5, model_decoder_layers_20_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv166 = R.call_tir(cls.NT_matmul1, (layer_norm418, model_decoder_layers_20_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1428: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv166, model_decoder_layers_20_fc1_bias5) + gelu150: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1428) + lv167 = R.call_tir(cls.NT_matmul2, (gelu150, model_decoder_layers_20_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1429: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv167, model_decoder_layers_20_fc2_bias5) + add1430: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1427, add1429) + layer_norm419: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1430, model_decoder_layers_21_self_attn_layer_norm_weight5, model_decoder_layers_21_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv168 = R.call_tir(cls.NT_matmul, (layer_norm419, model_decoder_layers_21_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1431: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv168, model_decoder_layers_21_self_attn_q_proj_bias5) + reshape1566: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1431, R.shape([1, 1, 20, 64])) + lv169 = R.call_tir(cls.NT_matmul, (layer_norm419, model_decoder_layers_21_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1567: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv169, R.shape([1, 1, 20, 64])) + lv170 = R.call_tir(cls.NT_matmul, (layer_norm419, model_decoder_layers_21_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1432: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv170, model_decoder_layers_21_self_attn_v_proj_bias5) + reshape1568: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1432, R.shape([1, 1, 20, 64])) + concat117: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1566, reshape1567, reshape1568), axis=2) + reshape1569: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat117, R.shape([1, 60, 64])) + lv307 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1569), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1570: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv307, R.shape([1, 1, 20, 64])) + reshape1571: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1570, R.shape([1, 1, 1280])) + lv171 = R.call_tir(cls.NT_matmul, (reshape1571, model_decoder_layers_21_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1433: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv171, model_decoder_layers_21_self_attn_out_proj_bias5) + add1434: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1430, add1433) + layer_norm420: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1434, model_decoder_layers_21_encoder_attn_layer_norm_weight5, model_decoder_layers_21_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv172 = R.call_tir(cls.NT_matmul, 
(layer_norm420, model_decoder_layers_21_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1435: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv172, model_decoder_layers_21_encoder_attn_q_proj_bias5) + reshape1572: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1435, R.shape([1, 1, 20, 64])) + reshape1573: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1572, R.shape([1, 20, 64])) + lv308 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1573), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1574: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv308, R.shape([1, 1, 20, 64])) + reshape1575: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1574, R.shape([1, 1, 1280])) + lv173 = R.call_tir(cls.NT_matmul, (reshape1575, model_decoder_layers_21_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1436: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv173, model_decoder_layers_21_encoder_attn_out_proj_bias5) + add1437: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1434, add1436) + layer_norm421: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1437, model_decoder_layers_21_final_layer_norm_weight5, model_decoder_layers_21_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv174 = R.call_tir(cls.NT_matmul1, (layer_norm421, model_decoder_layers_21_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1438: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv174, model_decoder_layers_21_fc1_bias5) + gelu151: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1438) + lv175 = R.call_tir(cls.NT_matmul2, (gelu151, model_decoder_layers_21_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1439: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv175, model_decoder_layers_21_fc2_bias5) + add1440: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1437, add1439) + layer_norm422: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1440, model_decoder_layers_22_self_attn_layer_norm_weight5, model_decoder_layers_22_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv176 = R.call_tir(cls.NT_matmul, (layer_norm422, model_decoder_layers_22_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1441: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv176, model_decoder_layers_22_self_attn_q_proj_bias5) + reshape1576: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1441, R.shape([1, 1, 20, 64])) + lv177 = R.call_tir(cls.NT_matmul, (layer_norm422, model_decoder_layers_22_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1577: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv177, R.shape([1, 1, 20, 64])) + lv178 = R.call_tir(cls.NT_matmul, (layer_norm422, model_decoder_layers_22_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1442: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv178, model_decoder_layers_22_self_attn_v_proj_bias5) + reshape1578: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1442, R.shape([1, 1, 20, 64])) + concat118: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1576, reshape1577, reshape1578), axis=2) + reshape1579: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat118, R.shape([1, 60, 64])) + 
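+ # --- annotation added for readability; not part of the traced IR ---
+ # Layers 18-31 repeat one decoder block per layer index i:
+ #   self-attn:  layer_norm -> q/k/v projections via cls.NT_matmul (which contracts
+ #               x @ W.T, weights stored as (out_features, in_features)) -> reshape
+ #               each to (1, 1, 20 heads, 64) -> concat into fused qkv (1, 60, 64) ->
+ #               vm.builtin.attention_kv_cache_attention_with_fused_qkv(cache, i, 1.0, qkv)
+ #               -> out_proj -> residual add
+ #   cross-attn: layer_norm -> q projection only; k/v are served from the encoder
+ #               entries already held in paged_kv_cache by
+ #               vm.builtin.attention_kv_cache_cross_attention(cache, i, 1.0, q)
+ #   ffn:        layer_norm -> fc1 (1280 -> 5120, cls.NT_matmul1) -> gelu ->
+ #               fc2 (5120 -> 1280, cls.NT_matmul2) -> residual add
+ # Minimal NumPy sketch of the projection step, shapes taken from this trace
+ # (illustrative only, not executed as part of this module):
+ #   import numpy as np
+ #   x = np.ones((1, 1, 1280), np.float16)   # layer_norm output
+ #   w = np.ones((1280, 1280), np.float16)   # q_proj weight, (out, in) layout
+ #   q = (x @ w.T).reshape(1, 1, 20, 64)     # matches cls.NT_matmul + R.reshape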
lv309 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1579), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1580: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv309, R.shape([1, 1, 20, 64])) + reshape1581: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1580, R.shape([1, 1, 1280])) + lv179 = R.call_tir(cls.NT_matmul, (reshape1581, model_decoder_layers_22_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1443: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv179, model_decoder_layers_22_self_attn_out_proj_bias5) + add1444: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1440, add1443) + layer_norm423: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1444, model_decoder_layers_22_encoder_attn_layer_norm_weight5, model_decoder_layers_22_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv180 = R.call_tir(cls.NT_matmul, (layer_norm423, model_decoder_layers_22_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1445: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv180, model_decoder_layers_22_encoder_attn_q_proj_bias5) + reshape1582: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1445, R.shape([1, 1, 20, 64])) + reshape1583: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1582, R.shape([1, 20, 64])) + lv310 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1583), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1584: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv310, R.shape([1, 1, 20, 64])) + reshape1585: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1584, R.shape([1, 1, 1280])) + lv181 = R.call_tir(cls.NT_matmul, (reshape1585, model_decoder_layers_22_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1446: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv181, model_decoder_layers_22_encoder_attn_out_proj_bias5) + add1447: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1444, add1446) + layer_norm424: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1447, model_decoder_layers_22_final_layer_norm_weight5, model_decoder_layers_22_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv182 = R.call_tir(cls.NT_matmul1, (layer_norm424, model_decoder_layers_22_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1448: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv182, model_decoder_layers_22_fc1_bias5) + gelu152: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1448) + lv183 = R.call_tir(cls.NT_matmul2, (gelu152, model_decoder_layers_22_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1449: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv183, model_decoder_layers_22_fc2_bias5) + add1450: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1447, add1449) + layer_norm425: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1450, model_decoder_layers_23_self_attn_layer_norm_weight5, model_decoder_layers_23_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv184 = R.call_tir(cls.NT_matmul, (layer_norm425, model_decoder_layers_23_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), 
dtype="float16")) + add1451: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv184, model_decoder_layers_23_self_attn_q_proj_bias5) + reshape1586: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1451, R.shape([1, 1, 20, 64])) + lv185 = R.call_tir(cls.NT_matmul, (layer_norm425, model_decoder_layers_23_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1587: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv185, R.shape([1, 1, 20, 64])) + lv186 = R.call_tir(cls.NT_matmul, (layer_norm425, model_decoder_layers_23_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1452: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv186, model_decoder_layers_23_self_attn_v_proj_bias5) + reshape1588: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1452, R.shape([1, 1, 20, 64])) + concat119: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1586, reshape1587, reshape1588), axis=2) + reshape1589: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat119, R.shape([1, 60, 64])) + lv311 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1589), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1590: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv311, R.shape([1, 1, 20, 64])) + reshape1591: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1590, R.shape([1, 1, 1280])) + lv187 = R.call_tir(cls.NT_matmul, (reshape1591, model_decoder_layers_23_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1453: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv187, model_decoder_layers_23_self_attn_out_proj_bias5) + add1454: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1450, add1453) + layer_norm426: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1454, model_decoder_layers_23_encoder_attn_layer_norm_weight5, model_decoder_layers_23_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv188 = R.call_tir(cls.NT_matmul, (layer_norm426, model_decoder_layers_23_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1455: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv188, model_decoder_layers_23_encoder_attn_q_proj_bias5) + reshape1592: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1455, R.shape([1, 1, 20, 64])) + reshape1593: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1592, R.shape([1, 20, 64])) + lv312 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1593), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1594: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv312, R.shape([1, 1, 20, 64])) + reshape1595: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1594, R.shape([1, 1, 1280])) + lv189 = R.call_tir(cls.NT_matmul, (reshape1595, model_decoder_layers_23_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1456: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv189, model_decoder_layers_23_encoder_attn_out_proj_bias5) + add1457: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1454, add1456) + layer_norm427: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1457, model_decoder_layers_23_final_layer_norm_weight5, model_decoder_layers_23_final_layer_norm_bias5, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) + lv190 = R.call_tir(cls.NT_matmul1, (layer_norm427, model_decoder_layers_23_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1458: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv190, model_decoder_layers_23_fc1_bias5) + gelu153: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1458) + lv191 = R.call_tir(cls.NT_matmul2, (gelu153, model_decoder_layers_23_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1459: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv191, model_decoder_layers_23_fc2_bias5) + add1460: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1457, add1459) + layer_norm428: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1460, model_decoder_layers_24_self_attn_layer_norm_weight5, model_decoder_layers_24_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv192 = R.call_tir(cls.NT_matmul, (layer_norm428, model_decoder_layers_24_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1461: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv192, model_decoder_layers_24_self_attn_q_proj_bias5) + reshape1596: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1461, R.shape([1, 1, 20, 64])) + lv193 = R.call_tir(cls.NT_matmul, (layer_norm428, model_decoder_layers_24_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1597: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv193, R.shape([1, 1, 20, 64])) + lv194 = R.call_tir(cls.NT_matmul, (layer_norm428, model_decoder_layers_24_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1462: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv194, model_decoder_layers_24_self_attn_v_proj_bias5) + reshape1598: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1462, R.shape([1, 1, 20, 64])) + concat120: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1596, reshape1597, reshape1598), axis=2) + reshape1599: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat120, R.shape([1, 60, 64])) + lv313 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1599), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1600: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv313, R.shape([1, 1, 20, 64])) + reshape1601: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1600, R.shape([1, 1, 1280])) + lv195 = R.call_tir(cls.NT_matmul, (reshape1601, model_decoder_layers_24_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1463: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv195, model_decoder_layers_24_self_attn_out_proj_bias5) + add1464: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1460, add1463) + layer_norm429: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1464, model_decoder_layers_24_encoder_attn_layer_norm_weight5, model_decoder_layers_24_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv196 = R.call_tir(cls.NT_matmul, (layer_norm429, model_decoder_layers_24_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1465: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv196, model_decoder_layers_24_encoder_attn_q_proj_bias5) + reshape1602: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1465, R.shape([1, 1, 20, 
64])) + reshape1603: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1602, R.shape([1, 20, 64])) + lv314 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1603), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1604: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv314, R.shape([1, 1, 20, 64])) + reshape1605: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1604, R.shape([1, 1, 1280])) + lv197 = R.call_tir(cls.NT_matmul, (reshape1605, model_decoder_layers_24_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1466: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv197, model_decoder_layers_24_encoder_attn_out_proj_bias5) + add1467: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1464, add1466) + layer_norm430: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1467, model_decoder_layers_24_final_layer_norm_weight5, model_decoder_layers_24_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv198 = R.call_tir(cls.NT_matmul1, (layer_norm430, model_decoder_layers_24_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1468: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv198, model_decoder_layers_24_fc1_bias5) + gelu154: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1468) + lv199 = R.call_tir(cls.NT_matmul2, (gelu154, model_decoder_layers_24_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1469: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv199, model_decoder_layers_24_fc2_bias5) + add1470: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1467, add1469) + layer_norm431: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1470, model_decoder_layers_25_self_attn_layer_norm_weight5, model_decoder_layers_25_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv200 = R.call_tir(cls.NT_matmul, (layer_norm431, model_decoder_layers_25_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1471: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv200, model_decoder_layers_25_self_attn_q_proj_bias5) + reshape1606: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1471, R.shape([1, 1, 20, 64])) + lv201 = R.call_tir(cls.NT_matmul, (layer_norm431, model_decoder_layers_25_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1607: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv201, R.shape([1, 1, 20, 64])) + lv202 = R.call_tir(cls.NT_matmul, (layer_norm431, model_decoder_layers_25_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1472: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv202, model_decoder_layers_25_self_attn_v_proj_bias5) + reshape1608: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1472, R.shape([1, 1, 20, 64])) + concat121: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1606, reshape1607, reshape1608), axis=2) + reshape1609: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat121, R.shape([1, 60, 64])) + lv315 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1609), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1610: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv315, R.shape([1, 1, 20, 64])) + reshape1611: 
R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1610, R.shape([1, 1, 1280])) + lv203 = R.call_tir(cls.NT_matmul, (reshape1611, model_decoder_layers_25_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1473: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv203, model_decoder_layers_25_self_attn_out_proj_bias5) + add1474: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1470, add1473) + layer_norm432: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1474, model_decoder_layers_25_encoder_attn_layer_norm_weight5, model_decoder_layers_25_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv204 = R.call_tir(cls.NT_matmul, (layer_norm432, model_decoder_layers_25_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1475: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv204, model_decoder_layers_25_encoder_attn_q_proj_bias5) + reshape1612: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1475, R.shape([1, 1, 20, 64])) + reshape1613: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1612, R.shape([1, 20, 64])) + lv316 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1613), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1614: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv316, R.shape([1, 1, 20, 64])) + reshape1615: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1614, R.shape([1, 1, 1280])) + lv205 = R.call_tir(cls.NT_matmul, (reshape1615, model_decoder_layers_25_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1476: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv205, model_decoder_layers_25_encoder_attn_out_proj_bias5) + add1477: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1474, add1476) + layer_norm433: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1477, model_decoder_layers_25_final_layer_norm_weight5, model_decoder_layers_25_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv206 = R.call_tir(cls.NT_matmul1, (layer_norm433, model_decoder_layers_25_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1478: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv206, model_decoder_layers_25_fc1_bias5) + gelu155: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1478) + lv207 = R.call_tir(cls.NT_matmul2, (gelu155, model_decoder_layers_25_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1479: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv207, model_decoder_layers_25_fc2_bias5) + add1480: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1477, add1479) + layer_norm434: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1480, model_decoder_layers_26_self_attn_layer_norm_weight5, model_decoder_layers_26_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv208 = R.call_tir(cls.NT_matmul, (layer_norm434, model_decoder_layers_26_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1481: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv208, model_decoder_layers_26_self_attn_q_proj_bias5) + reshape1616: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1481, R.shape([1, 1, 20, 64])) + lv209 = R.call_tir(cls.NT_matmul, (layer_norm434, model_decoder_layers_26_self_attn_k_proj_weight5), 
out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1617: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv209, R.shape([1, 1, 20, 64])) + lv210 = R.call_tir(cls.NT_matmul, (layer_norm434, model_decoder_layers_26_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1482: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv210, model_decoder_layers_26_self_attn_v_proj_bias5) + reshape1618: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1482, R.shape([1, 1, 20, 64])) + concat122: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1616, reshape1617, reshape1618), axis=2) + reshape1619: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat122, R.shape([1, 60, 64])) + lv317 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1619), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1620: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv317, R.shape([1, 1, 20, 64])) + reshape1621: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1620, R.shape([1, 1, 1280])) + lv211 = R.call_tir(cls.NT_matmul, (reshape1621, model_decoder_layers_26_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1483: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv211, model_decoder_layers_26_self_attn_out_proj_bias5) + add1484: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1480, add1483) + layer_norm435: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1484, model_decoder_layers_26_encoder_attn_layer_norm_weight5, model_decoder_layers_26_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv212 = R.call_tir(cls.NT_matmul, (layer_norm435, model_decoder_layers_26_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1485: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv212, model_decoder_layers_26_encoder_attn_q_proj_bias5) + reshape1622: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1485, R.shape([1, 1, 20, 64])) + reshape1623: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1622, R.shape([1, 20, 64])) + lv318 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1623), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1624: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv318, R.shape([1, 1, 20, 64])) + reshape1625: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1624, R.shape([1, 1, 1280])) + lv213 = R.call_tir(cls.NT_matmul, (reshape1625, model_decoder_layers_26_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1486: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv213, model_decoder_layers_26_encoder_attn_out_proj_bias5) + add1487: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1484, add1486) + layer_norm436: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1487, model_decoder_layers_26_final_layer_norm_weight5, model_decoder_layers_26_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv214 = R.call_tir(cls.NT_matmul1, (layer_norm436, model_decoder_layers_26_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1488: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv214, model_decoder_layers_26_fc1_bias5) + gelu156: R.Tensor((1, 1, 5120), dtype="float16") = 
R.nn.gelu(add1488) + lv215 = R.call_tir(cls.NT_matmul2, (gelu156, model_decoder_layers_26_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1489: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv215, model_decoder_layers_26_fc2_bias5) + add1490: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1487, add1489) + layer_norm437: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1490, model_decoder_layers_27_self_attn_layer_norm_weight5, model_decoder_layers_27_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv216 = R.call_tir(cls.NT_matmul, (layer_norm437, model_decoder_layers_27_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1491: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv216, model_decoder_layers_27_self_attn_q_proj_bias5) + reshape1626: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1491, R.shape([1, 1, 20, 64])) + lv217 = R.call_tir(cls.NT_matmul, (layer_norm437, model_decoder_layers_27_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1627: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv217, R.shape([1, 1, 20, 64])) + lv218 = R.call_tir(cls.NT_matmul, (layer_norm437, model_decoder_layers_27_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1492: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv218, model_decoder_layers_27_self_attn_v_proj_bias5) + reshape1628: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1492, R.shape([1, 1, 20, 64])) + concat123: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1626, reshape1627, reshape1628), axis=2) + reshape1629: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat123, R.shape([1, 60, 64])) + lv319 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1629), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1630: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv319, R.shape([1, 1, 20, 64])) + reshape1631: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1630, R.shape([1, 1, 1280])) + lv219 = R.call_tir(cls.NT_matmul, (reshape1631, model_decoder_layers_27_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1493: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv219, model_decoder_layers_27_self_attn_out_proj_bias5) + add1494: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1490, add1493) + layer_norm438: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1494, model_decoder_layers_27_encoder_attn_layer_norm_weight5, model_decoder_layers_27_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv220 = R.call_tir(cls.NT_matmul, (layer_norm438, model_decoder_layers_27_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1495: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv220, model_decoder_layers_27_encoder_attn_q_proj_bias5) + reshape1632: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1495, R.shape([1, 1, 20, 64])) + reshape1633: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1632, R.shape([1, 20, 64])) + lv320 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1633), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1634: R.Tensor((1, 1, 20, 64), 
dtype="float16") = R.reshape(lv320, R.shape([1, 1, 20, 64])) + reshape1635: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1634, R.shape([1, 1, 1280])) + lv221 = R.call_tir(cls.NT_matmul, (reshape1635, model_decoder_layers_27_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1496: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv221, model_decoder_layers_27_encoder_attn_out_proj_bias5) + add1497: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1494, add1496) + layer_norm439: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1497, model_decoder_layers_27_final_layer_norm_weight5, model_decoder_layers_27_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv222 = R.call_tir(cls.NT_matmul1, (layer_norm439, model_decoder_layers_27_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1498: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv222, model_decoder_layers_27_fc1_bias5) + gelu157: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1498) + lv223 = R.call_tir(cls.NT_matmul2, (gelu157, model_decoder_layers_27_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1499: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv223, model_decoder_layers_27_fc2_bias5) + add1500: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1497, add1499) + layer_norm440: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1500, model_decoder_layers_28_self_attn_layer_norm_weight5, model_decoder_layers_28_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv224 = R.call_tir(cls.NT_matmul, (layer_norm440, model_decoder_layers_28_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1501: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv224, model_decoder_layers_28_self_attn_q_proj_bias5) + reshape1636: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1501, R.shape([1, 1, 20, 64])) + lv225 = R.call_tir(cls.NT_matmul, (layer_norm440, model_decoder_layers_28_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1637: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv225, R.shape([1, 1, 20, 64])) + lv226 = R.call_tir(cls.NT_matmul, (layer_norm440, model_decoder_layers_28_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1502: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv226, model_decoder_layers_28_self_attn_v_proj_bias5) + reshape1638: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1502, R.shape([1, 1, 20, 64])) + concat124: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1636, reshape1637, reshape1638), axis=2) + reshape1639: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat124, R.shape([1, 60, 64])) + lv321 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1639), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1640: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv321, R.shape([1, 1, 20, 64])) + reshape1641: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1640, R.shape([1, 1, 1280])) + lv227 = R.call_tir(cls.NT_matmul, (reshape1641, model_decoder_layers_28_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1503: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv227, model_decoder_layers_28_self_attn_out_proj_bias5) + 
add1504: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1500, add1503) + layer_norm441: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1504, model_decoder_layers_28_encoder_attn_layer_norm_weight5, model_decoder_layers_28_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv228 = R.call_tir(cls.NT_matmul, (layer_norm441, model_decoder_layers_28_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1505: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv228, model_decoder_layers_28_encoder_attn_q_proj_bias5) + reshape1642: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1505, R.shape([1, 1, 20, 64])) + reshape1643: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1642, R.shape([1, 20, 64])) + lv322 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1643), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1644: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv322, R.shape([1, 1, 20, 64])) + reshape1645: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1644, R.shape([1, 1, 1280])) + lv229 = R.call_tir(cls.NT_matmul, (reshape1645, model_decoder_layers_28_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1506: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv229, model_decoder_layers_28_encoder_attn_out_proj_bias5) + add1507: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1504, add1506) + layer_norm442: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1507, model_decoder_layers_28_final_layer_norm_weight5, model_decoder_layers_28_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv230 = R.call_tir(cls.NT_matmul1, (layer_norm442, model_decoder_layers_28_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1508: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv230, model_decoder_layers_28_fc1_bias5) + gelu158: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1508) + lv231 = R.call_tir(cls.NT_matmul2, (gelu158, model_decoder_layers_28_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1509: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv231, model_decoder_layers_28_fc2_bias5) + add1510: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1507, add1509) + layer_norm443: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1510, model_decoder_layers_29_self_attn_layer_norm_weight5, model_decoder_layers_29_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv232 = R.call_tir(cls.NT_matmul, (layer_norm443, model_decoder_layers_29_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1511: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv232, model_decoder_layers_29_self_attn_q_proj_bias5) + reshape1646: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1511, R.shape([1, 1, 20, 64])) + lv233 = R.call_tir(cls.NT_matmul, (layer_norm443, model_decoder_layers_29_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1647: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv233, R.shape([1, 1, 20, 64])) + lv234 = R.call_tir(cls.NT_matmul, (layer_norm443, model_decoder_layers_29_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1512: R.Tensor((1, 1, 1280), dtype="float16") = 
R.add(lv234, model_decoder_layers_29_self_attn_v_proj_bias5) + reshape1648: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1512, R.shape([1, 1, 20, 64])) + concat125: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1646, reshape1647, reshape1648), axis=2) + reshape1649: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat125, R.shape([1, 60, 64])) + lv323 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1649), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1650: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv323, R.shape([1, 1, 20, 64])) + reshape1651: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1650, R.shape([1, 1, 1280])) + lv235 = R.call_tir(cls.NT_matmul, (reshape1651, model_decoder_layers_29_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1513: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv235, model_decoder_layers_29_self_attn_out_proj_bias5) + add1514: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1510, add1513) + layer_norm444: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1514, model_decoder_layers_29_encoder_attn_layer_norm_weight5, model_decoder_layers_29_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv236 = R.call_tir(cls.NT_matmul, (layer_norm444, model_decoder_layers_29_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1515: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv236, model_decoder_layers_29_encoder_attn_q_proj_bias5) + reshape1652: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1515, R.shape([1, 1, 20, 64])) + reshape1653: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1652, R.shape([1, 20, 64])) + lv324 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1653), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1654: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv324, R.shape([1, 1, 20, 64])) + reshape1655: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1654, R.shape([1, 1, 1280])) + lv237 = R.call_tir(cls.NT_matmul, (reshape1655, model_decoder_layers_29_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1516: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv237, model_decoder_layers_29_encoder_attn_out_proj_bias5) + add1517: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1514, add1516) + layer_norm445: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1517, model_decoder_layers_29_final_layer_norm_weight5, model_decoder_layers_29_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv238 = R.call_tir(cls.NT_matmul1, (layer_norm445, model_decoder_layers_29_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1518: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv238, model_decoder_layers_29_fc1_bias5) + gelu159: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1518) + lv239 = R.call_tir(cls.NT_matmul2, (gelu159, model_decoder_layers_29_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1519: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv239, model_decoder_layers_29_fc2_bias5) + add1520: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1517, add1519) + layer_norm446: R.Tensor((1, 
1, 1280), dtype="float16") = R.nn.layer_norm(add1520, model_decoder_layers_30_self_attn_layer_norm_weight5, model_decoder_layers_30_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv240 = R.call_tir(cls.NT_matmul, (layer_norm446, model_decoder_layers_30_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1521: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv240, model_decoder_layers_30_self_attn_q_proj_bias5) + reshape1656: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1521, R.shape([1, 1, 20, 64])) + lv241 = R.call_tir(cls.NT_matmul, (layer_norm446, model_decoder_layers_30_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1657: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv241, R.shape([1, 1, 20, 64])) + lv242 = R.call_tir(cls.NT_matmul, (layer_norm446, model_decoder_layers_30_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1522: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv242, model_decoder_layers_30_self_attn_v_proj_bias5) + reshape1658: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1522, R.shape([1, 1, 20, 64])) + concat126: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1656, reshape1657, reshape1658), axis=2) + reshape1659: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat126, R.shape([1, 60, 64])) + lv325 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1659), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1660: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv325, R.shape([1, 1, 20, 64])) + reshape1661: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1660, R.shape([1, 1, 1280])) + lv243 = R.call_tir(cls.NT_matmul, (reshape1661, model_decoder_layers_30_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1523: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv243, model_decoder_layers_30_self_attn_out_proj_bias5) + add1524: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1520, add1523) + layer_norm447: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1524, model_decoder_layers_30_encoder_attn_layer_norm_weight5, model_decoder_layers_30_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv244 = R.call_tir(cls.NT_matmul, (layer_norm447, model_decoder_layers_30_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1525: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv244, model_decoder_layers_30_encoder_attn_q_proj_bias5) + reshape1662: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1525, R.shape([1, 1, 20, 64])) + reshape1663: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1662, R.shape([1, 20, 64])) + lv326 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1663), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1664: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv326, R.shape([1, 1, 20, 64])) + reshape1665: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1664, R.shape([1, 1, 1280])) + lv245 = R.call_tir(cls.NT_matmul, (reshape1665, model_decoder_layers_30_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1526: R.Tensor((1, 1, 1280), 
dtype="float16") = R.add(lv245, model_decoder_layers_30_encoder_attn_out_proj_bias5) + add1527: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1524, add1526) + layer_norm448: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1527, model_decoder_layers_30_final_layer_norm_weight5, model_decoder_layers_30_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv246 = R.call_tir(cls.NT_matmul1, (layer_norm448, model_decoder_layers_30_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1528: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv246, model_decoder_layers_30_fc1_bias5) + gelu160: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1528) + lv247 = R.call_tir(cls.NT_matmul2, (gelu160, model_decoder_layers_30_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1529: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv247, model_decoder_layers_30_fc2_bias5) + add1530: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1527, add1529) + layer_norm449: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1530, model_decoder_layers_31_self_attn_layer_norm_weight5, model_decoder_layers_31_self_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv248 = R.call_tir(cls.NT_matmul, (layer_norm449, model_decoder_layers_31_self_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1531: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv248, model_decoder_layers_31_self_attn_q_proj_bias5) + reshape1666: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1531, R.shape([1, 1, 20, 64])) + lv249 = R.call_tir(cls.NT_matmul, (layer_norm449, model_decoder_layers_31_self_attn_k_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + reshape1667: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv249, R.shape([1, 1, 20, 64])) + lv250 = R.call_tir(cls.NT_matmul, (layer_norm449, model_decoder_layers_31_self_attn_v_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1532: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv250, model_decoder_layers_31_self_attn_v_proj_bias5) + reshape1668: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1532, R.shape([1, 1, 20, 64])) + concat127: R.Tensor((1, 1, 60, 64), dtype="float16") = R.concat((reshape1666, reshape1667, reshape1668), axis=2) + reshape1669: R.Tensor((1, 60, 64), dtype="float16") = R.reshape(concat127, R.shape([1, 60, 64])) + lv327 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1669), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1670: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv327, R.shape([1, 1, 20, 64])) + reshape1671: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1670, R.shape([1, 1, 1280])) + lv251 = R.call_tir(cls.NT_matmul, (reshape1671, model_decoder_layers_31_self_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1533: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv251, model_decoder_layers_31_self_attn_out_proj_bias5) + add1534: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1530, add1533) + layer_norm450: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1534, model_decoder_layers_31_encoder_attn_layer_norm_weight5, model_decoder_layers_31_encoder_attn_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv252 = 
R.call_tir(cls.NT_matmul, (layer_norm450, model_decoder_layers_31_encoder_attn_q_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1535: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv252, model_decoder_layers_31_encoder_attn_q_proj_bias5) + reshape1672: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(add1535, R.shape([1, 1, 20, 64])) + reshape1673: R.Tensor((1, 20, 64), dtype="float16") = R.reshape(reshape1672, R.shape([1, 20, 64])) + lv328 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1673), out_sinfo=R.Tensor((1, 20, 64), dtype="float16")) + reshape1674: R.Tensor((1, 1, 20, 64), dtype="float16") = R.reshape(lv328, R.shape([1, 1, 20, 64])) + reshape1675: R.Tensor((1, 1, 1280), dtype="float16") = R.reshape(reshape1674, R.shape([1, 1, 1280])) + lv253 = R.call_tir(cls.NT_matmul, (reshape1675, model_decoder_layers_31_encoder_attn_out_proj_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1536: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv253, model_decoder_layers_31_encoder_attn_out_proj_bias5) + add1537: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1534, add1536) + layer_norm451: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1537, model_decoder_layers_31_final_layer_norm_weight5, model_decoder_layers_31_final_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv254 = R.call_tir(cls.NT_matmul1, (layer_norm451, model_decoder_layers_31_fc1_weight5), out_sinfo=R.Tensor((1, 1, 5120), dtype="float16")) + add1538: R.Tensor((1, 1, 5120), dtype="float16") = R.add(lv254, model_decoder_layers_31_fc1_bias5) + gelu161: R.Tensor((1, 1, 5120), dtype="float16") = R.nn.gelu(add1538) + lv255 = R.call_tir(cls.NT_matmul2, (gelu161, model_decoder_layers_31_fc2_weight5), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + add1539: R.Tensor((1, 1, 1280), dtype="float16") = R.add(lv255, model_decoder_layers_31_fc2_bias5) + add1540: R.Tensor((1, 1, 1280), dtype="float16") = R.add(add1537, add1539) + layer_norm452: R.Tensor((1, 1, 1280), dtype="float16") = R.nn.layer_norm(add1540, model_decoder_layer_norm_weight5, model_decoder_layer_norm_bias5, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv256 = R.call_tir(cls.NT_matmul3, (layer_norm452, model_decoder_embed_tokens_weight5), out_sinfo=R.Tensor((1, 1, 51866), dtype="float32")) + gv5: R.Tensor((1, 1, 51866), dtype="float32") = lv256 + R.output(gv5) + return gv5 + + @R.function(private=True) + def fused_relax_permute_dims_relax_matmul(model_decoder_layers_0_self_attn_q_proj_weight5: R.Tensor((1280, 1280), dtype="float16"), layer_norm356: R.Tensor((1, 1, 1280), dtype="float16")) -> R.Tensor((1, 1, 1280), dtype="float16"): + R.func_attr({"Composite": "transpose_matmul_fuse", "Primitive": 1}) + with R.dataflow(): + permute_dims1028: R.Tensor((1280, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_self_attn_q_proj_weight5, axes=None) + gv: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(layer_norm356, permute_dims1028, out_dtype="void") + R.output(gv) + return gv + + @R.function(private=True) + def fused_relax_permute_dims_relax_matmul1(model_decoder_layers_0_fc1_weight5: R.Tensor((5120, 1280), dtype="float16"), layer_norm358: R.Tensor((1, 1, 1280), dtype="float16")) -> R.Tensor((1, 1, 5120), dtype="float16"): + R.func_attr({"Composite": "transpose_matmul_fuse", "Primitive": 1}) + with R.dataflow(): + permute_dims1034: 
R.Tensor((1280, 5120), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc1_weight5, axes=None) + gv: R.Tensor((1, 1, 5120), dtype="float16") = R.matmul(layer_norm358, permute_dims1034, out_dtype="void") + R.output(gv) + return gv + + @R.function(private=True) + def fused_relax_permute_dims_relax_matmul2(model_decoder_layers_0_fc2_weight5: R.Tensor((1280, 5120), dtype="float16"), gelu130: R.Tensor((1, 1, 5120), dtype="float16")) -> R.Tensor((1, 1, 1280), dtype="float16"): + R.func_attr({"Composite": "transpose_matmul_fuse", "Primitive": 1}) + with R.dataflow(): + permute_dims1035: R.Tensor((5120, 1280), dtype="float16") = R.permute_dims(model_decoder_layers_0_fc2_weight5, axes=None) + gv: R.Tensor((1, 1, 1280), dtype="float16") = R.matmul(gelu130, permute_dims1035, out_dtype="void") + R.output(gv) + return gv + + @R.function(private=True) + def fused_relax_permute_dims_relax_matmul3(model_decoder_embed_tokens_weight5: R.Tensor((51866, 1280), dtype="float16"), layer_norm452: R.Tensor((1, 1, 1280), dtype="float16")) -> R.Tensor((1, 1, 51866), dtype="float32"): + R.func_attr({"Composite": "transpose_matmul_fuse", "Primitive": 1}) + with R.dataflow(): + permute_dims1284: R.Tensor((1280, 51866), dtype="float16") = R.permute_dims(model_decoder_embed_tokens_weight5, axes=None) + gv: R.Tensor((1, 1, 51866), dtype="float32") = R.matmul(layer_norm452, permute_dims1284, out_dtype="float32") + R.output(gv) + return gv + + @R.function + def multinomial_from_uniform(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), uniform_samples: R.Tensor(("num_samples",), dtype="float32"), sample_indices: R.Tensor(("num_samples",), dtype="int32")) -> R.Tensor(("num_samples",), dtype="int32"): + num_samples = T.int64() + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + with R.dataflow(): + probs_1: R.Tensor((batch_size, vocab_size), dtype="float32") = probs + uniform_samples_1: R.Tensor((num_samples, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", uniform_samples, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="float32"),)) + sample_indices_1: R.Tensor((num_samples, 1), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", sample_indices, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="int32"),)) + nn_multinomial_from_uniform: R.Tensor((num_samples, 1), dtype="int32") = R.multinomial_from_uniform(probs_1, uniform_samples_1, sample_indices_1, dtype="int32") + lv: R.Tensor((num_samples,), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", nn_multinomial_from_uniform, R.shape([num_samples]), sinfo_args=(R.Tensor((num_samples,), dtype="int32"),)) + gv: R.Tensor((num_samples,), dtype="int32") = lv + R.output(gv) + return gv + + @R.function + def prefill(input_ids: R.Tensor((1, "seq_len"), dtype="int32"), paged_kv_cache: R.Object, packed_params: R.Tuple(R.Tensor((1280, 128, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280, 3), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1500, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), 
dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), 
dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((51866, 1280), dtype="float16"), R.Tensor((448, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), 
R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), 
R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280, 1280), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((5120, 1280), dtype="float16"), R.Tensor((5120,), dtype="float16"), R.Tensor((1280, 5120), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"), R.Tensor((1280,), dtype="float16"))) -> R.Tensor((1, 1, 51866), dtype="float32"): + seq_len = T.int64() + R.func_attr({"num_input": 2, "relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + cls = Module + with R.dataflow(): + model_decoder_embed_tokens_weight4: R.Tensor((51866, 1280), dtype="float16") = packed_params[487] + model_decoder_embed_positions_weight4: R.Tensor((448, 1280), dtype="float16") = packed_params[488] + model_decoder_layers_0_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[489] + model_decoder_layers_0_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[490] + model_decoder_layers_0_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[491] + model_decoder_layers_0_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[492] + model_decoder_layers_0_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[493] + model_decoder_layers_0_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[494] + model_decoder_layers_0_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[495] + model_decoder_layers_0_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[496] + model_decoder_layers_0_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[497] + model_decoder_layers_0_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[501] + model_decoder_layers_0_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[502] + model_decoder_layers_0_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[503] + model_decoder_layers_0_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[504] + model_decoder_layers_0_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[505] + model_decoder_layers_0_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[506] + 
model_decoder_layers_0_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[507] + model_decoder_layers_0_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[508] + model_decoder_layers_0_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[509] + model_decoder_layers_0_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[510] + model_decoder_layers_0_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[511] + model_decoder_layers_0_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[512] + model_decoder_layers_1_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[513] + model_decoder_layers_1_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[514] + model_decoder_layers_1_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[515] + model_decoder_layers_1_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[516] + model_decoder_layers_1_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[517] + model_decoder_layers_1_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[518] + model_decoder_layers_1_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[519] + model_decoder_layers_1_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[520] + model_decoder_layers_1_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[521] + model_decoder_layers_1_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[525] + model_decoder_layers_1_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[526] + model_decoder_layers_1_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[527] + model_decoder_layers_1_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[528] + model_decoder_layers_1_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[529] + model_decoder_layers_1_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[530] + model_decoder_layers_1_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[531] + model_decoder_layers_1_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[532] + model_decoder_layers_1_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[533] + model_decoder_layers_1_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[534] + model_decoder_layers_1_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[535] + model_decoder_layers_1_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[536] + model_decoder_layers_2_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[537] + model_decoder_layers_2_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[538] + model_decoder_layers_2_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[539] + model_decoder_layers_2_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[540] + model_decoder_layers_2_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[541] + model_decoder_layers_2_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[542] + 
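+ # NOTE (annotation): the bindings repeat with a fixed stride: each decoder layer occupies 24 consecutive packed_params slots, so layer n's self_attn_k_proj_weight sits at index 489 + 24*n (489, 513, 537, 561, ...).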
model_decoder_layers_2_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[543] + model_decoder_layers_2_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[544] + model_decoder_layers_2_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[545] + model_decoder_layers_2_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[549] + model_decoder_layers_2_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[550] + model_decoder_layers_2_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[551] + model_decoder_layers_2_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[552] + model_decoder_layers_2_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[553] + model_decoder_layers_2_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[554] + model_decoder_layers_2_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[555] + model_decoder_layers_2_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[556] + model_decoder_layers_2_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[557] + model_decoder_layers_2_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[558] + model_decoder_layers_2_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[559] + model_decoder_layers_2_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[560] + model_decoder_layers_3_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[561] + model_decoder_layers_3_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[562] + model_decoder_layers_3_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[563] + model_decoder_layers_3_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[564] + model_decoder_layers_3_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[565] + model_decoder_layers_3_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[566] + model_decoder_layers_3_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[567] + model_decoder_layers_3_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[568] + model_decoder_layers_3_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[569] + model_decoder_layers_3_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[573] + model_decoder_layers_3_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[574] + model_decoder_layers_3_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[575] + model_decoder_layers_3_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[576] + model_decoder_layers_3_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[577] + model_decoder_layers_3_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[578] + model_decoder_layers_3_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[579] + model_decoder_layers_3_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[580] + model_decoder_layers_3_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[581] + 
model_decoder_layers_3_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[582] + model_decoder_layers_3_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[583] + model_decoder_layers_3_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[584] + model_decoder_layers_4_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[585] + model_decoder_layers_4_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[586] + model_decoder_layers_4_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[587] + model_decoder_layers_4_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[588] + model_decoder_layers_4_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[589] + model_decoder_layers_4_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[590] + model_decoder_layers_4_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[591] + model_decoder_layers_4_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[592] + model_decoder_layers_4_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[593] + model_decoder_layers_4_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[597] + model_decoder_layers_4_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[598] + model_decoder_layers_4_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[599] + model_decoder_layers_4_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[600] + model_decoder_layers_4_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[601] + model_decoder_layers_4_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[602] + model_decoder_layers_4_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[603] + model_decoder_layers_4_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[604] + model_decoder_layers_4_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[605] + model_decoder_layers_4_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[606] + model_decoder_layers_4_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[607] + model_decoder_layers_4_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[608] + model_decoder_layers_5_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[609] + model_decoder_layers_5_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[610] + model_decoder_layers_5_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[611] + model_decoder_layers_5_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[612] + model_decoder_layers_5_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[613] + model_decoder_layers_5_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[614] + model_decoder_layers_5_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[615] + model_decoder_layers_5_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[616] + model_decoder_layers_5_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[617] + 
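+ # NOTE (annotation): the local names in this function carry a "4" suffix, while the decode path earlier in the dump binds the same weights under a "5" suffix; this appears to be how the printer disambiguates per-function copies of the parameter bindings.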
model_decoder_layers_5_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[621] + model_decoder_layers_5_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[622] + model_decoder_layers_5_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[623] + model_decoder_layers_5_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[624] + model_decoder_layers_5_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[625] + model_decoder_layers_5_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[626] + model_decoder_layers_5_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[627] + model_decoder_layers_5_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[628] + model_decoder_layers_5_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[629] + model_decoder_layers_5_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[630] + model_decoder_layers_5_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[631] + model_decoder_layers_5_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[632] + model_decoder_layers_6_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[633] + model_decoder_layers_6_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[634] + model_decoder_layers_6_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[635] + model_decoder_layers_6_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[636] + model_decoder_layers_6_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[637] + model_decoder_layers_6_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[638] + model_decoder_layers_6_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[639] + model_decoder_layers_6_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[640] + model_decoder_layers_6_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[641] + model_decoder_layers_6_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[645] + model_decoder_layers_6_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[646] + model_decoder_layers_6_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[647] + model_decoder_layers_6_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[648] + model_decoder_layers_6_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[649] + model_decoder_layers_6_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[650] + model_decoder_layers_6_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[651] + model_decoder_layers_6_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[652] + model_decoder_layers_6_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[653] + model_decoder_layers_6_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[654] + model_decoder_layers_6_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[655] + model_decoder_layers_6_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[656] + 
model_decoder_layers_7_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[657] + model_decoder_layers_7_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[658] + model_decoder_layers_7_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[659] + model_decoder_layers_7_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[660] + model_decoder_layers_7_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[661] + model_decoder_layers_7_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[662] + model_decoder_layers_7_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[663] + model_decoder_layers_7_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[664] + model_decoder_layers_7_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[665] + model_decoder_layers_7_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[669] + model_decoder_layers_7_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[670] + model_decoder_layers_7_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[671] + model_decoder_layers_7_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[672] + model_decoder_layers_7_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[673] + model_decoder_layers_7_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[674] + model_decoder_layers_7_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[675] + model_decoder_layers_7_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[676] + model_decoder_layers_7_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[677] + model_decoder_layers_7_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[678] + model_decoder_layers_7_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[679] + model_decoder_layers_7_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[680] + model_decoder_layers_8_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[681] + model_decoder_layers_8_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[682] + model_decoder_layers_8_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[683] + model_decoder_layers_8_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[684] + model_decoder_layers_8_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[685] + model_decoder_layers_8_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[686] + model_decoder_layers_8_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[687] + model_decoder_layers_8_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[688] + model_decoder_layers_8_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[689] + model_decoder_layers_8_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[693] + model_decoder_layers_8_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[694] + model_decoder_layers_8_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[695] + model_decoder_layers_8_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[696] + model_decoder_layers_8_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[697] + model_decoder_layers_8_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[698] + model_decoder_layers_8_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[699] + model_decoder_layers_8_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[700] + model_decoder_layers_8_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[701] + model_decoder_layers_8_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[702] + model_decoder_layers_8_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[703] + model_decoder_layers_8_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[704] + model_decoder_layers_9_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[705] + model_decoder_layers_9_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[706] + model_decoder_layers_9_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[707] + model_decoder_layers_9_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[708] + model_decoder_layers_9_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[709] + model_decoder_layers_9_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[710] + model_decoder_layers_9_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[711] + model_decoder_layers_9_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[712] + model_decoder_layers_9_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[713] + model_decoder_layers_9_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[717] + model_decoder_layers_9_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[718] + model_decoder_layers_9_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[719] + model_decoder_layers_9_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[720] + model_decoder_layers_9_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[721] + model_decoder_layers_9_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[722] + model_decoder_layers_9_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[723] + model_decoder_layers_9_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[724] + model_decoder_layers_9_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[725] + model_decoder_layers_9_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[726] + model_decoder_layers_9_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[727] + model_decoder_layers_9_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[728] + model_decoder_layers_10_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[729] + model_decoder_layers_10_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[730] + model_decoder_layers_10_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[731] + model_decoder_layers_10_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[732] + model_decoder_layers_10_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[733] + model_decoder_layers_10_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[734] + model_decoder_layers_10_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[735] + model_decoder_layers_10_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[736] + model_decoder_layers_10_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[737] + model_decoder_layers_10_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[741] + model_decoder_layers_10_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[742] + model_decoder_layers_10_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[743] + model_decoder_layers_10_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[744] + model_decoder_layers_10_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[745] + model_decoder_layers_10_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[746] + model_decoder_layers_10_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[747] + model_decoder_layers_10_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[748] + model_decoder_layers_10_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[749] + model_decoder_layers_10_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[750] + model_decoder_layers_10_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[751] + model_decoder_layers_10_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[752] + model_decoder_layers_11_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[753] + model_decoder_layers_11_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[754] + model_decoder_layers_11_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[755] + model_decoder_layers_11_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[756] + model_decoder_layers_11_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[757] + model_decoder_layers_11_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[758] + model_decoder_layers_11_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[759] + model_decoder_layers_11_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[760] + model_decoder_layers_11_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[761] + model_decoder_layers_11_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[765] + model_decoder_layers_11_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[766] + model_decoder_layers_11_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[767] + model_decoder_layers_11_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[768] + model_decoder_layers_11_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[769] + 
model_decoder_layers_11_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[770] + model_decoder_layers_11_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[771] + model_decoder_layers_11_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[772] + model_decoder_layers_11_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[773] + model_decoder_layers_11_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[774] + model_decoder_layers_11_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[775] + model_decoder_layers_11_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[776] + model_decoder_layers_12_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[777] + model_decoder_layers_12_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[778] + model_decoder_layers_12_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[779] + model_decoder_layers_12_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[780] + model_decoder_layers_12_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[781] + model_decoder_layers_12_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[782] + model_decoder_layers_12_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[783] + model_decoder_layers_12_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[784] + model_decoder_layers_12_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[785] + model_decoder_layers_12_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[789] + model_decoder_layers_12_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[790] + model_decoder_layers_12_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[791] + model_decoder_layers_12_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[792] + model_decoder_layers_12_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[793] + model_decoder_layers_12_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[794] + model_decoder_layers_12_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[795] + model_decoder_layers_12_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[796] + model_decoder_layers_12_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[797] + model_decoder_layers_12_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[798] + model_decoder_layers_12_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[799] + model_decoder_layers_12_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[800] + model_decoder_layers_13_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[801] + model_decoder_layers_13_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[802] + model_decoder_layers_13_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[803] + model_decoder_layers_13_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[804] + model_decoder_layers_13_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[805] + 
model_decoder_layers_13_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[806] + model_decoder_layers_13_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[807] + model_decoder_layers_13_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[808] + model_decoder_layers_13_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[809] + model_decoder_layers_13_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[813] + model_decoder_layers_13_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[814] + model_decoder_layers_13_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[815] + model_decoder_layers_13_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[816] + model_decoder_layers_13_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[817] + model_decoder_layers_13_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[818] + model_decoder_layers_13_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[819] + model_decoder_layers_13_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[820] + model_decoder_layers_13_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[821] + model_decoder_layers_13_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[822] + model_decoder_layers_13_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[823] + model_decoder_layers_13_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[824] + model_decoder_layers_14_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[825] + model_decoder_layers_14_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[826] + model_decoder_layers_14_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[827] + model_decoder_layers_14_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[828] + model_decoder_layers_14_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[829] + model_decoder_layers_14_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[830] + model_decoder_layers_14_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[831] + model_decoder_layers_14_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[832] + model_decoder_layers_14_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[833] + model_decoder_layers_14_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[837] + model_decoder_layers_14_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[838] + model_decoder_layers_14_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[839] + model_decoder_layers_14_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[840] + model_decoder_layers_14_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[841] + model_decoder_layers_14_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[842] + model_decoder_layers_14_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[843] + model_decoder_layers_14_fc1_bias4: 
R.Tensor((5120,), dtype="float16") = packed_params[844] + model_decoder_layers_14_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[845] + model_decoder_layers_14_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[846] + model_decoder_layers_14_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[847] + model_decoder_layers_14_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[848] + model_decoder_layers_15_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[849] + model_decoder_layers_15_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[850] + model_decoder_layers_15_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[851] + model_decoder_layers_15_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[852] + model_decoder_layers_15_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[853] + model_decoder_layers_15_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[854] + model_decoder_layers_15_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[855] + model_decoder_layers_15_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[856] + model_decoder_layers_15_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[857] + model_decoder_layers_15_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[861] + model_decoder_layers_15_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[862] + model_decoder_layers_15_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[863] + model_decoder_layers_15_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[864] + model_decoder_layers_15_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[865] + model_decoder_layers_15_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[866] + model_decoder_layers_15_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[867] + model_decoder_layers_15_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[868] + model_decoder_layers_15_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[869] + model_decoder_layers_15_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[870] + model_decoder_layers_15_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[871] + model_decoder_layers_15_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[872] + model_decoder_layers_16_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[873] + model_decoder_layers_16_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[874] + model_decoder_layers_16_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[875] + model_decoder_layers_16_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[876] + model_decoder_layers_16_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[877] + model_decoder_layers_16_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[878] + model_decoder_layers_16_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[879] + 
model_decoder_layers_16_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[880] + model_decoder_layers_16_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[881] + model_decoder_layers_16_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[885] + model_decoder_layers_16_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[886] + model_decoder_layers_16_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[887] + model_decoder_layers_16_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[888] + model_decoder_layers_16_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[889] + model_decoder_layers_16_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[890] + model_decoder_layers_16_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[891] + model_decoder_layers_16_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[892] + model_decoder_layers_16_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[893] + model_decoder_layers_16_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[894] + model_decoder_layers_16_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[895] + model_decoder_layers_16_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[896] + model_decoder_layers_17_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[897] + model_decoder_layers_17_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[898] + model_decoder_layers_17_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[899] + model_decoder_layers_17_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[900] + model_decoder_layers_17_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[901] + model_decoder_layers_17_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[902] + model_decoder_layers_17_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[903] + model_decoder_layers_17_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[904] + model_decoder_layers_17_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[905] + model_decoder_layers_17_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[909] + model_decoder_layers_17_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[910] + model_decoder_layers_17_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[911] + model_decoder_layers_17_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[912] + model_decoder_layers_17_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[913] + model_decoder_layers_17_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[914] + model_decoder_layers_17_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[915] + model_decoder_layers_17_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[916] + model_decoder_layers_17_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[917] + model_decoder_layers_17_fc2_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[918] + model_decoder_layers_17_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[919] + model_decoder_layers_17_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[920] + model_decoder_layers_18_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[921] + model_decoder_layers_18_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[922] + model_decoder_layers_18_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[923] + model_decoder_layers_18_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[924] + model_decoder_layers_18_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[925] + model_decoder_layers_18_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[926] + model_decoder_layers_18_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[927] + model_decoder_layers_18_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[928] + model_decoder_layers_18_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[929] + model_decoder_layers_18_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[933] + model_decoder_layers_18_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[934] + model_decoder_layers_18_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[935] + model_decoder_layers_18_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[936] + model_decoder_layers_18_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[937] + model_decoder_layers_18_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[938] + model_decoder_layers_18_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[939] + model_decoder_layers_18_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[940] + model_decoder_layers_18_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[941] + model_decoder_layers_18_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[942] + model_decoder_layers_18_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[943] + model_decoder_layers_18_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[944] + model_decoder_layers_19_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[945] + model_decoder_layers_19_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[946] + model_decoder_layers_19_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[947] + model_decoder_layers_19_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[948] + model_decoder_layers_19_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[949] + model_decoder_layers_19_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[950] + model_decoder_layers_19_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[951] + model_decoder_layers_19_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[952] + model_decoder_layers_19_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[953] + 
model_decoder_layers_19_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[957] + model_decoder_layers_19_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[958] + model_decoder_layers_19_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[959] + model_decoder_layers_19_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[960] + model_decoder_layers_19_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[961] + model_decoder_layers_19_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[962] + model_decoder_layers_19_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[963] + model_decoder_layers_19_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[964] + model_decoder_layers_19_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[965] + model_decoder_layers_19_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[966] + model_decoder_layers_19_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[967] + model_decoder_layers_19_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[968] + model_decoder_layers_20_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[969] + model_decoder_layers_20_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[970] + model_decoder_layers_20_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[971] + model_decoder_layers_20_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[972] + model_decoder_layers_20_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[973] + model_decoder_layers_20_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[974] + model_decoder_layers_20_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[975] + model_decoder_layers_20_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[976] + model_decoder_layers_20_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[977] + model_decoder_layers_20_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[981] + model_decoder_layers_20_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[982] + model_decoder_layers_20_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[983] + model_decoder_layers_20_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[984] + model_decoder_layers_20_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[985] + model_decoder_layers_20_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[986] + model_decoder_layers_20_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[987] + model_decoder_layers_20_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[988] + model_decoder_layers_20_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[989] + model_decoder_layers_20_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[990] + model_decoder_layers_20_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[991] + model_decoder_layers_20_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[992] + model_decoder_layers_21_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[993] + model_decoder_layers_21_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[994] + model_decoder_layers_21_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[995] + model_decoder_layers_21_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[996] + model_decoder_layers_21_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[997] + model_decoder_layers_21_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[998] + model_decoder_layers_21_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[999] + model_decoder_layers_21_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1000] + model_decoder_layers_21_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1001] + model_decoder_layers_21_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1005] + model_decoder_layers_21_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1006] + model_decoder_layers_21_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1007] + model_decoder_layers_21_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1008] + model_decoder_layers_21_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1009] + model_decoder_layers_21_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1010] + model_decoder_layers_21_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1011] + model_decoder_layers_21_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1012] + model_decoder_layers_21_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1013] + model_decoder_layers_21_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1014] + model_decoder_layers_21_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1015] + model_decoder_layers_21_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1016] + model_decoder_layers_22_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1017] + model_decoder_layers_22_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1018] + model_decoder_layers_22_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1019] + model_decoder_layers_22_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1020] + model_decoder_layers_22_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1021] + model_decoder_layers_22_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1022] + model_decoder_layers_22_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1023] + model_decoder_layers_22_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1024] + model_decoder_layers_22_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1025] + model_decoder_layers_22_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1029] + model_decoder_layers_22_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1030] + 
model_decoder_layers_22_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1031] + model_decoder_layers_22_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1032] + model_decoder_layers_22_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1033] + model_decoder_layers_22_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1034] + model_decoder_layers_22_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1035] + model_decoder_layers_22_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1036] + model_decoder_layers_22_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1037] + model_decoder_layers_22_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1038] + model_decoder_layers_22_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1039] + model_decoder_layers_22_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1040] + model_decoder_layers_23_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1041] + model_decoder_layers_23_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1042] + model_decoder_layers_23_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1043] + model_decoder_layers_23_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1044] + model_decoder_layers_23_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1045] + model_decoder_layers_23_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1046] + model_decoder_layers_23_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1047] + model_decoder_layers_23_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1048] + model_decoder_layers_23_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1049] + model_decoder_layers_23_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1053] + model_decoder_layers_23_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1054] + model_decoder_layers_23_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1055] + model_decoder_layers_23_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1056] + model_decoder_layers_23_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1057] + model_decoder_layers_23_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1058] + model_decoder_layers_23_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1059] + model_decoder_layers_23_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1060] + model_decoder_layers_23_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1061] + model_decoder_layers_23_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1062] + model_decoder_layers_23_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1063] + model_decoder_layers_23_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1064] + model_decoder_layers_24_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1065] + model_decoder_layers_24_self_attn_v_proj_weight4: R.Tensor((1280, 1280), 
dtype="float16") = packed_params[1066] + model_decoder_layers_24_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1067] + model_decoder_layers_24_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1068] + model_decoder_layers_24_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1069] + model_decoder_layers_24_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1070] + model_decoder_layers_24_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1071] + model_decoder_layers_24_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1072] + model_decoder_layers_24_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1073] + model_decoder_layers_24_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1077] + model_decoder_layers_24_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1078] + model_decoder_layers_24_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1079] + model_decoder_layers_24_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1080] + model_decoder_layers_24_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1081] + model_decoder_layers_24_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1082] + model_decoder_layers_24_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1083] + model_decoder_layers_24_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1084] + model_decoder_layers_24_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1085] + model_decoder_layers_24_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1086] + model_decoder_layers_24_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1087] + model_decoder_layers_24_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1088] + model_decoder_layers_25_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1089] + model_decoder_layers_25_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1090] + model_decoder_layers_25_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1091] + model_decoder_layers_25_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1092] + model_decoder_layers_25_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1093] + model_decoder_layers_25_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1094] + model_decoder_layers_25_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1095] + model_decoder_layers_25_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1096] + model_decoder_layers_25_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1097] + model_decoder_layers_25_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1101] + model_decoder_layers_25_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1102] + model_decoder_layers_25_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1103] + model_decoder_layers_25_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = 
packed_params[1104] + model_decoder_layers_25_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1105] + model_decoder_layers_25_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1106] + model_decoder_layers_25_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1107] + model_decoder_layers_25_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1108] + model_decoder_layers_25_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1109] + model_decoder_layers_25_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1110] + model_decoder_layers_25_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1111] + model_decoder_layers_25_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1112] + model_decoder_layers_26_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1113] + model_decoder_layers_26_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1114] + model_decoder_layers_26_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1115] + model_decoder_layers_26_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1116] + model_decoder_layers_26_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1117] + model_decoder_layers_26_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1118] + model_decoder_layers_26_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1119] + model_decoder_layers_26_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1120] + model_decoder_layers_26_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1121] + model_decoder_layers_26_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1125] + model_decoder_layers_26_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1126] + model_decoder_layers_26_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1127] + model_decoder_layers_26_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1128] + model_decoder_layers_26_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1129] + model_decoder_layers_26_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1130] + model_decoder_layers_26_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1131] + model_decoder_layers_26_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1132] + model_decoder_layers_26_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1133] + model_decoder_layers_26_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1134] + model_decoder_layers_26_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1135] + model_decoder_layers_26_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1136] + model_decoder_layers_27_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1137] + model_decoder_layers_27_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1138] + model_decoder_layers_27_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1139] + model_decoder_layers_27_self_attn_q_proj_weight4: 
R.Tensor((1280, 1280), dtype="float16") = packed_params[1140] + model_decoder_layers_27_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1141] + model_decoder_layers_27_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1142] + model_decoder_layers_27_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1143] + model_decoder_layers_27_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1144] + model_decoder_layers_27_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1145] + model_decoder_layers_27_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1149] + model_decoder_layers_27_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1150] + model_decoder_layers_27_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1151] + model_decoder_layers_27_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1152] + model_decoder_layers_27_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1153] + model_decoder_layers_27_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1154] + model_decoder_layers_27_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1155] + model_decoder_layers_27_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1156] + model_decoder_layers_27_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1157] + model_decoder_layers_27_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1158] + model_decoder_layers_27_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1159] + model_decoder_layers_27_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1160] + model_decoder_layers_28_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1161] + model_decoder_layers_28_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1162] + model_decoder_layers_28_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1163] + model_decoder_layers_28_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1164] + model_decoder_layers_28_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1165] + model_decoder_layers_28_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1166] + model_decoder_layers_28_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1167] + model_decoder_layers_28_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1168] + model_decoder_layers_28_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1169] + model_decoder_layers_28_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1173] + model_decoder_layers_28_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1174] + model_decoder_layers_28_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1175] + model_decoder_layers_28_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1176] + model_decoder_layers_28_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1177] + model_decoder_layers_28_encoder_attn_layer_norm_bias4: 
R.Tensor((1280,), dtype="float16") = packed_params[1178] + model_decoder_layers_28_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1179] + model_decoder_layers_28_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1180] + model_decoder_layers_28_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1181] + model_decoder_layers_28_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1182] + model_decoder_layers_28_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1183] + model_decoder_layers_28_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1184] + model_decoder_layers_29_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1185] + model_decoder_layers_29_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1186] + model_decoder_layers_29_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1187] + model_decoder_layers_29_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1188] + model_decoder_layers_29_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1189] + model_decoder_layers_29_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1190] + model_decoder_layers_29_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1191] + model_decoder_layers_29_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1192] + model_decoder_layers_29_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1193] + model_decoder_layers_29_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1197] + model_decoder_layers_29_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1198] + model_decoder_layers_29_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1199] + model_decoder_layers_29_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1200] + model_decoder_layers_29_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1201] + model_decoder_layers_29_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1202] + model_decoder_layers_29_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1203] + model_decoder_layers_29_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1204] + model_decoder_layers_29_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1205] + model_decoder_layers_29_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1206] + model_decoder_layers_29_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1207] + model_decoder_layers_29_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1208] + model_decoder_layers_30_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1209] + model_decoder_layers_30_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1210] + model_decoder_layers_30_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1211] + model_decoder_layers_30_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1212] + model_decoder_layers_30_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1213] + 
model_decoder_layers_30_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1214] + model_decoder_layers_30_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1215] + model_decoder_layers_30_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1216] + model_decoder_layers_30_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1217] + model_decoder_layers_30_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1221] + model_decoder_layers_30_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1222] + model_decoder_layers_30_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1223] + model_decoder_layers_30_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1224] + model_decoder_layers_30_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1225] + model_decoder_layers_30_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1226] + model_decoder_layers_30_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1227] + model_decoder_layers_30_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1228] + model_decoder_layers_30_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1229] + model_decoder_layers_30_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1230] + model_decoder_layers_30_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1231] + model_decoder_layers_30_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1232] + model_decoder_layers_31_self_attn_k_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1233] + model_decoder_layers_31_self_attn_v_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1234] + model_decoder_layers_31_self_attn_v_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1235] + model_decoder_layers_31_self_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1236] + model_decoder_layers_31_self_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1237] + model_decoder_layers_31_self_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1238] + model_decoder_layers_31_self_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1239] + model_decoder_layers_31_self_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1240] + model_decoder_layers_31_self_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1241] + model_decoder_layers_31_encoder_attn_q_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1245] + model_decoder_layers_31_encoder_attn_q_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1246] + model_decoder_layers_31_encoder_attn_out_proj_weight4: R.Tensor((1280, 1280), dtype="float16") = packed_params[1247] + model_decoder_layers_31_encoder_attn_out_proj_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1248] + model_decoder_layers_31_encoder_attn_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1249] + model_decoder_layers_31_encoder_attn_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1250] + model_decoder_layers_31_fc1_weight4: R.Tensor((5120, 1280), dtype="float16") = packed_params[1251] + 
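# NOTE (hedged overview): the bindings below finish layer 31 and the final
+ # model_decoder_layer_norm; after them the decode dataflow of this function
+ # begins: token-embedding lookup, a learned position embedding indexed by the
+ # KV-cache query positions, then per layer self-attention, cross-attention,
+ # and a GELU MLP, each wrapped in a pre-layer-norm and a residual add. The
+ # "fused_relax_permute_dims_relax_matmul*_cublas" externs are cuBLAS offloads
+ # of a dense layer with a transposed weight; in NumPy terms, roughly:
+ #
+ #     def nt_linear(x, w, b=None):   # x: (1, seq_len, d_in); w: (d_out, d_in)
+ #         y = x @ w.T                # permute_dims + matmul
+ #         return y if b is None else y + b
+ #
+ # (nt_linear is an illustrative name, not a symbol defined in this module;
+ # the "..._relax_nn_gelu_cublas" variant additionally applies GELU.)
+ 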
model_decoder_layers_31_fc1_bias4: R.Tensor((5120,), dtype="float16") = packed_params[1252] + model_decoder_layers_31_fc2_weight4: R.Tensor((1280, 5120), dtype="float16") = packed_params[1253] + model_decoder_layers_31_fc2_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1254] + model_decoder_layers_31_final_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1255] + model_decoder_layers_31_final_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1256] + model_decoder_layer_norm_weight4: R.Tensor((1280,), dtype="float16") = packed_params[1257] + model_decoder_layer_norm_bias4: R.Tensor((1280,), dtype="float16") = packed_params[1258] + reshape1030: R.Tensor((seq_len,), dtype="int32") = R.reshape(input_ids, R.shape([seq_len])) + take5: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_tokens_weight4, reshape1030, axis=0) + reshape1031: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take5, R.shape([1, seq_len, 1280])) + lv198: R.Tensor((seq_len,), dtype="int32") = R.call_pure_packed("vm.builtin.attention_kv_cache_get_query_positions", paged_kv_cache, sinfo_args=(R.Tensor((seq_len,), dtype="int32"),)) + take6: R.Tensor((seq_len, 1280), dtype="float16") = R.take(model_decoder_embed_positions_weight4, lv198, axis=0) + reshape1032: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(take6, R.shape([1, seq_len, 1280])) + add899: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(reshape1031, reshape1032) + layer_norm259: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add899, model_decoder_layers_0_self_attn_layer_norm_weight4, model_decoder_layers_0_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv32 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_q_proj_weight4, layer_norm259, model_decoder_layers_0_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1033: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv32, R.shape([1, seq_len, 20, 64])) + lv32_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_0_self_attn_k_proj_weight4, layer_norm259), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1034: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv32_1, R.shape([1, seq_len, 20, 64])) + lv33 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_v_proj_weight4, layer_norm259, model_decoder_layers_0_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1035: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv33, R.shape([1, seq_len, 20, 64])) + concat64: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1033, reshape1034, reshape1035), axis=2) + reshape1036: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat64, R.shape([seq_len, 60, 64])) + lv199 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1036), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1037: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv199, R.shape([1, seq_len, 20, 64])) + reshape1038: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1037, R.shape([1, seq_len, 1280])) + lv34 = 
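+ # --- editorial note (added; not part of the generated module) ----------------
+ # The packed_params bindings above follow a fixed stride of 24 slots per
+ # decoder layer. Offsets +9..+11 of each layer (the encoder_attn k_proj/v_proj
+ # weight and bias) are never bound in this function -- hence the index gaps
+ # 1194-1196, 1218-1220 and 1242-1244 -- presumably because cross-attention
+ # K/V are precomputed into the paged KV cache by the encode path. A minimal
+ # sketch of the inferred layout (the helper name and base index are
+ # assumptions, checked only against the bindings visible in this dump):
+ #
+ #     def packed_param_slot(layer: int, offset: int, base: int = 489) -> int:
+ #         # 24 slots per decoder layer in this "4"-suffixed parameter set
+ #         return base + 24 * layer + offset
+ #
+ #     assert packed_param_slot(29, 0) == 1185   # layers_29 self_attn k_proj weight
+ #     assert packed_param_slot(31, 23) == 1256  # layers_31 final_layer_norm bias
+ # ------------------------------------------------------------------------------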
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_self_attn_out_proj_weight4, reshape1038, model_decoder_layers_0_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add903: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add899, lv34) + layer_norm260: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add903, model_decoder_layers_0_encoder_attn_layer_norm_weight4, model_decoder_layers_0_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv35 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_encoder_attn_q_proj_weight4, layer_norm260, model_decoder_layers_0_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1039: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv35, R.shape([1, seq_len, 20, 64])) + reshape1040: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1039, R.shape([seq_len, 20, 64])) + lv200 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(0), R.prim_value(T.float32(1)), reshape1040), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1041: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv200, R.shape([1, seq_len, 20, 64])) + reshape1042: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1041, R.shape([1, seq_len, 1280])) + lv36 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_0_encoder_attn_out_proj_weight4, reshape1042, model_decoder_layers_0_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add906: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add903, lv36) + layer_norm261: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add906, model_decoder_layers_0_final_layer_norm_weight4, model_decoder_layers_0_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_0_fc1_weight4, layer_norm261, model_decoder_layers_0_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv37 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_0_fc2_weight4, lv, model_decoder_layers_0_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add909: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add906, lv37) + layer_norm262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add909, model_decoder_layers_1_self_attn_layer_norm_weight4, model_decoder_layers_1_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv38 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_q_proj_weight4, layer_norm262, model_decoder_layers_1_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1043: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv38, R.shape([1, seq_len, 20, 64])) + lv33_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_1_self_attn_k_proj_weight4, layer_norm262), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1044: R.Tensor((1, seq_len, 20, 64), dtype="float16") = 
+ layer_norm262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add909, model_decoder_layers_1_self_attn_layer_norm_weight4, model_decoder_layers_1_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv38 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_q_proj_weight4, layer_norm262, model_decoder_layers_1_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1043: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv38, R.shape([1, seq_len, 20, 64]))
+ lv33_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_1_self_attn_k_proj_weight4, layer_norm262), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1044: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv33_1, R.shape([1, seq_len, 20, 64]))
+ lv39 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_v_proj_weight4, layer_norm262, model_decoder_layers_1_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1045: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv39, R.shape([1, seq_len, 20, 64]))
+ concat65: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1043, reshape1044, reshape1045), axis=2)
+ reshape1046: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat65, R.shape([seq_len, 60, 64]))
+ lv201 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1046), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape1047: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv201, R.shape([1, seq_len, 20, 64]))
+ reshape1048: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1047, R.shape([1, seq_len, 1280]))
+ lv40 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_self_attn_out_proj_weight4, reshape1048, model_decoder_layers_1_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add913: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add909, lv40)
+ layer_norm263: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add913, model_decoder_layers_1_encoder_attn_layer_norm_weight4, model_decoder_layers_1_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv41 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_encoder_attn_q_proj_weight4, layer_norm263, model_decoder_layers_1_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1049: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv41, R.shape([1, seq_len, 20, 64]))
+ reshape1050: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1049, R.shape([seq_len, 20, 64]))
+ lv202 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(1), R.prim_value(T.float32(1)), reshape1050), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape1051: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv202, R.shape([1, seq_len, 20, 64]))
+ reshape1052: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1051, R.shape([1, seq_len, 1280]))
+ lv42 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_1_encoder_attn_out_proj_weight4, reshape1052, model_decoder_layers_1_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add916: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add913, lv42)
+ layer_norm264: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add916, model_decoder_layers_1_final_layer_norm_weight4, model_decoder_layers_1_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_1_fc1_weight4, layer_norm264, model_decoder_layers_1_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv43 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_1_fc2_weight4, lv1, model_decoder_layers_1_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add919: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add916, lv43)
+ layer_norm265: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add919, model_decoder_layers_2_self_attn_layer_norm_weight4, model_decoder_layers_2_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv44 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_q_proj_weight4, layer_norm265, model_decoder_layers_2_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1053: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv44, R.shape([1, seq_len, 20, 64]))
+ lv34_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_2_self_attn_k_proj_weight4, layer_norm265), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1054: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv34_1, R.shape([1, seq_len, 20, 64]))
+ lv45 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_v_proj_weight4, layer_norm265, model_decoder_layers_2_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1055: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv45, R.shape([1, seq_len, 20, 64]))
+ concat66: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1053, reshape1054, reshape1055), axis=2)
+ reshape1056: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat66, R.shape([seq_len, 60, 64]))
+ lv203 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1056), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape1057: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv203, R.shape([1, seq_len, 20, 64]))
+ reshape1058: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1057, R.shape([1, seq_len, 1280]))
+ lv46 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_self_attn_out_proj_weight4, reshape1058, model_decoder_layers_2_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add923: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add919, lv46)
+ layer_norm266: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add923, model_decoder_layers_2_encoder_attn_layer_norm_weight4, model_decoder_layers_2_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv47 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_encoder_attn_q_proj_weight4, layer_norm266, model_decoder_layers_2_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ reshape1059: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv47, R.shape([1, seq_len, 20, 64]))
+ reshape1060: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1059, R.shape([seq_len, 20, 64]))
+ lv204 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(2), R.prim_value(T.float32(1)), reshape1060), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16"))
+ reshape1061: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv204, R.shape([1, seq_len, 20, 64]))
+ reshape1062: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1061, R.shape([1, seq_len, 1280]))
+ lv48 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_2_encoder_attn_out_proj_weight4, reshape1062, model_decoder_layers_2_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add926: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add923, lv48)
+ layer_norm267: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add926, model_decoder_layers_2_final_layer_norm_weight4, model_decoder_layers_2_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True)
+ lv2 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_2_fc1_weight4, layer_norm267, model_decoder_layers_2_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16"))
+ lv49 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_2_fc2_weight4, lv2, model_decoder_layers_2_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16"))
+ add929: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add926, lv49)
dtype="float16")) + add933: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add929, lv52) + layer_norm269: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add933, model_decoder_layers_3_encoder_attn_layer_norm_weight4, model_decoder_layers_3_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv53 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_encoder_attn_q_proj_weight4, layer_norm269, model_decoder_layers_3_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1069: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv53, R.shape([1, seq_len, 20, 64])) + reshape1070: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1069, R.shape([seq_len, 20, 64])) + lv206 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(3), R.prim_value(T.float32(1)), reshape1070), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1071: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv206, R.shape([1, seq_len, 20, 64])) + reshape1072: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1071, R.shape([1, seq_len, 1280])) + lv54 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_3_encoder_attn_out_proj_weight4, reshape1072, model_decoder_layers_3_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add936: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add933, lv54) + layer_norm270: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add936, model_decoder_layers_3_final_layer_norm_weight4, model_decoder_layers_3_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv3 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_3_fc1_weight4, layer_norm270, model_decoder_layers_3_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv55 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_3_fc2_weight4, lv3, model_decoder_layers_3_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add939: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add936, lv55) + layer_norm271: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add939, model_decoder_layers_4_self_attn_layer_norm_weight4, model_decoder_layers_4_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv56 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_q_proj_weight4, layer_norm271, model_decoder_layers_4_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1073: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv56, R.shape([1, seq_len, 20, 64])) + lv36_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_4_self_attn_k_proj_weight4, layer_norm271), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1074: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv36_1, R.shape([1, seq_len, 20, 64])) + lv57 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_v_proj_weight4, layer_norm271, 
model_decoder_layers_4_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1075: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv57, R.shape([1, seq_len, 20, 64])) + concat68: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1073, reshape1074, reshape1075), axis=2) + reshape1076: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat68, R.shape([seq_len, 60, 64])) + lv207 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1076), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1077: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv207, R.shape([1, seq_len, 20, 64])) + reshape1078: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1077, R.shape([1, seq_len, 1280])) + lv58 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_self_attn_out_proj_weight4, reshape1078, model_decoder_layers_4_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add943: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add939, lv58) + layer_norm272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add943, model_decoder_layers_4_encoder_attn_layer_norm_weight4, model_decoder_layers_4_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv59 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_encoder_attn_q_proj_weight4, layer_norm272, model_decoder_layers_4_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1079: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv59, R.shape([1, seq_len, 20, 64])) + reshape1080: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1079, R.shape([seq_len, 20, 64])) + lv208 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(4), R.prim_value(T.float32(1)), reshape1080), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1081: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv208, R.shape([1, seq_len, 20, 64])) + reshape1082: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1081, R.shape([1, seq_len, 1280])) + lv60 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_4_encoder_attn_out_proj_weight4, reshape1082, model_decoder_layers_4_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add946: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add943, lv60) + layer_norm273: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add946, model_decoder_layers_4_final_layer_norm_weight4, model_decoder_layers_4_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv4 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_4_fc1_weight4, layer_norm273, model_decoder_layers_4_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv61 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_4_fc2_weight4, lv4, model_decoder_layers_4_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add949: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add946, lv61) 
+ layer_norm274: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add949, model_decoder_layers_5_self_attn_layer_norm_weight4, model_decoder_layers_5_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv62 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_q_proj_weight4, layer_norm274, model_decoder_layers_5_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1083: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv62, R.shape([1, seq_len, 20, 64])) + lv37_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_5_self_attn_k_proj_weight4, layer_norm274), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1084: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv37_1, R.shape([1, seq_len, 20, 64])) + lv63 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_v_proj_weight4, layer_norm274, model_decoder_layers_5_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1085: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv63, R.shape([1, seq_len, 20, 64])) + concat69: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1083, reshape1084, reshape1085), axis=2) + reshape1086: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat69, R.shape([seq_len, 60, 64])) + lv209 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1086), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1087: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv209, R.shape([1, seq_len, 20, 64])) + reshape1088: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1087, R.shape([1, seq_len, 1280])) + lv64 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_self_attn_out_proj_weight4, reshape1088, model_decoder_layers_5_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add953: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add949, lv64) + layer_norm275: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add953, model_decoder_layers_5_encoder_attn_layer_norm_weight4, model_decoder_layers_5_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv65 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_encoder_attn_q_proj_weight4, layer_norm275, model_decoder_layers_5_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1089: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv65, R.shape([1, seq_len, 20, 64])) + reshape1090: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1089, R.shape([seq_len, 20, 64])) + lv210 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(5), R.prim_value(T.float32(1)), reshape1090), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1091: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv210, R.shape([1, seq_len, 20, 64])) + reshape1092: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1091, R.shape([1, seq_len, 1280])) + lv66 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_5_encoder_attn_out_proj_weight4, reshape1092, model_decoder_layers_5_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add956: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add953, lv66) + layer_norm276: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add956, model_decoder_layers_5_final_layer_norm_weight4, model_decoder_layers_5_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv5 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_5_fc1_weight4, layer_norm276, model_decoder_layers_5_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv67 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_5_fc2_weight4, lv5, model_decoder_layers_5_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add959: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add956, lv67) + layer_norm277: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add959, model_decoder_layers_6_self_attn_layer_norm_weight4, model_decoder_layers_6_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv68 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_self_attn_q_proj_weight4, layer_norm277, model_decoder_layers_6_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1093: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv68, R.shape([1, seq_len, 20, 64])) + lv38_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_6_self_attn_k_proj_weight4, layer_norm277), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1094: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv38_1, R.shape([1, seq_len, 20, 64])) + lv69 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_self_attn_v_proj_weight4, layer_norm277, model_decoder_layers_6_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1095: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv69, R.shape([1, seq_len, 20, 64])) + concat70: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1093, reshape1094, reshape1095), axis=2) + reshape1096: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat70, R.shape([seq_len, 60, 64])) + lv211 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape1096), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1097: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv211, R.shape([1, seq_len, 20, 64])) + reshape1098: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1097, R.shape([1, seq_len, 1280])) + lv70 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_self_attn_out_proj_weight4, reshape1098, model_decoder_layers_6_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add963: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add959, lv70) + layer_norm278: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add963, 
model_decoder_layers_6_encoder_attn_layer_norm_weight4, model_decoder_layers_6_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv71 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_encoder_attn_q_proj_weight4, layer_norm278, model_decoder_layers_6_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1099: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv71, R.shape([1, seq_len, 20, 64])) + reshape1100: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1099, R.shape([seq_len, 20, 64])) + lv212 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(6), R.prim_value(T.float32(1)), reshape1100), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1101: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv212, R.shape([1, seq_len, 20, 64])) + reshape1102: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1101, R.shape([1, seq_len, 1280])) + lv72 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_6_encoder_attn_out_proj_weight4, reshape1102, model_decoder_layers_6_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add966: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add963, lv72) + layer_norm279: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add966, model_decoder_layers_6_final_layer_norm_weight4, model_decoder_layers_6_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv6 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_6_fc1_weight4, layer_norm279, model_decoder_layers_6_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv73 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_6_fc2_weight4, lv6, model_decoder_layers_6_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add969: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add966, lv73) + layer_norm280: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add969, model_decoder_layers_7_self_attn_layer_norm_weight4, model_decoder_layers_7_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv74 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_q_proj_weight4, layer_norm280, model_decoder_layers_7_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1103: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv74, R.shape([1, seq_len, 20, 64])) + lv39_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_7_self_attn_k_proj_weight4, layer_norm280), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1104: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv39_1, R.shape([1, seq_len, 20, 64])) + lv75 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_v_proj_weight4, layer_norm280, model_decoder_layers_7_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1105: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv75, R.shape([1, seq_len, 20, 64])) + 
concat71: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1103, reshape1104, reshape1105), axis=2) + reshape1106: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat71, R.shape([seq_len, 60, 64])) + lv213 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1106), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1107: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv213, R.shape([1, seq_len, 20, 64])) + reshape1108: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1107, R.shape([1, seq_len, 1280])) + lv76 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_self_attn_out_proj_weight4, reshape1108, model_decoder_layers_7_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add973: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add969, lv76) + layer_norm281: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add973, model_decoder_layers_7_encoder_attn_layer_norm_weight4, model_decoder_layers_7_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv77 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_encoder_attn_q_proj_weight4, layer_norm281, model_decoder_layers_7_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1109: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv77, R.shape([1, seq_len, 20, 64])) + reshape1110: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1109, R.shape([seq_len, 20, 64])) + lv214 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(7), R.prim_value(T.float32(1)), reshape1110), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1111: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv214, R.shape([1, seq_len, 20, 64])) + reshape1112: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1111, R.shape([1, seq_len, 1280])) + lv78 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_7_encoder_attn_out_proj_weight4, reshape1112, model_decoder_layers_7_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add976: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add973, lv78) + layer_norm282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add976, model_decoder_layers_7_final_layer_norm_weight4, model_decoder_layers_7_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv7 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_7_fc1_weight4, layer_norm282, model_decoder_layers_7_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv79 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_7_fc2_weight4, lv7, model_decoder_layers_7_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add979: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add976, lv79) + layer_norm283: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add979, model_decoder_layers_8_self_attn_layer_norm_weight4, model_decoder_layers_8_self_attn_layer_norm_bias4, axes=[-1], 
epsilon=1.0000000000000001e-05, center=True, scale=True) + lv80 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_q_proj_weight4, layer_norm283, model_decoder_layers_8_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1113: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv80, R.shape([1, seq_len, 20, 64])) + lv40_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_8_self_attn_k_proj_weight4, layer_norm283), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1114: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv40_1, R.shape([1, seq_len, 20, 64])) + lv81 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_v_proj_weight4, layer_norm283, model_decoder_layers_8_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1115: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv81, R.shape([1, seq_len, 20, 64])) + concat72: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1113, reshape1114, reshape1115), axis=2) + reshape1116: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat72, R.shape([seq_len, 60, 64])) + lv215 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1116), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1117: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv215, R.shape([1, seq_len, 20, 64])) + reshape1118: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1117, R.shape([1, seq_len, 1280])) + lv82 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_self_attn_out_proj_weight4, reshape1118, model_decoder_layers_8_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add983: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add979, lv82) + layer_norm284: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add983, model_decoder_layers_8_encoder_attn_layer_norm_weight4, model_decoder_layers_8_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv83 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_encoder_attn_q_proj_weight4, layer_norm284, model_decoder_layers_8_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1119: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv83, R.shape([1, seq_len, 20, 64])) + reshape1120: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1119, R.shape([seq_len, 20, 64])) + lv216 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(8), R.prim_value(T.float32(1)), reshape1120), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1121: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv216, R.shape([1, seq_len, 20, 64])) + reshape1122: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1121, R.shape([1, seq_len, 1280])) + lv84 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_8_encoder_attn_out_proj_weight4, reshape1122, model_decoder_layers_8_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 
1280), dtype="float16")) + add986: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add983, lv84) + layer_norm285: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add986, model_decoder_layers_8_final_layer_norm_weight4, model_decoder_layers_8_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv8 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_8_fc1_weight4, layer_norm285, model_decoder_layers_8_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv85 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_8_fc2_weight4, lv8, model_decoder_layers_8_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add989: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add986, lv85) + layer_norm286: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add989, model_decoder_layers_9_self_attn_layer_norm_weight4, model_decoder_layers_9_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv86 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_q_proj_weight4, layer_norm286, model_decoder_layers_9_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1123: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv86, R.shape([1, seq_len, 20, 64])) + lv41_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_9_self_attn_k_proj_weight4, layer_norm286), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1124: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv41_1, R.shape([1, seq_len, 20, 64])) + lv87 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_v_proj_weight4, layer_norm286, model_decoder_layers_9_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1125: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv87, R.shape([1, seq_len, 20, 64])) + concat73: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1123, reshape1124, reshape1125), axis=2) + reshape1126: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat73, R.shape([seq_len, 60, 64])) + lv217 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1126), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1127: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv217, R.shape([1, seq_len, 20, 64])) + reshape1128: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1127, R.shape([1, seq_len, 1280])) + lv88 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_self_attn_out_proj_weight4, reshape1128, model_decoder_layers_9_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add993: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add989, lv88) + layer_norm287: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add993, model_decoder_layers_9_encoder_attn_layer_norm_weight4, model_decoder_layers_9_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv89 = 
R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_encoder_attn_q_proj_weight4, layer_norm287, model_decoder_layers_9_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1129: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv89, R.shape([1, seq_len, 20, 64])) + reshape1130: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1129, R.shape([seq_len, 20, 64])) + lv218 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(9), R.prim_value(T.float32(1)), reshape1130), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1131: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv218, R.shape([1, seq_len, 20, 64])) + reshape1132: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1131, R.shape([1, seq_len, 1280])) + lv90 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_9_encoder_attn_out_proj_weight4, reshape1132, model_decoder_layers_9_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add996: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add993, lv90) + layer_norm288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add996, model_decoder_layers_9_final_layer_norm_weight4, model_decoder_layers_9_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv9 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_9_fc1_weight4, layer_norm288, model_decoder_layers_9_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv91 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_9_fc2_weight4, lv9, model_decoder_layers_9_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add999: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add996, lv91) + layer_norm289: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add999, model_decoder_layers_10_self_attn_layer_norm_weight4, model_decoder_layers_10_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv92 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_q_proj_weight4, layer_norm289, model_decoder_layers_10_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1133: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv92, R.shape([1, seq_len, 20, 64])) + lv42_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_10_self_attn_k_proj_weight4, layer_norm289), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1134: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv42_1, R.shape([1, seq_len, 20, 64])) + lv93 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_v_proj_weight4, layer_norm289, model_decoder_layers_10_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1135: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv93, R.shape([1, seq_len, 20, 64])) + concat74: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1133, reshape1134, reshape1135), axis=2) + reshape1136: R.Tensor((seq_len, 60, 64), dtype="float16") = 
R.reshape(concat74, R.shape([seq_len, 60, 64])) + lv219 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1136), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1137: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv219, R.shape([1, seq_len, 20, 64])) + reshape1138: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1137, R.shape([1, seq_len, 1280])) + lv94 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_self_attn_out_proj_weight4, reshape1138, model_decoder_layers_10_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1003: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add999, lv94) + layer_norm290: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1003, model_decoder_layers_10_encoder_attn_layer_norm_weight4, model_decoder_layers_10_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv95 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_encoder_attn_q_proj_weight4, layer_norm290, model_decoder_layers_10_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1139: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv95, R.shape([1, seq_len, 20, 64])) + reshape1140: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1139, R.shape([seq_len, 20, 64])) + lv220 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(10), R.prim_value(T.float32(1)), reshape1140), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1141: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv220, R.shape([1, seq_len, 20, 64])) + reshape1142: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1141, R.shape([1, seq_len, 1280])) + lv96 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_10_encoder_attn_out_proj_weight4, reshape1142, model_decoder_layers_10_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1006: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1003, lv96) + layer_norm291: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1006, model_decoder_layers_10_final_layer_norm_weight4, model_decoder_layers_10_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv10 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_10_fc1_weight4, layer_norm291, model_decoder_layers_10_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv97 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_10_fc2_weight4, lv10, model_decoder_layers_10_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1009: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1006, lv97) + layer_norm292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1009, model_decoder_layers_11_self_attn_layer_norm_weight4, model_decoder_layers_11_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv98 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", 
(model_decoder_layers_11_self_attn_q_proj_weight4, layer_norm292, model_decoder_layers_11_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1143: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv98, R.shape([1, seq_len, 20, 64])) + lv43_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_11_self_attn_k_proj_weight4, layer_norm292), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1144: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv43_1, R.shape([1, seq_len, 20, 64])) + lv99 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_self_attn_v_proj_weight4, layer_norm292, model_decoder_layers_11_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1145: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv99, R.shape([1, seq_len, 20, 64])) + concat75: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1143, reshape1144, reshape1145), axis=2) + reshape1146: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat75, R.shape([seq_len, 60, 64])) + lv221 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1146), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1147: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv221, R.shape([1, seq_len, 20, 64])) + reshape1148: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1147, R.shape([1, seq_len, 1280])) + lv100 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_self_attn_out_proj_weight4, reshape1148, model_decoder_layers_11_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1013: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1009, lv100) + layer_norm293: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1013, model_decoder_layers_11_encoder_attn_layer_norm_weight4, model_decoder_layers_11_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv101 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_encoder_attn_q_proj_weight4, layer_norm293, model_decoder_layers_11_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1149: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv101, R.shape([1, seq_len, 20, 64])) + reshape1150: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1149, R.shape([seq_len, 20, 64])) + lv222 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(11), R.prim_value(T.float32(1)), reshape1150), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1151: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv222, R.shape([1, seq_len, 20, 64])) + reshape1152: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1151, R.shape([1, seq_len, 1280])) + lv102 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_11_encoder_attn_out_proj_weight4, reshape1152, model_decoder_layers_11_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1016: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1013, lv102) + layer_norm294: 
R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1016, model_decoder_layers_11_final_layer_norm_weight4, model_decoder_layers_11_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv11 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_11_fc1_weight4, layer_norm294, model_decoder_layers_11_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv103 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_11_fc2_weight4, lv11, model_decoder_layers_11_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1019: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1016, lv103) + layer_norm295: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1019, model_decoder_layers_12_self_attn_layer_norm_weight4, model_decoder_layers_12_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv104 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_q_proj_weight4, layer_norm295, model_decoder_layers_12_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1153: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv104, R.shape([1, seq_len, 20, 64])) + lv44_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_12_self_attn_k_proj_weight4, layer_norm295), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1154: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv44_1, R.shape([1, seq_len, 20, 64])) + lv105 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_v_proj_weight4, layer_norm295, model_decoder_layers_12_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1155: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv105, R.shape([1, seq_len, 20, 64])) + concat76: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1153, reshape1154, reshape1155), axis=2) + reshape1156: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat76, R.shape([seq_len, 60, 64])) + lv223 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1156), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1157: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv223, R.shape([1, seq_len, 20, 64])) + reshape1158: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1157, R.shape([1, seq_len, 1280])) + lv106 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_self_attn_out_proj_weight4, reshape1158, model_decoder_layers_12_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1023: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1019, lv106) + layer_norm296: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1023, model_decoder_layers_12_encoder_attn_layer_norm_weight4, model_decoder_layers_12_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv107 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_encoder_attn_q_proj_weight4, 
layer_norm296, model_decoder_layers_12_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1159: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv107, R.shape([1, seq_len, 20, 64])) + reshape1160: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1159, R.shape([seq_len, 20, 64])) + lv224 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(12), R.prim_value(T.float32(1)), reshape1160), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1161: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv224, R.shape([1, seq_len, 20, 64])) + reshape1162: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1161, R.shape([1, seq_len, 1280])) + lv108 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_12_encoder_attn_out_proj_weight4, reshape1162, model_decoder_layers_12_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1026: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1023, lv108) + layer_norm297: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1026, model_decoder_layers_12_final_layer_norm_weight4, model_decoder_layers_12_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv12 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_12_fc1_weight4, layer_norm297, model_decoder_layers_12_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv109 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_12_fc2_weight4, lv12, model_decoder_layers_12_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1029: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1026, lv109) + layer_norm298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1029, model_decoder_layers_13_self_attn_layer_norm_weight4, model_decoder_layers_13_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv110 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_q_proj_weight4, layer_norm298, model_decoder_layers_13_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1163: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv110, R.shape([1, seq_len, 20, 64])) + lv45_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_13_self_attn_k_proj_weight4, layer_norm298), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1164: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv45_1, R.shape([1, seq_len, 20, 64])) + lv111 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_v_proj_weight4, layer_norm298, model_decoder_layers_13_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1165: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv111, R.shape([1, seq_len, 20, 64])) + concat77: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1163, reshape1164, reshape1165), axis=2) + reshape1166: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat77, R.shape([seq_len, 60, 64])) + lv225 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1166), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1167: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv225, R.shape([1, seq_len, 20, 64])) + reshape1168: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1167, R.shape([1, seq_len, 1280])) + lv112 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_self_attn_out_proj_weight4, reshape1168, model_decoder_layers_13_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1033: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1029, lv112) + layer_norm299: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1033, model_decoder_layers_13_encoder_attn_layer_norm_weight4, model_decoder_layers_13_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv113 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_encoder_attn_q_proj_weight4, layer_norm299, model_decoder_layers_13_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1169: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv113, R.shape([1, seq_len, 20, 64])) + reshape1170: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1169, R.shape([seq_len, 20, 64])) + lv226 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(13), R.prim_value(T.float32(1)), reshape1170), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1171: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv226, R.shape([1, seq_len, 20, 64])) + reshape1172: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1171, R.shape([1, seq_len, 1280])) + lv114 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_13_encoder_attn_out_proj_weight4, reshape1172, model_decoder_layers_13_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1036: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1033, lv114) + layer_norm300: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1036, model_decoder_layers_13_final_layer_norm_weight4, model_decoder_layers_13_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv13 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_13_fc1_weight4, layer_norm300, model_decoder_layers_13_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv115 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_13_fc2_weight4, lv13, model_decoder_layers_13_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1039: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1036, lv115) + layer_norm301: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1039, model_decoder_layers_14_self_attn_layer_norm_weight4, model_decoder_layers_14_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv116 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_q_proj_weight4, layer_norm301, 
model_decoder_layers_14_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1173: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv116, R.shape([1, seq_len, 20, 64])) + lv46_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_14_self_attn_k_proj_weight4, layer_norm301), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1174: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv46_1, R.shape([1, seq_len, 20, 64])) + lv117 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_v_proj_weight4, layer_norm301, model_decoder_layers_14_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1175: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv117, R.shape([1, seq_len, 20, 64])) + concat78: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1173, reshape1174, reshape1175), axis=2) + reshape1176: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat78, R.shape([seq_len, 60, 64])) + lv227 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1176), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1177: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv227, R.shape([1, seq_len, 20, 64])) + reshape1178: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1177, R.shape([1, seq_len, 1280])) + lv118 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_self_attn_out_proj_weight4, reshape1178, model_decoder_layers_14_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1043: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1039, lv118) + layer_norm302: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1043, model_decoder_layers_14_encoder_attn_layer_norm_weight4, model_decoder_layers_14_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv119 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_encoder_attn_q_proj_weight4, layer_norm302, model_decoder_layers_14_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1179: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv119, R.shape([1, seq_len, 20, 64])) + reshape1180: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1179, R.shape([seq_len, 20, 64])) + lv228 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(14), R.prim_value(T.float32(1)), reshape1180), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1181: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv228, R.shape([1, seq_len, 20, 64])) + reshape1182: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1181, R.shape([1, seq_len, 1280])) + lv120 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_14_encoder_attn_out_proj_weight4, reshape1182, model_decoder_layers_14_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1046: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1043, lv120) + layer_norm303: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1046, model_decoder_layers_14_final_layer_norm_weight4, model_decoder_layers_14_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv14 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_14_fc1_weight4, layer_norm303, model_decoder_layers_14_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv121 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_14_fc2_weight4, lv14, model_decoder_layers_14_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1049: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1046, lv121) + layer_norm304: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1049, model_decoder_layers_15_self_attn_layer_norm_weight4, model_decoder_layers_15_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv122 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_q_proj_weight4, layer_norm304, model_decoder_layers_15_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1183: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv122, R.shape([1, seq_len, 20, 64])) + lv47_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_15_self_attn_k_proj_weight4, layer_norm304), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1184: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv47_1, R.shape([1, seq_len, 20, 64])) + lv123 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_v_proj_weight4, layer_norm304, model_decoder_layers_15_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1185: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv123, R.shape([1, seq_len, 20, 64])) + concat79: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1183, reshape1184, reshape1185), axis=2) + reshape1186: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat79, R.shape([seq_len, 60, 64])) + lv229 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1186), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1187: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv229, R.shape([1, seq_len, 20, 64])) + reshape1188: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1187, R.shape([1, seq_len, 1280])) + lv124 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_self_attn_out_proj_weight4, reshape1188, model_decoder_layers_15_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1053: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1049, lv124) + layer_norm305: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1053, model_decoder_layers_15_encoder_attn_layer_norm_weight4, model_decoder_layers_15_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv125 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_encoder_attn_q_proj_weight4, layer_norm305, 
model_decoder_layers_15_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1189: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv125, R.shape([1, seq_len, 20, 64])) + reshape1190: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1189, R.shape([seq_len, 20, 64])) + lv230 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(15), R.prim_value(T.float32(1)), reshape1190), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1191: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv230, R.shape([1, seq_len, 20, 64])) + reshape1192: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1191, R.shape([1, seq_len, 1280])) + lv126 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_15_encoder_attn_out_proj_weight4, reshape1192, model_decoder_layers_15_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1056: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1053, lv126) + layer_norm306: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1056, model_decoder_layers_15_final_layer_norm_weight4, model_decoder_layers_15_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv15 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_15_fc1_weight4, layer_norm306, model_decoder_layers_15_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv127 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_15_fc2_weight4, lv15, model_decoder_layers_15_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1059: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1056, lv127) + layer_norm307: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1059, model_decoder_layers_16_self_attn_layer_norm_weight4, model_decoder_layers_16_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv128 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_q_proj_weight4, layer_norm307, model_decoder_layers_16_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1193: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv128, R.shape([1, seq_len, 20, 64])) + lv48_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_16_self_attn_k_proj_weight4, layer_norm307), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1194: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv48_1, R.shape([1, seq_len, 20, 64])) + lv129 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_v_proj_weight4, layer_norm307, model_decoder_layers_16_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1195: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv129, R.shape([1, seq_len, 20, 64])) + concat80: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1193, reshape1194, reshape1195), axis=2) + reshape1196: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat80, R.shape([seq_len, 60, 64])) + lv231 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1196), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1197: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv231, R.shape([1, seq_len, 20, 64])) + reshape1198: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1197, R.shape([1, seq_len, 1280])) + lv130 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_self_attn_out_proj_weight4, reshape1198, model_decoder_layers_16_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1063: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1059, lv130) + layer_norm308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1063, model_decoder_layers_16_encoder_attn_layer_norm_weight4, model_decoder_layers_16_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv131 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_encoder_attn_q_proj_weight4, layer_norm308, model_decoder_layers_16_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1199: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv131, R.shape([1, seq_len, 20, 64])) + reshape1200: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1199, R.shape([seq_len, 20, 64])) + lv232 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(16), R.prim_value(T.float32(1)), reshape1200), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1201: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv232, R.shape([1, seq_len, 20, 64])) + reshape1202: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1201, R.shape([1, seq_len, 1280])) + lv132 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_16_encoder_attn_out_proj_weight4, reshape1202, model_decoder_layers_16_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1066: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1063, lv132) + layer_norm309: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1066, model_decoder_layers_16_final_layer_norm_weight4, model_decoder_layers_16_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv16 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_16_fc1_weight4, layer_norm309, model_decoder_layers_16_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv133 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_16_fc2_weight4, lv16, model_decoder_layers_16_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1069: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1066, lv133) + layer_norm310: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1069, model_decoder_layers_17_self_attn_layer_norm_weight4, model_decoder_layers_17_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv134 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_q_proj_weight4, layer_norm310, 
model_decoder_layers_17_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1203: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv134, R.shape([1, seq_len, 20, 64])) + lv49_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_17_self_attn_k_proj_weight4, layer_norm310), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1204: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv49_1, R.shape([1, seq_len, 20, 64])) + lv135 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_v_proj_weight4, layer_norm310, model_decoder_layers_17_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1205: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv135, R.shape([1, seq_len, 20, 64])) + concat81: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1203, reshape1204, reshape1205), axis=2) + reshape1206: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat81, R.shape([seq_len, 60, 64])) + lv233 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1206), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1207: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv233, R.shape([1, seq_len, 20, 64])) + reshape1208: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1207, R.shape([1, seq_len, 1280])) + lv136 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_self_attn_out_proj_weight4, reshape1208, model_decoder_layers_17_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1073: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1069, lv136) + layer_norm311: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1073, model_decoder_layers_17_encoder_attn_layer_norm_weight4, model_decoder_layers_17_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv137 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_encoder_attn_q_proj_weight4, layer_norm311, model_decoder_layers_17_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1209: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv137, R.shape([1, seq_len, 20, 64])) + reshape1210: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1209, R.shape([seq_len, 20, 64])) + lv234 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(17), R.prim_value(T.float32(1)), reshape1210), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1211: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv234, R.shape([1, seq_len, 20, 64])) + reshape1212: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1211, R.shape([1, seq_len, 1280])) + lv138 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_17_encoder_attn_out_proj_weight4, reshape1212, model_decoder_layers_17_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1076: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1073, lv138) + layer_norm312: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1076, model_decoder_layers_17_final_layer_norm_weight4, model_decoder_layers_17_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv17 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_17_fc1_weight4, layer_norm312, model_decoder_layers_17_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv139 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_17_fc2_weight4, lv17, model_decoder_layers_17_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1079: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1076, lv139) + layer_norm313: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1079, model_decoder_layers_18_self_attn_layer_norm_weight4, model_decoder_layers_18_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv140 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_q_proj_weight4, layer_norm313, model_decoder_layers_18_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1213: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv140, R.shape([1, seq_len, 20, 64])) + lv50_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_18_self_attn_k_proj_weight4, layer_norm313), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1214: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv50_1, R.shape([1, seq_len, 20, 64])) + lv141 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_v_proj_weight4, layer_norm313, model_decoder_layers_18_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1215: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv141, R.shape([1, seq_len, 20, 64])) + concat82: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1213, reshape1214, reshape1215), axis=2) + reshape1216: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat82, R.shape([seq_len, 60, 64])) + lv235 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1216), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1217: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv235, R.shape([1, seq_len, 20, 64])) + reshape1218: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1217, R.shape([1, seq_len, 1280])) + lv142 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_self_attn_out_proj_weight4, reshape1218, model_decoder_layers_18_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1083: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1079, lv142) + layer_norm314: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1083, model_decoder_layers_18_encoder_attn_layer_norm_weight4, model_decoder_layers_18_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv143 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_encoder_attn_q_proj_weight4, layer_norm314, 
model_decoder_layers_18_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1219: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv143, R.shape([1, seq_len, 20, 64])) + reshape1220: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1219, R.shape([seq_len, 20, 64])) + lv236 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(18), R.prim_value(T.float32(1)), reshape1220), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1221: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv236, R.shape([1, seq_len, 20, 64])) + reshape1222: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1221, R.shape([1, seq_len, 1280])) + lv144 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_18_encoder_attn_out_proj_weight4, reshape1222, model_decoder_layers_18_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1086: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1083, lv144) + layer_norm315: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1086, model_decoder_layers_18_final_layer_norm_weight4, model_decoder_layers_18_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv18 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_18_fc1_weight4, layer_norm315, model_decoder_layers_18_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv145 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_18_fc2_weight4, lv18, model_decoder_layers_18_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1089: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1086, lv145) + layer_norm316: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1089, model_decoder_layers_19_self_attn_layer_norm_weight4, model_decoder_layers_19_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv146 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_q_proj_weight4, layer_norm316, model_decoder_layers_19_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1223: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv146, R.shape([1, seq_len, 20, 64])) + lv51_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_19_self_attn_k_proj_weight4, layer_norm316), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1224: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv51_1, R.shape([1, seq_len, 20, 64])) + lv147 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_v_proj_weight4, layer_norm316, model_decoder_layers_19_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1225: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv147, R.shape([1, seq_len, 20, 64])) + concat83: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1223, reshape1224, reshape1225), axis=2) + reshape1226: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat83, R.shape([seq_len, 60, 64])) + lv237 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1226), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1227: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv237, R.shape([1, seq_len, 20, 64])) + reshape1228: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1227, R.shape([1, seq_len, 1280])) + lv148 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_self_attn_out_proj_weight4, reshape1228, model_decoder_layers_19_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1093: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1089, lv148) + layer_norm317: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1093, model_decoder_layers_19_encoder_attn_layer_norm_weight4, model_decoder_layers_19_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv149 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_encoder_attn_q_proj_weight4, layer_norm317, model_decoder_layers_19_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1229: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv149, R.shape([1, seq_len, 20, 64])) + reshape1230: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1229, R.shape([seq_len, 20, 64])) + lv238 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(19), R.prim_value(T.float32(1)), reshape1230), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1231: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv238, R.shape([1, seq_len, 20, 64])) + reshape1232: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1231, R.shape([1, seq_len, 1280])) + lv150 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_19_encoder_attn_out_proj_weight4, reshape1232, model_decoder_layers_19_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1096: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1093, lv150) + layer_norm318: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1096, model_decoder_layers_19_final_layer_norm_weight4, model_decoder_layers_19_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv19 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_19_fc1_weight4, layer_norm318, model_decoder_layers_19_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv151 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_19_fc2_weight4, lv19, model_decoder_layers_19_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1099: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1096, lv151) + layer_norm319: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1099, model_decoder_layers_20_self_attn_layer_norm_weight4, model_decoder_layers_20_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv152 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_q_proj_weight4, layer_norm319, 
model_decoder_layers_20_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1233: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv152, R.shape([1, seq_len, 20, 64])) + lv52_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_20_self_attn_k_proj_weight4, layer_norm319), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1234: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv52_1, R.shape([1, seq_len, 20, 64])) + lv153 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_v_proj_weight4, layer_norm319, model_decoder_layers_20_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1235: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv153, R.shape([1, seq_len, 20, 64])) + concat84: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1233, reshape1234, reshape1235), axis=2) + reshape1236: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat84, R.shape([seq_len, 60, 64])) + lv239 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1236), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1237: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv239, R.shape([1, seq_len, 20, 64])) + reshape1238: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1237, R.shape([1, seq_len, 1280])) + lv154 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_self_attn_out_proj_weight4, reshape1238, model_decoder_layers_20_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1103: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1099, lv154) + layer_norm320: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1103, model_decoder_layers_20_encoder_attn_layer_norm_weight4, model_decoder_layers_20_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv155 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_encoder_attn_q_proj_weight4, layer_norm320, model_decoder_layers_20_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1239: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv155, R.shape([1, seq_len, 20, 64])) + reshape1240: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1239, R.shape([seq_len, 20, 64])) + lv240 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(20), R.prim_value(T.float32(1)), reshape1240), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1241: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv240, R.shape([1, seq_len, 20, 64])) + reshape1242: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1241, R.shape([1, seq_len, 1280])) + lv156 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_20_encoder_attn_out_proj_weight4, reshape1242, model_decoder_layers_20_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1106: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1103, lv156) + layer_norm321: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1106, model_decoder_layers_20_final_layer_norm_weight4, model_decoder_layers_20_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv20 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_20_fc1_weight4, layer_norm321, model_decoder_layers_20_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv157 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_20_fc2_weight4, lv20, model_decoder_layers_20_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1109: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1106, lv157) + layer_norm322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1109, model_decoder_layers_21_self_attn_layer_norm_weight4, model_decoder_layers_21_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv158 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_q_proj_weight4, layer_norm322, model_decoder_layers_21_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1243: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv158, R.shape([1, seq_len, 20, 64])) + lv53_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_21_self_attn_k_proj_weight4, layer_norm322), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1244: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv53_1, R.shape([1, seq_len, 20, 64])) + lv159 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_v_proj_weight4, layer_norm322, model_decoder_layers_21_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1245: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv159, R.shape([1, seq_len, 20, 64])) + concat85: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1243, reshape1244, reshape1245), axis=2) + reshape1246: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat85, R.shape([seq_len, 60, 64])) + lv241 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1246), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1247: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv241, R.shape([1, seq_len, 20, 64])) + reshape1248: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1247, R.shape([1, seq_len, 1280])) + lv160 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_self_attn_out_proj_weight4, reshape1248, model_decoder_layers_21_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1113: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1109, lv160) + layer_norm323: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1113, model_decoder_layers_21_encoder_attn_layer_norm_weight4, model_decoder_layers_21_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv161 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_encoder_attn_q_proj_weight4, layer_norm323, 
model_decoder_layers_21_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1249: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv161, R.shape([1, seq_len, 20, 64])) + reshape1250: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1249, R.shape([seq_len, 20, 64])) + lv242 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(21), R.prim_value(T.float32(1)), reshape1250), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1251: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv242, R.shape([1, seq_len, 20, 64])) + reshape1252: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1251, R.shape([1, seq_len, 1280])) + lv162 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_21_encoder_attn_out_proj_weight4, reshape1252, model_decoder_layers_21_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1116: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1113, lv162) + layer_norm324: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1116, model_decoder_layers_21_final_layer_norm_weight4, model_decoder_layers_21_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv21 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_21_fc1_weight4, layer_norm324, model_decoder_layers_21_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv163 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_21_fc2_weight4, lv21, model_decoder_layers_21_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1119: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1116, lv163) + layer_norm325: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1119, model_decoder_layers_22_self_attn_layer_norm_weight4, model_decoder_layers_22_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv164 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_q_proj_weight4, layer_norm325, model_decoder_layers_22_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1253: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv164, R.shape([1, seq_len, 20, 64])) + lv54_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_22_self_attn_k_proj_weight4, layer_norm325), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1254: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv54_1, R.shape([1, seq_len, 20, 64])) + lv165 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_v_proj_weight4, layer_norm325, model_decoder_layers_22_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1255: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv165, R.shape([1, seq_len, 20, 64])) + concat86: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1253, reshape1254, reshape1255), axis=2) + reshape1256: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat86, R.shape([seq_len, 60, 64])) + lv243 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1256), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1257: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv243, R.shape([1, seq_len, 20, 64])) + reshape1258: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1257, R.shape([1, seq_len, 1280])) + lv166 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_self_attn_out_proj_weight4, reshape1258, model_decoder_layers_22_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1123: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1119, lv166) + layer_norm326: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1123, model_decoder_layers_22_encoder_attn_layer_norm_weight4, model_decoder_layers_22_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv167 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_encoder_attn_q_proj_weight4, layer_norm326, model_decoder_layers_22_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1259: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv167, R.shape([1, seq_len, 20, 64])) + reshape1260: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1259, R.shape([seq_len, 20, 64])) + lv244 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(22), R.prim_value(T.float32(1)), reshape1260), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1261: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv244, R.shape([1, seq_len, 20, 64])) + reshape1262: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1261, R.shape([1, seq_len, 1280])) + lv168 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_22_encoder_attn_out_proj_weight4, reshape1262, model_decoder_layers_22_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1126: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1123, lv168) + layer_norm327: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1126, model_decoder_layers_22_final_layer_norm_weight4, model_decoder_layers_22_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv22 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_22_fc1_weight4, layer_norm327, model_decoder_layers_22_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv169 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_22_fc2_weight4, lv22, model_decoder_layers_22_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1129: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1126, lv169) + layer_norm328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1129, model_decoder_layers_23_self_attn_layer_norm_weight4, model_decoder_layers_23_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv170 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_q_proj_weight4, layer_norm328, 
model_decoder_layers_23_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1263: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv170, R.shape([1, seq_len, 20, 64])) + lv55_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_23_self_attn_k_proj_weight4, layer_norm328), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1264: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv55_1, R.shape([1, seq_len, 20, 64])) + lv171 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_v_proj_weight4, layer_norm328, model_decoder_layers_23_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1265: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv171, R.shape([1, seq_len, 20, 64])) + concat87: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1263, reshape1264, reshape1265), axis=2) + reshape1266: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat87, R.shape([seq_len, 60, 64])) + lv245 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1266), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1267: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv245, R.shape([1, seq_len, 20, 64])) + reshape1268: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1267, R.shape([1, seq_len, 1280])) + lv172 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_self_attn_out_proj_weight4, reshape1268, model_decoder_layers_23_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1133: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1129, lv172) + layer_norm329: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1133, model_decoder_layers_23_encoder_attn_layer_norm_weight4, model_decoder_layers_23_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv173 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_encoder_attn_q_proj_weight4, layer_norm329, model_decoder_layers_23_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1269: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv173, R.shape([1, seq_len, 20, 64])) + reshape1270: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1269, R.shape([seq_len, 20, 64])) + lv246 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(23), R.prim_value(T.float32(1)), reshape1270), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1271: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv246, R.shape([1, seq_len, 20, 64])) + reshape1272: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1271, R.shape([1, seq_len, 1280])) + lv174 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_23_encoder_attn_out_proj_weight4, reshape1272, model_decoder_layers_23_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1136: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1133, lv174) + layer_norm330: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1136, model_decoder_layers_23_final_layer_norm_weight4, model_decoder_layers_23_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv23 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_23_fc1_weight4, layer_norm330, model_decoder_layers_23_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv175 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_23_fc2_weight4, lv23, model_decoder_layers_23_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1139: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1136, lv175) + layer_norm331: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1139, model_decoder_layers_24_self_attn_layer_norm_weight4, model_decoder_layers_24_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv176 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_self_attn_q_proj_weight4, layer_norm331, model_decoder_layers_24_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1273: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv176, R.shape([1, seq_len, 20, 64])) + lv56_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_24_self_attn_k_proj_weight4, layer_norm331), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1274: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv56_1, R.shape([1, seq_len, 20, 64])) + lv177 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_self_attn_v_proj_weight4, layer_norm331, model_decoder_layers_24_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1275: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv177, R.shape([1, seq_len, 20, 64])) + concat88: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1273, reshape1274, reshape1275), axis=2) + reshape1276: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat88, R.shape([seq_len, 60, 64])) + lv247 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1276), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1277: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv247, R.shape([1, seq_len, 20, 64])) + reshape1278: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1277, R.shape([1, seq_len, 1280])) + lv178 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_self_attn_out_proj_weight4, reshape1278, model_decoder_layers_24_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1143: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1139, lv178) + layer_norm332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1143, model_decoder_layers_24_encoder_attn_layer_norm_weight4, model_decoder_layers_24_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv179 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_encoder_attn_q_proj_weight4, layer_norm332, 
model_decoder_layers_24_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1279: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv179, R.shape([1, seq_len, 20, 64])) + reshape1280: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1279, R.shape([seq_len, 20, 64])) + lv248 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(24), R.prim_value(T.float32(1)), reshape1280), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1281: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv248, R.shape([1, seq_len, 20, 64])) + reshape1282: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1281, R.shape([1, seq_len, 1280])) + lv180 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_24_encoder_attn_out_proj_weight4, reshape1282, model_decoder_layers_24_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1146: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1143, lv180) + layer_norm333: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1146, model_decoder_layers_24_final_layer_norm_weight4, model_decoder_layers_24_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv24 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_24_fc1_weight4, layer_norm333, model_decoder_layers_24_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv181 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_24_fc2_weight4, lv24, model_decoder_layers_24_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1149: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1146, lv181) + layer_norm334: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1149, model_decoder_layers_25_self_attn_layer_norm_weight4, model_decoder_layers_25_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv182 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_q_proj_weight4, layer_norm334, model_decoder_layers_25_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1283: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv182, R.shape([1, seq_len, 20, 64])) + lv57_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_25_self_attn_k_proj_weight4, layer_norm334), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1284: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv57_1, R.shape([1, seq_len, 20, 64])) + lv183 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_v_proj_weight4, layer_norm334, model_decoder_layers_25_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1285: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv183, R.shape([1, seq_len, 20, 64])) + concat89: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1283, reshape1284, reshape1285), axis=2) + reshape1286: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat89, R.shape([seq_len, 60, 64])) + lv249 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1286), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1287: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv249, R.shape([1, seq_len, 20, 64])) + reshape1288: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1287, R.shape([1, seq_len, 1280])) + lv184 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_self_attn_out_proj_weight4, reshape1288, model_decoder_layers_25_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1153: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1149, lv184) + layer_norm335: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1153, model_decoder_layers_25_encoder_attn_layer_norm_weight4, model_decoder_layers_25_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv185 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_encoder_attn_q_proj_weight4, layer_norm335, model_decoder_layers_25_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1289: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv185, R.shape([1, seq_len, 20, 64])) + reshape1290: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1289, R.shape([seq_len, 20, 64])) + lv250 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(25), R.prim_value(T.float32(1)), reshape1290), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1291: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv250, R.shape([1, seq_len, 20, 64])) + reshape1292: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1291, R.shape([1, seq_len, 1280])) + lv186 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_25_encoder_attn_out_proj_weight4, reshape1292, model_decoder_layers_25_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1156: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1153, lv186) + layer_norm336: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1156, model_decoder_layers_25_final_layer_norm_weight4, model_decoder_layers_25_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv25 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_25_fc1_weight4, layer_norm336, model_decoder_layers_25_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv187 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_25_fc2_weight4, lv25, model_decoder_layers_25_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1159: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1156, lv187) + layer_norm337: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1159, model_decoder_layers_26_self_attn_layer_norm_weight4, model_decoder_layers_26_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv188 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_q_proj_weight4, layer_norm337, 
model_decoder_layers_26_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1293: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv188, R.shape([1, seq_len, 20, 64])) + lv58_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_26_self_attn_k_proj_weight4, layer_norm337), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1294: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv58_1, R.shape([1, seq_len, 20, 64])) + lv189 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_v_proj_weight4, layer_norm337, model_decoder_layers_26_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1295: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv189, R.shape([1, seq_len, 20, 64])) + concat90: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1293, reshape1294, reshape1295), axis=2) + reshape1296: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat90, R.shape([seq_len, 60, 64])) + lv251 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1296), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1297: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv251, R.shape([1, seq_len, 20, 64])) + reshape1298: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1297, R.shape([1, seq_len, 1280])) + lv190 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_self_attn_out_proj_weight4, reshape1298, model_decoder_layers_26_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1163: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1159, lv190) + layer_norm338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1163, model_decoder_layers_26_encoder_attn_layer_norm_weight4, model_decoder_layers_26_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv191 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_encoder_attn_q_proj_weight4, layer_norm338, model_decoder_layers_26_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1299: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv191, R.shape([1, seq_len, 20, 64])) + reshape1300: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1299, R.shape([seq_len, 20, 64])) + lv252 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(26), R.prim_value(T.float32(1)), reshape1300), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1301: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv252, R.shape([1, seq_len, 20, 64])) + reshape1302: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1301, R.shape([1, seq_len, 1280])) + lv192 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_26_encoder_attn_out_proj_weight4, reshape1302, model_decoder_layers_26_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1166: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1163, lv192) + layer_norm339: R.Tensor((1, seq_len, 1280), dtype="float16") = 
R.nn.layer_norm(add1166, model_decoder_layers_26_final_layer_norm_weight4, model_decoder_layers_26_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv26 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_26_fc1_weight4, layer_norm339, model_decoder_layers_26_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv193 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_26_fc2_weight4, lv26, model_decoder_layers_26_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1169: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1166, lv193) + layer_norm340: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1169, model_decoder_layers_27_self_attn_layer_norm_weight4, model_decoder_layers_27_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv194 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_q_proj_weight4, layer_norm340, model_decoder_layers_27_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1303: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv194, R.shape([1, seq_len, 20, 64])) + lv59_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_27_self_attn_k_proj_weight4, layer_norm340), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1304: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv59_1, R.shape([1, seq_len, 20, 64])) + lv195 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_v_proj_weight4, layer_norm340, model_decoder_layers_27_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1305: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv195, R.shape([1, seq_len, 20, 64])) + concat91: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1303, reshape1304, reshape1305), axis=2) + reshape1306: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat91, R.shape([seq_len, 60, 64])) + lv253 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1306), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1307: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv253, R.shape([1, seq_len, 20, 64])) + reshape1308: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1307, R.shape([1, seq_len, 1280])) + lv196 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_self_attn_out_proj_weight4, reshape1308, model_decoder_layers_27_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1173: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1169, lv196) + layer_norm341: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1173, model_decoder_layers_27_encoder_attn_layer_norm_weight4, model_decoder_layers_27_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv197 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_encoder_attn_q_proj_weight4, layer_norm341, 
model_decoder_layers_27_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1309: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv197, R.shape([1, seq_len, 20, 64])) + reshape1310: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1309, R.shape([seq_len, 20, 64])) + lv254 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(27), R.prim_value(T.float32(1)), reshape1310), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1311: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv254, R.shape([1, seq_len, 20, 64])) + reshape1312: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1311, R.shape([1, seq_len, 1280])) + lv198_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_27_encoder_attn_out_proj_weight4, reshape1312, model_decoder_layers_27_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1176: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1173, lv198_1) + layer_norm342: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1176, model_decoder_layers_27_final_layer_norm_weight4, model_decoder_layers_27_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv27 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_27_fc1_weight4, layer_norm342, model_decoder_layers_27_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv199_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_27_fc2_weight4, lv27, model_decoder_layers_27_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1179: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1176, lv199_1) + layer_norm343: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1179, model_decoder_layers_28_self_attn_layer_norm_weight4, model_decoder_layers_28_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv200_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_q_proj_weight4, layer_norm343, model_decoder_layers_28_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1313: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv200_1, R.shape([1, seq_len, 20, 64])) + lv60_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_28_self_attn_k_proj_weight4, layer_norm343), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1314: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv60_1, R.shape([1, seq_len, 20, 64])) + lv201_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_v_proj_weight4, layer_norm343, model_decoder_layers_28_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1315: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv201_1, R.shape([1, seq_len, 20, 64])) + concat92: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1313, reshape1314, reshape1315), axis=2) + reshape1316: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat92, R.shape([seq_len, 60, 64])) + lv255 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1316), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1317: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv255, R.shape([1, seq_len, 20, 64])) + reshape1318: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1317, R.shape([1, seq_len, 1280])) + lv202_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_self_attn_out_proj_weight4, reshape1318, model_decoder_layers_28_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1183: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1179, lv202_1) + layer_norm344: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1183, model_decoder_layers_28_encoder_attn_layer_norm_weight4, model_decoder_layers_28_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv203_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_encoder_attn_q_proj_weight4, layer_norm344, model_decoder_layers_28_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1319: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv203_1, R.shape([1, seq_len, 20, 64])) + reshape1320: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1319, R.shape([seq_len, 20, 64])) + lv256 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(28), R.prim_value(T.float32(1)), reshape1320), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1321: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv256, R.shape([1, seq_len, 20, 64])) + reshape1322: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1321, R.shape([1, seq_len, 1280])) + lv204_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_28_encoder_attn_out_proj_weight4, reshape1322, model_decoder_layers_28_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1186: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1183, lv204_1) + layer_norm345: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1186, model_decoder_layers_28_final_layer_norm_weight4, model_decoder_layers_28_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv28 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_28_fc1_weight4, layer_norm345, model_decoder_layers_28_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv205_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_28_fc2_weight4, lv28, model_decoder_layers_28_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1189: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1186, lv205_1) + layer_norm346: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1189, model_decoder_layers_29_self_attn_layer_norm_weight4, model_decoder_layers_29_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv206_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_q_proj_weight4, 
layer_norm346, model_decoder_layers_29_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1323: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv206_1, R.shape([1, seq_len, 20, 64])) + lv61_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_29_self_attn_k_proj_weight4, layer_norm346), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1324: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv61_1, R.shape([1, seq_len, 20, 64])) + lv207_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_v_proj_weight4, layer_norm346, model_decoder_layers_29_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1325: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv207_1, R.shape([1, seq_len, 20, 64])) + concat93: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1323, reshape1324, reshape1325), axis=2) + reshape1326: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat93, R.shape([seq_len, 60, 64])) + lv257 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1326), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1327: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv257, R.shape([1, seq_len, 20, 64])) + reshape1328: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1327, R.shape([1, seq_len, 1280])) + lv208_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_self_attn_out_proj_weight4, reshape1328, model_decoder_layers_29_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1193: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1189, lv208_1) + layer_norm347: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1193, model_decoder_layers_29_encoder_attn_layer_norm_weight4, model_decoder_layers_29_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv209_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_encoder_attn_q_proj_weight4, layer_norm347, model_decoder_layers_29_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1329: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv209_1, R.shape([1, seq_len, 20, 64])) + reshape1330: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1329, R.shape([seq_len, 20, 64])) + lv258 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(29), R.prim_value(T.float32(1)), reshape1330), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1331: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv258, R.shape([1, seq_len, 20, 64])) + reshape1332: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1331, R.shape([1, seq_len, 1280])) + lv210_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_29_encoder_attn_out_proj_weight4, reshape1332, model_decoder_layers_29_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1196: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1193, lv210_1) + layer_norm348: R.Tensor((1, seq_len, 1280), 
dtype="float16") = R.nn.layer_norm(add1196, model_decoder_layers_29_final_layer_norm_weight4, model_decoder_layers_29_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv29 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_29_fc1_weight4, layer_norm348, model_decoder_layers_29_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv211_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_29_fc2_weight4, lv29, model_decoder_layers_29_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1199: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1196, lv211_1) + layer_norm349: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1199, model_decoder_layers_30_self_attn_layer_norm_weight4, model_decoder_layers_30_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv212_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_q_proj_weight4, layer_norm349, model_decoder_layers_30_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1333: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv212_1, R.shape([1, seq_len, 20, 64])) + lv62_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_30_self_attn_k_proj_weight4, layer_norm349), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1334: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv62_1, R.shape([1, seq_len, 20, 64])) + lv213_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_v_proj_weight4, layer_norm349, model_decoder_layers_30_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1335: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv213_1, R.shape([1, seq_len, 20, 64])) + concat94: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1333, reshape1334, reshape1335), axis=2) + reshape1336: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat94, R.shape([seq_len, 60, 64])) + lv259 = R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1336), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1337: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv259, R.shape([1, seq_len, 20, 64])) + reshape1338: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1337, R.shape([1, seq_len, 1280])) + lv214_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_self_attn_out_proj_weight4, reshape1338, model_decoder_layers_30_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1203: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1199, lv214_1) + layer_norm350: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1203, model_decoder_layers_30_encoder_attn_layer_norm_weight4, model_decoder_layers_30_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv215_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_encoder_attn_q_proj_weight4, layer_norm350, 
model_decoder_layers_30_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1339: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv215_1, R.shape([1, seq_len, 20, 64])) + reshape1340: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1339, R.shape([seq_len, 20, 64])) + lv260 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(30), R.prim_value(T.float32(1)), reshape1340), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1341: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv260, R.shape([1, seq_len, 20, 64])) + reshape1342: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1341, R.shape([1, seq_len, 1280])) + lv216_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_30_encoder_attn_out_proj_weight4, reshape1342, model_decoder_layers_30_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1206: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1203, lv216_1) + layer_norm351: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1206, model_decoder_layers_30_final_layer_norm_weight4, model_decoder_layers_30_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv30 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_30_fc1_weight4, layer_norm351, model_decoder_layers_30_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv217_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_30_fc2_weight4, lv30, model_decoder_layers_30_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1209: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1206, lv217_1) + layer_norm352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1209, model_decoder_layers_31_self_attn_layer_norm_weight4, model_decoder_layers_31_self_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv218_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_q_proj_weight4, layer_norm352, model_decoder_layers_31_self_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1343: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv218_1, R.shape([1, seq_len, 20, 64])) + lv63_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul1_cublas", (model_decoder_layers_31_self_attn_k_proj_weight4, layer_norm352), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1344: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv63_1, R.shape([1, seq_len, 20, 64])) + lv219_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_v_proj_weight4, layer_norm352, model_decoder_layers_31_self_attn_v_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1345: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv219_1, R.shape([1, seq_len, 20, 64])) + concat95: R.Tensor((1, seq_len, 60, 64), dtype="float16") = R.concat((reshape1343, reshape1344, reshape1345), axis=2) + reshape1346: R.Tensor((seq_len, 60, 64), dtype="float16") = R.reshape(concat95, R.shape([seq_len, 60, 64])) + lv261 = 
R.call_dps_packed("vm.builtin.attention_kv_cache_attention_with_fused_qkv", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1346), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1347: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv261, R.shape([1, seq_len, 20, 64])) + reshape1348: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1347, R.shape([1, seq_len, 1280])) + lv220_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_self_attn_out_proj_weight4, reshape1348, model_decoder_layers_31_self_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1213: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1209, lv220_1) + layer_norm353: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1213, model_decoder_layers_31_encoder_attn_layer_norm_weight4, model_decoder_layers_31_encoder_attn_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv221_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_encoder_attn_q_proj_weight4, layer_norm353, model_decoder_layers_31_encoder_attn_q_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + reshape1349: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv221_1, R.shape([1, seq_len, 20, 64])) + reshape1350: R.Tensor((seq_len, 20, 64), dtype="float16") = R.reshape(reshape1349, R.shape([seq_len, 20, 64])) + lv262 = R.call_dps_packed("vm.builtin.attention_kv_cache_cross_attention", (paged_kv_cache, R.prim_value(31), R.prim_value(T.float32(1)), reshape1350), out_sinfo=R.Tensor((seq_len, 20, 64), dtype="float16")) + reshape1351: R.Tensor((1, seq_len, 20, 64), dtype="float16") = R.reshape(lv262, R.shape([1, seq_len, 20, 64])) + reshape1352: R.Tensor((1, seq_len, 1280), dtype="float16") = R.reshape(reshape1351, R.shape([1, seq_len, 1280])) + lv222_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add1_cublas", (model_decoder_layers_31_encoder_attn_out_proj_weight4, reshape1352, model_decoder_layers_31_encoder_attn_out_proj_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1216: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1213, lv222_1) + layer_norm354: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1216, model_decoder_layers_31_final_layer_norm_weight4, model_decoder_layers_31_final_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv31 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add_relax_nn_gelu_cublas", (model_decoder_layers_31_fc1_weight4, layer_norm354, model_decoder_layers_31_fc1_bias4), out_sinfo=R.Tensor((1, seq_len, 5120), dtype="float16")) + lv223_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul_relax_add2_cublas", (model_decoder_layers_31_fc2_weight4, lv31, model_decoder_layers_31_fc2_bias4), out_sinfo=R.Tensor((1, seq_len, 1280), dtype="float16")) + add1219: R.Tensor((1, seq_len, 1280), dtype="float16") = R.add(add1216, lv223_1) + layer_norm355: R.Tensor((1, seq_len, 1280), dtype="float16") = R.nn.layer_norm(add1219, model_decoder_layer_norm_weight4, model_decoder_layer_norm_bias4, axes=[-1], epsilon=1.0000000000000001e-05, center=True, scale=True) + lv263 = R.call_tir(cls.index, (layer_norm355,), out_sinfo=R.Tensor((1, 1, 1280), dtype="float16")) + lv64_1 = R.call_dps_packed("fused_relax_permute_dims_relax_matmul2_cublas", 
(model_decoder_embed_tokens_weight4, lv263), out_sinfo=R.Tensor((1, 1, 51866), dtype="float32")) + gv4: R.Tensor((1, 1, 51866), dtype="float32") = lv64_1 + R.output(gv4) + return gv4 + + @R.function + def renormalize_by_top_p(probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), top_p: R.Tensor(("batch_size",), dtype="float32"), init_pivots: R.Tensor(("batch_size", 3), dtype="float32")) -> R.Tensor(("batch_size", "vocab_size"), dtype="float32"): + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + cls = Module + with R.dataflow(): + lv6 = R.call_tir(cls.top_p_pivot_cutoff, (probs, top_p, init_pivots), out_sinfo=[R.Tensor((batch_size,), dtype="float32"), R.Tensor((batch_size,), dtype="float32")]) + lv7: R.Tensor((batch_size,), dtype="float32") = lv6[0] + lv8: R.Tensor((batch_size,), dtype="float32") = lv6[1] + gv5 = R.call_tir(cls.top_p_renorm_after_cutoff, (probs, lv7, lv8), out_sinfo=R.Tensor((batch_size, vocab_size), dtype="float32")) + R.output(gv5) + return gv5 + + @R.function + def sample_with_top_p(sorted_probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), sorted_indices: R.Tensor(("batch_size", "vocab_size"), dtype="int32"), uniform_samples: R.Tensor(("num_samples",), dtype="float32"), sample_indices: R.Tensor(("num_samples",), dtype="int32"), top_p: R.Tensor(("batch_size",), dtype="float32")) -> R.Tensor(("num_samples",), dtype="int32"): + num_samples = T.int64() + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + cls = Module + with R.dataflow(): + sorted_probs_1: R.Tensor((batch_size, vocab_size), dtype="float32") = sorted_probs + sorted_indices_1: R.Tensor((batch_size, vocab_size), dtype="int32") = sorted_indices + uniform_samples1: R.Tensor((num_samples, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", uniform_samples, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="float32"),)) + sample_indices1: R.Tensor((num_samples, 1), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", sample_indices, R.shape([num_samples, 1]), sinfo_args=(R.Tensor((num_samples, 1), dtype="int32"),)) + sample_indices2: R.Tensor((batch_size, 1), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", top_p, R.shape([batch_size, 1]), sinfo_args=(R.Tensor((batch_size, 1), dtype="float32"),)) + lv3 = R.call_tir(cls.full, R.tuple(), out_sinfo=R.Tensor((batch_size, 1), dtype="int32"), tir_vars=R.shape([vocab_size])) + cumsum: R.Tensor((batch_size, vocab_size), dtype="float32") = R.cumsum(sorted_probs_1, axis=1, dtype="void", exclusive=None) + lv4 = R.call_tir(cls.get_renorm_prob, (cumsum, sample_indices2, lv3), out_sinfo=R.Tensor((batch_size, 1), dtype="float32")) + lv5 = R.call_tir(cls.get_index_from_sorted, (cumsum, sorted_indices_1, lv4, uniform_samples1, sample_indices1), out_sinfo=R.Tensor((num_samples, 1), dtype="int32")) + gv2: R.Tensor((num_samples,), dtype="int32") = R.call_pure_packed("vm.builtin.reshape", lv5, R.shape([num_samples]), sinfo_args=(R.Tensor((num_samples,), dtype="int32"),)) + R.output(gv2) + return gv2 + + @R.function + def sampler_take_probs(unsorted_probs: R.Tensor(("batch_size", "vocab_size"), dtype="float32"), sorted_indices: R.Tensor(("batch_size", 
"vocab_size"), dtype="int32"), sample_indices: R.Tensor(("num_samples",), dtype="int32"), sampling_result: R.Tensor(("num_samples",), dtype="int32"), lobprob_offsets: R.Tensor(("num_positions",), dtype="int32")) -> R.Tuple(R.Tensor(("num_samples",), dtype="float32"), R.Tensor(("num_positions",), dtype="float32"), R.Tensor(("num_positions",), dtype="int32")): + num_samples = T.int64() + num_positions = T.int64() + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + cls = Module + with R.dataflow(): + gv3 = R.call_tir(cls.sampler_take_probs_tir, (unsorted_probs, sorted_indices, sample_indices, sampling_result, lobprob_offsets), out_sinfo=[R.Tensor((num_samples,), dtype="float32"), R.Tensor((num_positions,), dtype="float32"), R.Tensor((num_positions,), dtype="int32")]) + R.output(gv3) + return gv3 + + @R.function + def sampler_verify_draft_tokens(draft_probs: R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), draft_tokens: R.Tensor(("num_nodes",), dtype="int32"), model_probs: R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), token_tree_first_child: R.Tensor(("num_nodes",), dtype="int32"), token_tree_next_sibling: R.Tensor(("num_nodes",), dtype="int32"), uniform_samples: R.Tensor(("num_nodes",), dtype="float32"), token_tree_parent_ptr: R.Tensor(("nbatch",), dtype="int32")) -> R.Tuple(R.Tensor(("num_nodes", "vocab_size"), dtype="float32"), R.Tensor(("nbatch",), dtype="int32")): + num_nodes = T.int64() + vocab_size = T.int64() + nbatch = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "num_positions": 48, "num_samples": 8}}) + cls = Module + with R.dataflow(): + gv4: R.Tuple(R.Tensor((num_nodes, vocab_size), dtype="float32"), R.Tensor((nbatch,), dtype="int32")) = R.call_tir_inplace(cls.batch_verify_on_gpu_single_kernel, (draft_probs, draft_tokens, model_probs, token_tree_first_child, token_tree_next_sibling, uniform_samples, token_tree_parent_ptr), out_sinfo=[R.Tensor((num_nodes, vocab_size), dtype="float32"), R.Tensor((nbatch,), dtype="int32")], inplace_indices=[2, 6]) + R.output(gv4) + return gv4 + + @R.function + def softmax_with_temperature(logits: R.Tensor(("batch_size", 1, "vocab_size"), dtype="float32"), temperature: R.Tensor(("batch_size",), dtype="float32")) -> R.Tensor(("batch_size", 1, "vocab_size"), dtype="float32"): + batch_size = T.int64() + vocab_size = T.int64() + R.func_attr({"relax.memory_plan_dynamic_func_output": 1, "tir_non_negative_var": ["vocab_size"], "tir_var_upper_bound": {"batch_size": 8, "seq_len": 15000, "total_seq_len": 1500}}) + cls = Module + with R.dataflow(): + lv: R.Tensor((batch_size, vocab_size), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", logits, R.shape([batch_size, vocab_size]), sinfo_args=(R.Tensor((batch_size, vocab_size), dtype="float32"),)) + lv1 = R.call_tir(cls.chunk_lse, (lv, temperature), out_sinfo=[R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32"), R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32")]) + lv2: R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32") = lv1[0] + lv3: R.Tensor((batch_size, (vocab_size + 4096 - 1) // 4096), dtype="float32") = lv1[1] + lv4 = R.call_tir(cls.softmax_with_chunked_sum, (lv, temperature, lv2, lv3), out_sinfo=R.Tensor((batch_size, vocab_size), 
dtype="float32")) + gv: R.Tensor((batch_size, 1, vocab_size), dtype="float32") = R.call_pure_packed("vm.builtin.reshape", lv4, R.shape([batch_size, 1, vocab_size]), sinfo_args=(R.Tensor((batch_size, 1, vocab_size), dtype="float32"),)) + R.output(gv) + return gv + +# Metadata omitted. Use show_meta=True in script() method to show it. \ No newline at end of file