File size: 207,501 Bytes
b5586fe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}})]
{
    func main<ios16>(tensor<fp32, [1, 77]> input_ids) {
            tensor<int32, []> var_5 = const()[name = tensor<string, []>("op_5"), val = tensor<int32, []>(-1)];
            tensor<bool, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<bool, []>(false)];
            tensor<string, []> cast_1_dtype_0 = const()[name = tensor<string, []>("cast_1_dtype_0"), val = tensor<string, []>("int32")];
            tensor<int32, []> inputs_embeds_axis_0 = const()[name = tensor<string, []>("inputs_embeds_axis_0"), val = tensor<int32, []>(0)];
            tensor<int32, []> inputs_embeds_batch_dims_0 = const()[name = tensor<string, []>("inputs_embeds_batch_dims_0"), val = tensor<int32, []>(0)];
            tensor<fp16, [49408, 768]> text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor<fp16, [49408, 768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
            tensor<int32, [1, 77]> cast_1 = cast(dtype = cast_1_dtype_0, x = input_ids)[name = tensor<string, []>("cast_2")];
            tensor<fp16, [1, 77, 768]> inputs_embeds_cast_fp16 = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = cast_1, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor<string, []>("inputs_embeds_cast_fp16")];
            tensor<fp16, [1, 77, 768]> position_embeddings_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [44352]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75890816))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75935232))), name = tensor<string, []>("position_embeddings_to_fp16_palettized"), shape = tensor<uint32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 768]> input_3_cast_fp16 = add(x = inputs_embeds_cast_fp16, y = position_embeddings_to_fp16_palettized)[name = tensor<string, []>("input_3_cast_fp16")];
            tensor<int32, [1]> hidden_states_1_axes_0 = const()[name = tensor<string, []>("hidden_states_1_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75935424)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75937024)))];
            tensor<fp16, []> var_15_to_fp16 = const()[name = tensor<string, []>("op_15_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
            tensor<fp16, [1, 77, 768]> hidden_states_1_cast_fp16 = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75938624))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76381056))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76381248)))];
            tensor<fp16, [1, 77, 768]> linear_0_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
            tensor<fp16, []> var_107_to_fp16 = const()[name = tensor<string, []>("op_107_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_5_cast_fp16 = mul(x = linear_0_cast_fp16, y = var_107_to_fp16)[name = tensor<string, []>("tensor_5_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76382848))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76825280))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76825472)))];
            tensor<fp16, [1, 77, 768]> linear_1_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
            tensor<int32, [4]> var_112 = const()[name = tensor<string, []>("op_112"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_113_cast_fp16 = reshape(shape = var_112, x = linear_1_cast_fp16)[name = tensor<string, []>("op_113_cast_fp16")];
            tensor<int32, [4]> var_114_perm_0 = const()[name = tensor<string, []>("op_114_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(76827072))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77269504))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77269696)))];
            tensor<fp16, [1, 77, 768]> linear_2_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
            tensor<int32, [4]> var_119 = const()[name = tensor<string, []>("op_119"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_120_cast_fp16 = reshape(shape = var_119, x = linear_2_cast_fp16)[name = tensor<string, []>("op_120_cast_fp16")];
            tensor<int32, [4]> var_121_perm_0 = const()[name = tensor<string, []>("op_121_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_128 = const()[name = tensor<string, []>("op_128"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_129_cast_fp16 = reshape(shape = var_128, x = tensor_5_cast_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
            tensor<int32, [4]> var_130_perm_0 = const()[name = tensor<string, []>("op_130_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_132 = const()[name = tensor<string, []>("op_132"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_130_cast_fp16 = transpose(perm = var_130_perm_0, x = var_129_cast_fp16)[name = tensor<string, []>("transpose_47")];
            tensor<fp16, [12, 77, 64]> query_states_1_cast_fp16 = reshape(shape = var_132, x = var_130_cast_fp16)[name = tensor<string, []>("query_states_1_cast_fp16")];
            tensor<int32, [3]> var_134 = const()[name = tensor<string, []>("op_134"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_114_cast_fp16 = transpose(perm = var_114_perm_0, x = var_113_cast_fp16)[name = tensor<string, []>("transpose_46")];
            tensor<fp16, [12, 77, 64]> key_states_3_cast_fp16 = reshape(shape = var_134, x = var_114_cast_fp16)[name = tensor<string, []>("key_states_3_cast_fp16")];
            tensor<int32, [3]> var_136 = const()[name = tensor<string, []>("op_136"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_121_cast_fp16 = transpose(perm = var_121_perm_0, x = var_120_cast_fp16)[name = tensor<string, []>("transpose_45")];
            tensor<fp16, [12, 77, 64]> value_states_3_cast_fp16 = reshape(shape = var_136, x = var_121_cast_fp16)[name = tensor<string, []>("value_states_3_cast_fp16")];
            tensor<bool, []> attn_weights_1_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_1_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_1, transpose_y = attn_weights_1_transpose_y_1, x = query_states_1_cast_fp16, y = key_states_3_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
            tensor<int32, [4]> var_141 = const()[name = tensor<string, []>("op_141"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_142_cast_fp16 = reshape(shape = var_141, x = attn_weights_1_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")];
            tensor<fp16, [1, 1, 77, 77]> op_56_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [4447]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77271296))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77275840))), name = tensor<string, []>("op_56_to_fp16_palettized"), shape = tensor<uint32, [4]>([1, 1, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_3_cast_fp16 = add(x = var_142_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
            tensor<int32, [3]> var_147 = const()[name = tensor<string, []>("op_147"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_5_cast_fp16 = reshape(shape = var_147, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_7_cast_fp16 = softmax(axis = var_5, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
            tensor<bool, []> attn_output_1_transpose_x_0 = const()[name = tensor<string, []>("attn_output_1_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_1_transpose_y_0 = const()[name = tensor<string, []>("attn_output_1_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_1_cast_fp16 = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast_fp16, y = value_states_3_cast_fp16)[name = tensor<string, []>("attn_output_1_cast_fp16")];
            tensor<int32, [4]> var_152 = const()[name = tensor<string, []>("op_152"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_3_cast_fp16 = reshape(shape = var_152, x = attn_output_1_cast_fp16)[name = tensor<string, []>("attn_output_3_cast_fp16")];
            tensor<int32, [4]> attn_output_5_perm_0 = const()[name = tensor<string, []>("attn_output_5_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_155 = const()[name = tensor<string, []>("op_155"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_5_cast_fp16 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast_fp16)[name = tensor<string, []>("transpose_44")];
            tensor<fp16, [1, 77, 768]> input_9_cast_fp16 = reshape(shape = var_155, x = attn_output_5_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77276032))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77718464))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77718656)))];
            tensor<fp16, [1, 77, 768]> linear_3_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized, x = input_9_cast_fp16)[name = tensor<string, []>("linear_3_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_11_cast_fp16 = add(x = input_3_cast_fp16, y = linear_3_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
            tensor<int32, [1]> input_13_axes_0 = const()[name = tensor<string, []>("input_13_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77720256)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77721856)))];
            tensor<fp16, [1, 77, 768]> input_13_cast_fp16 = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(77723456))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(79492992))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(79493184)))];
            tensor<fp16, [1, 77, 3072]> linear_4_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized, x = input_13_cast_fp16)[name = tensor<string, []>("linear_4_cast_fp16")];
            tensor<fp16, []> var_170_to_fp16 = const()[name = tensor<string, []>("op_170_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_171_cast_fp16 = mul(x = linear_4_cast_fp16, y = var_170_to_fp16)[name = tensor<string, []>("op_171_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_172_cast_fp16 = sigmoid(x = var_171_cast_fp16)[name = tensor<string, []>("op_172_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_17_cast_fp16 = mul(x = linear_4_cast_fp16, y = var_172_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(79499392))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81268928))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81269120)))];
            tensor<fp16, [1, 77, 768]> linear_5_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized, x = input_17_cast_fp16)[name = tensor<string, []>("linear_5_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_19_cast_fp16 = add(x = input_11_cast_fp16, y = linear_5_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
            tensor<int32, [1]> hidden_states_7_axes_0 = const()[name = tensor<string, []>("hidden_states_7_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81270720)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81272320)))];
            tensor<fp16, [1, 77, 768]> hidden_states_7_cast_fp16 = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("hidden_states_7_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81273920))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81716352))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81716544)))];
            tensor<fp16, [1, 77, 768]> linear_6_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_7_cast_fp16)[name = tensor<string, []>("linear_6_cast_fp16")];
            tensor<fp16, []> var_197_to_fp16 = const()[name = tensor<string, []>("op_197_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_11_cast_fp16 = mul(x = linear_6_cast_fp16, y = var_197_to_fp16)[name = tensor<string, []>("tensor_11_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(81718144))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82160576))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82160768)))];
            tensor<fp16, [1, 77, 768]> linear_7_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_7_cast_fp16)[name = tensor<string, []>("linear_7_cast_fp16")];
            tensor<int32, [4]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_203_cast_fp16 = reshape(shape = var_202, x = linear_7_cast_fp16)[name = tensor<string, []>("op_203_cast_fp16")];
            tensor<int32, [4]> var_204_perm_0 = const()[name = tensor<string, []>("op_204_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82162368))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82604800))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82604992)))];
            tensor<fp16, [1, 77, 768]> linear_8_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_7_cast_fp16)[name = tensor<string, []>("linear_8_cast_fp16")];
            tensor<int32, [4]> var_209 = const()[name = tensor<string, []>("op_209"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_210_cast_fp16 = reshape(shape = var_209, x = linear_8_cast_fp16)[name = tensor<string, []>("op_210_cast_fp16")];
            tensor<int32, [4]> var_211_perm_0 = const()[name = tensor<string, []>("op_211_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_218 = const()[name = tensor<string, []>("op_218"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_219_cast_fp16 = reshape(shape = var_218, x = tensor_11_cast_fp16)[name = tensor<string, []>("op_219_cast_fp16")];
            tensor<int32, [4]> var_220_perm_0 = const()[name = tensor<string, []>("op_220_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_220_cast_fp16 = transpose(perm = var_220_perm_0, x = var_219_cast_fp16)[name = tensor<string, []>("transpose_43")];
            tensor<fp16, [12, 77, 64]> query_states_3_cast_fp16 = reshape(shape = var_222, x = var_220_cast_fp16)[name = tensor<string, []>("query_states_3_cast_fp16")];
            tensor<int32, [3]> var_224 = const()[name = tensor<string, []>("op_224"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_204_cast_fp16 = transpose(perm = var_204_perm_0, x = var_203_cast_fp16)[name = tensor<string, []>("transpose_42")];
            tensor<fp16, [12, 77, 64]> key_states_7_cast_fp16 = reshape(shape = var_224, x = var_204_cast_fp16)[name = tensor<string, []>("key_states_7_cast_fp16")];
            tensor<int32, [3]> var_226 = const()[name = tensor<string, []>("op_226"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_211_cast_fp16 = transpose(perm = var_211_perm_0, x = var_210_cast_fp16)[name = tensor<string, []>("transpose_41")];
            tensor<fp16, [12, 77, 64]> value_states_7_cast_fp16 = reshape(shape = var_226, x = var_211_cast_fp16)[name = tensor<string, []>("value_states_7_cast_fp16")];
            tensor<bool, []> attn_weights_7_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_7_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_7_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_7_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_7_cast_fp16 = matmul(transpose_x = attn_weights_7_transpose_x_1, transpose_y = attn_weights_7_transpose_y_1, x = query_states_3_cast_fp16, y = key_states_7_cast_fp16)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
            tensor<int32, [4]> var_231 = const()[name = tensor<string, []>("op_231"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_232_cast_fp16 = reshape(shape = var_231, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_232_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_9_cast_fp16 = add(x = var_232_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
            tensor<int32, [3]> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_21_cast_fp16 = reshape(shape = var_237, x = attn_weights_9_cast_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_23_cast_fp16 = softmax(axis = var_5, x = input_21_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
            tensor<bool, []> attn_output_7_transpose_x_0 = const()[name = tensor<string, []>("attn_output_7_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_7_transpose_y_0 = const()[name = tensor<string, []>("attn_output_7_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_7_cast_fp16 = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast_fp16, y = value_states_7_cast_fp16)[name = tensor<string, []>("attn_output_7_cast_fp16")];
            tensor<int32, [4]> var_242 = const()[name = tensor<string, []>("op_242"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_9_cast_fp16 = reshape(shape = var_242, x = attn_output_7_cast_fp16)[name = tensor<string, []>("attn_output_9_cast_fp16")];
            tensor<int32, [4]> attn_output_11_perm_0 = const()[name = tensor<string, []>("attn_output_11_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_245 = const()[name = tensor<string, []>("op_245"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_11_cast_fp16 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast_fp16)[name = tensor<string, []>("transpose_40")];
            tensor<fp16, [1, 77, 768]> input_25_cast_fp16 = reshape(shape = var_245, x = attn_output_11_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(82606592))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83049024))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83049216)))];
            tensor<fp16, [1, 77, 768]> linear_9_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized, x = input_25_cast_fp16)[name = tensor<string, []>("linear_9_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_27_cast_fp16 = add(x = input_19_cast_fp16, y = linear_9_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")];
            tensor<int32, [1]> input_29_axes_0 = const()[name = tensor<string, []>("input_29_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83050816)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83052416)))];
            tensor<fp16, [1, 77, 768]> input_29_cast_fp16 = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(83054016))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84823552))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84823744)))];
            tensor<fp16, [1, 77, 3072]> linear_10_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized, x = input_29_cast_fp16)[name = tensor<string, []>("linear_10_cast_fp16")];
            tensor<fp16, []> var_260_to_fp16 = const()[name = tensor<string, []>("op_260_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_261_cast_fp16 = mul(x = linear_10_cast_fp16, y = var_260_to_fp16)[name = tensor<string, []>("op_261_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_262_cast_fp16 = sigmoid(x = var_261_cast_fp16)[name = tensor<string, []>("op_262_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_33_cast_fp16 = mul(x = linear_10_cast_fp16, y = var_262_cast_fp16)[name = tensor<string, []>("input_33_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(84829952))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86599488))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86599680)))];
            tensor<fp16, [1, 77, 768]> linear_11_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized, x = input_33_cast_fp16)[name = tensor<string, []>("linear_11_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_35_cast_fp16 = add(x = input_27_cast_fp16, y = linear_11_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
            tensor<int32, [1]> hidden_states_13_axes_0 = const()[name = tensor<string, []>("hidden_states_13_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86601280)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86602880)))];
            tensor<fp16, [1, 77, 768]> hidden_states_13_cast_fp16 = layer_norm(axes = hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast_fp16)[name = tensor<string, []>("hidden_states_13_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86604480))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87046912))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87047104)))];
            tensor<fp16, [1, 77, 768]> linear_12_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_13_cast_fp16)[name = tensor<string, []>("linear_12_cast_fp16")];
            tensor<fp16, []> var_287_to_fp16 = const()[name = tensor<string, []>("op_287_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_17_cast_fp16 = mul(x = linear_12_cast_fp16, y = var_287_to_fp16)[name = tensor<string, []>("tensor_17_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87048704))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87491136))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87491328)))];
            tensor<fp16, [1, 77, 768]> linear_13_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_13_cast_fp16)[name = tensor<string, []>("linear_13_cast_fp16")];
            tensor<int32, [4]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_293_cast_fp16 = reshape(shape = var_292, x = linear_13_cast_fp16)[name = tensor<string, []>("op_293_cast_fp16")];
            tensor<int32, [4]> var_294_perm_0 = const()[name = tensor<string, []>("op_294_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87492928))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87935360))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87935552)))];
            tensor<fp16, [1, 77, 768]> linear_14_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_13_cast_fp16)[name = tensor<string, []>("linear_14_cast_fp16")];
            tensor<int32, [4]> var_299 = const()[name = tensor<string, []>("op_299"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_300_cast_fp16 = reshape(shape = var_299, x = linear_14_cast_fp16)[name = tensor<string, []>("op_300_cast_fp16")];
            tensor<int32, [4]> var_301_perm_0 = const()[name = tensor<string, []>("op_301_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_308 = const()[name = tensor<string, []>("op_308"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_309_cast_fp16 = reshape(shape = var_308, x = tensor_17_cast_fp16)[name = tensor<string, []>("op_309_cast_fp16")];
            tensor<int32, [4]> var_310_perm_0 = const()[name = tensor<string, []>("op_310_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_312 = const()[name = tensor<string, []>("op_312"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_310_cast_fp16 = transpose(perm = var_310_perm_0, x = var_309_cast_fp16)[name = tensor<string, []>("transpose_39")];
            tensor<fp16, [12, 77, 64]> query_states_5_cast_fp16 = reshape(shape = var_312, x = var_310_cast_fp16)[name = tensor<string, []>("query_states_5_cast_fp16")];
            tensor<int32, [3]> var_314 = const()[name = tensor<string, []>("op_314"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_294_cast_fp16 = transpose(perm = var_294_perm_0, x = var_293_cast_fp16)[name = tensor<string, []>("transpose_38")];
            tensor<fp16, [12, 77, 64]> key_states_11_cast_fp16 = reshape(shape = var_314, x = var_294_cast_fp16)[name = tensor<string, []>("key_states_11_cast_fp16")];
            tensor<int32, [3]> var_316 = const()[name = tensor<string, []>("op_316"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_301_cast_fp16 = transpose(perm = var_301_perm_0, x = var_300_cast_fp16)[name = tensor<string, []>("transpose_37")];
            tensor<fp16, [12, 77, 64]> value_states_11_cast_fp16 = reshape(shape = var_316, x = var_301_cast_fp16)[name = tensor<string, []>("value_states_11_cast_fp16")];
            tensor<bool, []> attn_weights_13_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_13_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_13_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_13_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_13_cast_fp16 = matmul(transpose_x = attn_weights_13_transpose_x_1, transpose_y = attn_weights_13_transpose_y_1, x = query_states_5_cast_fp16, y = key_states_11_cast_fp16)[name = tensor<string, []>("attn_weights_13_cast_fp16")];
            tensor<int32, [4]> var_321 = const()[name = tensor<string, []>("op_321"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_322_cast_fp16 = reshape(shape = var_321, x = attn_weights_13_cast_fp16)[name = tensor<string, []>("op_322_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_15_cast_fp16 = add(x = var_322_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_15_cast_fp16")];
            tensor<int32, [3]> var_327 = const()[name = tensor<string, []>("op_327"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_37_cast_fp16 = reshape(shape = var_327, x = attn_weights_15_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_39_cast_fp16 = softmax(axis = var_5, x = input_37_cast_fp16)[name = tensor<string, []>("input_39_cast_fp16")];
            tensor<bool, []> attn_output_13_transpose_x_0 = const()[name = tensor<string, []>("attn_output_13_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_13_transpose_y_0 = const()[name = tensor<string, []>("attn_output_13_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_13_cast_fp16 = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast_fp16, y = value_states_11_cast_fp16)[name = tensor<string, []>("attn_output_13_cast_fp16")];
            tensor<int32, [4]> var_332 = const()[name = tensor<string, []>("op_332"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_15_cast_fp16 = reshape(shape = var_332, x = attn_output_13_cast_fp16)[name = tensor<string, []>("attn_output_15_cast_fp16")];
            tensor<int32, [4]> attn_output_17_perm_0 = const()[name = tensor<string, []>("attn_output_17_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_335 = const()[name = tensor<string, []>("op_335"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_17_cast_fp16 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast_fp16)[name = tensor<string, []>("transpose_36")];
            tensor<fp16, [1, 77, 768]> input_41_cast_fp16 = reshape(shape = var_335, x = attn_output_17_cast_fp16)[name = tensor<string, []>("input_41_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(87937152))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88379584))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88379776)))];
            tensor<fp16, [1, 77, 768]> linear_15_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized, x = input_41_cast_fp16)[name = tensor<string, []>("linear_15_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_43_cast_fp16 = add(x = input_35_cast_fp16, y = linear_15_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
            tensor<int32, [1]> input_45_axes_0 = const()[name = tensor<string, []>("input_45_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88381376)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88382976)))];
            tensor<fp16, [1, 77, 768]> input_45_cast_fp16 = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("input_45_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(88384576))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90154112))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90154304)))];
            tensor<fp16, [1, 77, 3072]> linear_16_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized, x = input_45_cast_fp16)[name = tensor<string, []>("linear_16_cast_fp16")];
            tensor<fp16, []> var_350_to_fp16 = const()[name = tensor<string, []>("op_350_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_351_cast_fp16 = mul(x = linear_16_cast_fp16, y = var_350_to_fp16)[name = tensor<string, []>("op_351_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_352_cast_fp16 = sigmoid(x = var_351_cast_fp16)[name = tensor<string, []>("op_352_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_49_cast_fp16 = mul(x = linear_16_cast_fp16, y = var_352_cast_fp16)[name = tensor<string, []>("input_49_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(90160512))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91930048))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91930240)))];
            tensor<fp16, [1, 77, 768]> linear_17_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized, x = input_49_cast_fp16)[name = tensor<string, []>("linear_17_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_51_cast_fp16 = add(x = input_43_cast_fp16, y = linear_17_cast_fp16)[name = tensor<string, []>("input_51_cast_fp16")];
            tensor<int32, [1]> hidden_states_19_axes_0 = const()[name = tensor<string, []>("hidden_states_19_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91931840)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91933440)))];
            tensor<fp16, [1, 77, 768]> hidden_states_19_cast_fp16 = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast_fp16)[name = tensor<string, []>("hidden_states_19_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91935040))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92377472))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92377664)))];
            tensor<fp16, [1, 77, 768]> linear_18_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_19_cast_fp16)[name = tensor<string, []>("linear_18_cast_fp16")];
            tensor<fp16, []> var_377_to_fp16 = const()[name = tensor<string, []>("op_377_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_23_cast_fp16 = mul(x = linear_18_cast_fp16, y = var_377_to_fp16)[name = tensor<string, []>("tensor_23_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92379264))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92821696))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92821888)))];
            tensor<fp16, [1, 77, 768]> linear_19_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_19_cast_fp16)[name = tensor<string, []>("linear_19_cast_fp16")];
            tensor<int32, [4]> var_382 = const()[name = tensor<string, []>("op_382"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_383_cast_fp16 = reshape(shape = var_382, x = linear_19_cast_fp16)[name = tensor<string, []>("op_383_cast_fp16")];
            tensor<int32, [4]> var_384_perm_0 = const()[name = tensor<string, []>("op_384_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(92823488))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93265920))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93266112)))];
            tensor<fp16, [1, 77, 768]> linear_20_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_19_cast_fp16)[name = tensor<string, []>("linear_20_cast_fp16")];
            tensor<int32, [4]> var_389 = const()[name = tensor<string, []>("op_389"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_390_cast_fp16 = reshape(shape = var_389, x = linear_20_cast_fp16)[name = tensor<string, []>("op_390_cast_fp16")];
            tensor<int32, [4]> var_391_perm_0 = const()[name = tensor<string, []>("op_391_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_398 = const()[name = tensor<string, []>("op_398"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_399_cast_fp16 = reshape(shape = var_398, x = tensor_23_cast_fp16)[name = tensor<string, []>("op_399_cast_fp16")];
            tensor<int32, [4]> var_400_perm_0 = const()[name = tensor<string, []>("op_400_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_400_cast_fp16 = transpose(perm = var_400_perm_0, x = var_399_cast_fp16)[name = tensor<string, []>("transpose_35")];
            tensor<fp16, [12, 77, 64]> query_states_7_cast_fp16 = reshape(shape = var_402, x = var_400_cast_fp16)[name = tensor<string, []>("query_states_7_cast_fp16")];
            tensor<int32, [3]> var_404 = const()[name = tensor<string, []>("op_404"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_384_cast_fp16 = transpose(perm = var_384_perm_0, x = var_383_cast_fp16)[name = tensor<string, []>("transpose_34")];
            tensor<fp16, [12, 77, 64]> key_states_15_cast_fp16 = reshape(shape = var_404, x = var_384_cast_fp16)[name = tensor<string, []>("key_states_15_cast_fp16")];
            tensor<int32, [3]> var_406 = const()[name = tensor<string, []>("op_406"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_391_cast_fp16 = transpose(perm = var_391_perm_0, x = var_390_cast_fp16)[name = tensor<string, []>("transpose_33")];
            tensor<fp16, [12, 77, 64]> value_states_15_cast_fp16 = reshape(shape = var_406, x = var_391_cast_fp16)[name = tensor<string, []>("value_states_15_cast_fp16")];
            tensor<bool, []> attn_weights_19_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_19_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_19_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_19_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_19_cast_fp16 = matmul(transpose_x = attn_weights_19_transpose_x_1, transpose_y = attn_weights_19_transpose_y_1, x = query_states_7_cast_fp16, y = key_states_15_cast_fp16)[name = tensor<string, []>("attn_weights_19_cast_fp16")];
            tensor<int32, [4]> var_411 = const()[name = tensor<string, []>("op_411"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_412_cast_fp16 = reshape(shape = var_411, x = attn_weights_19_cast_fp16)[name = tensor<string, []>("op_412_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_21_cast_fp16 = add(x = var_412_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_21_cast_fp16")];
            tensor<int32, [3]> var_417 = const()[name = tensor<string, []>("op_417"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_53_cast_fp16 = reshape(shape = var_417, x = attn_weights_21_cast_fp16)[name = tensor<string, []>("input_53_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_55_cast_fp16 = softmax(axis = var_5, x = input_53_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
            tensor<bool, []> attn_output_19_transpose_x_0 = const()[name = tensor<string, []>("attn_output_19_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_19_transpose_y_0 = const()[name = tensor<string, []>("attn_output_19_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_19_cast_fp16 = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast_fp16, y = value_states_15_cast_fp16)[name = tensor<string, []>("attn_output_19_cast_fp16")];
            tensor<int32, [4]> var_422 = const()[name = tensor<string, []>("op_422"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_21_cast_fp16 = reshape(shape = var_422, x = attn_output_19_cast_fp16)[name = tensor<string, []>("attn_output_21_cast_fp16")];
            tensor<int32, [4]> attn_output_23_perm_0 = const()[name = tensor<string, []>("attn_output_23_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_425 = const()[name = tensor<string, []>("op_425"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_23_cast_fp16 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast_fp16)[name = tensor<string, []>("transpose_32")];
            tensor<fp16, [1, 77, 768]> input_57_cast_fp16 = reshape(shape = var_425, x = attn_output_23_cast_fp16)[name = tensor<string, []>("input_57_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93267712))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93710144))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93710336)))];
            tensor<fp16, [1, 77, 768]> linear_21_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized, x = input_57_cast_fp16)[name = tensor<string, []>("linear_21_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_59_cast_fp16 = add(x = input_51_cast_fp16, y = linear_21_cast_fp16)[name = tensor<string, []>("input_59_cast_fp16")];
            tensor<int32, [1]> input_61_axes_0 = const()[name = tensor<string, []>("input_61_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93711936)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93713536)))];
            tensor<fp16, [1, 77, 768]> input_61_cast_fp16 = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast_fp16)[name = tensor<string, []>("input_61_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93715136))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(95484672))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(95484864)))];
            tensor<fp16, [1, 77, 3072]> linear_22_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized, x = input_61_cast_fp16)[name = tensor<string, []>("linear_22_cast_fp16")];
            tensor<fp16, []> var_440_to_fp16 = const()[name = tensor<string, []>("op_440_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_441_cast_fp16 = mul(x = linear_22_cast_fp16, y = var_440_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_442_cast_fp16 = sigmoid(x = var_441_cast_fp16)[name = tensor<string, []>("op_442_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_65_cast_fp16 = mul(x = linear_22_cast_fp16, y = var_442_cast_fp16)[name = tensor<string, []>("input_65_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(95491072))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97260608))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97260800)))];
            tensor<fp16, [1, 77, 768]> linear_23_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized, x = input_65_cast_fp16)[name = tensor<string, []>("linear_23_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_67_cast_fp16 = add(x = input_59_cast_fp16, y = linear_23_cast_fp16)[name = tensor<string, []>("input_67_cast_fp16")];
            tensor<int32, [1]> hidden_states_25_axes_0 = const()[name = tensor<string, []>("hidden_states_25_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97262400)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97264000)))];
            tensor<fp16, [1, 77, 768]> hidden_states_25_cast_fp16 = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast_fp16)[name = tensor<string, []>("hidden_states_25_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97265600))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97708032))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97708224)))];
            tensor<fp16, [1, 77, 768]> linear_24_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_25_cast_fp16)[name = tensor<string, []>("linear_24_cast_fp16")];
            tensor<fp16, []> var_467_to_fp16 = const()[name = tensor<string, []>("op_467_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_29_cast_fp16 = mul(x = linear_24_cast_fp16, y = var_467_to_fp16)[name = tensor<string, []>("tensor_29_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(97709824))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98152256))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98152448)))];
            tensor<fp16, [1, 77, 768]> linear_25_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_25_cast_fp16)[name = tensor<string, []>("linear_25_cast_fp16")];
            tensor<int32, [4]> var_472 = const()[name = tensor<string, []>("op_472"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_473_cast_fp16 = reshape(shape = var_472, x = linear_25_cast_fp16)[name = tensor<string, []>("op_473_cast_fp16")];
            tensor<int32, [4]> var_474_perm_0 = const()[name = tensor<string, []>("op_474_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98154048))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98596480))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98596672)))];
            tensor<fp16, [1, 77, 768]> linear_26_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_25_cast_fp16)[name = tensor<string, []>("linear_26_cast_fp16")];
            tensor<int32, [4]> var_479 = const()[name = tensor<string, []>("op_479"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_480_cast_fp16 = reshape(shape = var_479, x = linear_26_cast_fp16)[name = tensor<string, []>("op_480_cast_fp16")];
            tensor<int32, [4]> var_481_perm_0 = const()[name = tensor<string, []>("op_481_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_488 = const()[name = tensor<string, []>("op_488"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_489_cast_fp16 = reshape(shape = var_488, x = tensor_29_cast_fp16)[name = tensor<string, []>("op_489_cast_fp16")];
            tensor<int32, [4]> var_490_perm_0 = const()[name = tensor<string, []>("op_490_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_490_cast_fp16 = transpose(perm = var_490_perm_0, x = var_489_cast_fp16)[name = tensor<string, []>("transpose_31")];
            tensor<fp16, [12, 77, 64]> query_states_9_cast_fp16 = reshape(shape = var_492, x = var_490_cast_fp16)[name = tensor<string, []>("query_states_9_cast_fp16")];
            tensor<int32, [3]> var_494 = const()[name = tensor<string, []>("op_494"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_474_cast_fp16 = transpose(perm = var_474_perm_0, x = var_473_cast_fp16)[name = tensor<string, []>("transpose_30")];
            tensor<fp16, [12, 77, 64]> key_states_19_cast_fp16 = reshape(shape = var_494, x = var_474_cast_fp16)[name = tensor<string, []>("key_states_19_cast_fp16")];
            tensor<int32, [3]> var_496 = const()[name = tensor<string, []>("op_496"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_481_cast_fp16 = transpose(perm = var_481_perm_0, x = var_480_cast_fp16)[name = tensor<string, []>("transpose_29")];
            tensor<fp16, [12, 77, 64]> value_states_19_cast_fp16 = reshape(shape = var_496, x = var_481_cast_fp16)[name = tensor<string, []>("value_states_19_cast_fp16")];
            tensor<bool, []> attn_weights_25_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_25_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_25_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_25_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_25_cast_fp16 = matmul(transpose_x = attn_weights_25_transpose_x_1, transpose_y = attn_weights_25_transpose_y_1, x = query_states_9_cast_fp16, y = key_states_19_cast_fp16)[name = tensor<string, []>("attn_weights_25_cast_fp16")];
            tensor<int32, [4]> var_501 = const()[name = tensor<string, []>("op_501"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_502_cast_fp16 = reshape(shape = var_501, x = attn_weights_25_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_27_cast_fp16 = add(x = var_502_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_27_cast_fp16")];
            tensor<int32, [3]> var_507 = const()[name = tensor<string, []>("op_507"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_69_cast_fp16 = reshape(shape = var_507, x = attn_weights_27_cast_fp16)[name = tensor<string, []>("input_69_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_71_cast_fp16 = softmax(axis = var_5, x = input_69_cast_fp16)[name = tensor<string, []>("input_71_cast_fp16")];
            tensor<bool, []> attn_output_25_transpose_x_0 = const()[name = tensor<string, []>("attn_output_25_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_25_transpose_y_0 = const()[name = tensor<string, []>("attn_output_25_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_25_cast_fp16 = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast_fp16, y = value_states_19_cast_fp16)[name = tensor<string, []>("attn_output_25_cast_fp16")];
            tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_27_cast_fp16 = reshape(shape = var_512, x = attn_output_25_cast_fp16)[name = tensor<string, []>("attn_output_27_cast_fp16")];
            tensor<int32, [4]> attn_output_29_perm_0 = const()[name = tensor<string, []>("attn_output_29_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_515 = const()[name = tensor<string, []>("op_515"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_29_cast_fp16 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast_fp16)[name = tensor<string, []>("transpose_28")];
            tensor<fp16, [1, 77, 768]> input_73_cast_fp16 = reshape(shape = var_515, x = attn_output_29_cast_fp16)[name = tensor<string, []>("input_73_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98598272))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99040704))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99040896)))];
            tensor<fp16, [1, 77, 768]> linear_27_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized, x = input_73_cast_fp16)[name = tensor<string, []>("linear_27_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_75_cast_fp16 = add(x = input_67_cast_fp16, y = linear_27_cast_fp16)[name = tensor<string, []>("input_75_cast_fp16")];
            tensor<int32, [1]> input_77_axes_0 = const()[name = tensor<string, []>("input_77_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99042496)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99044096)))];
            tensor<fp16, [1, 77, 768]> input_77_cast_fp16 = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast_fp16)[name = tensor<string, []>("input_77_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99045696))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(100815232))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(100815424)))];
            tensor<fp16, [1, 77, 3072]> linear_28_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized, x = input_77_cast_fp16)[name = tensor<string, []>("linear_28_cast_fp16")];
            tensor<fp16, []> var_530_to_fp16 = const()[name = tensor<string, []>("op_530_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_531_cast_fp16 = mul(x = linear_28_cast_fp16, y = var_530_to_fp16)[name = tensor<string, []>("op_531_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_532_cast_fp16 = sigmoid(x = var_531_cast_fp16)[name = tensor<string, []>("op_532_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_81_cast_fp16 = mul(x = linear_28_cast_fp16, y = var_532_cast_fp16)[name = tensor<string, []>("input_81_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(100821632))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(102591168))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(102591360)))];
            tensor<fp16, [1, 77, 768]> linear_29_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized, x = input_81_cast_fp16)[name = tensor<string, []>("linear_29_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_83_cast_fp16 = add(x = input_75_cast_fp16, y = linear_29_cast_fp16)[name = tensor<string, []>("input_83_cast_fp16")];
            tensor<int32, [1]> hidden_states_31_axes_0 = const()[name = tensor<string, []>("hidden_states_31_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(102592960)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(102594560)))];
            tensor<fp16, [1, 77, 768]> hidden_states_31_cast_fp16 = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast_fp16)[name = tensor<string, []>("hidden_states_31_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(102596160))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103038592))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103038784)))];
            tensor<fp16, [1, 77, 768]> linear_30_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_31_cast_fp16)[name = tensor<string, []>("linear_30_cast_fp16")];
            tensor<fp16, []> var_557_to_fp16 = const()[name = tensor<string, []>("op_557_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_35_cast_fp16 = mul(x = linear_30_cast_fp16, y = var_557_to_fp16)[name = tensor<string, []>("tensor_35_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103040384))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103482816))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103483008)))];
            tensor<fp16, [1, 77, 768]> linear_31_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_31_cast_fp16)[name = tensor<string, []>("linear_31_cast_fp16")];
            tensor<int32, [4]> var_562 = const()[name = tensor<string, []>("op_562"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_563_cast_fp16 = reshape(shape = var_562, x = linear_31_cast_fp16)[name = tensor<string, []>("op_563_cast_fp16")];
            tensor<int32, [4]> var_564_perm_0 = const()[name = tensor<string, []>("op_564_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103484608))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103927040))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103927232)))];
            tensor<fp16, [1, 77, 768]> linear_32_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_31_cast_fp16)[name = tensor<string, []>("linear_32_cast_fp16")];
            tensor<int32, [4]> var_569 = const()[name = tensor<string, []>("op_569"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_570_cast_fp16 = reshape(shape = var_569, x = linear_32_cast_fp16)[name = tensor<string, []>("op_570_cast_fp16")];
            tensor<int32, [4]> var_571_perm_0 = const()[name = tensor<string, []>("op_571_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_578 = const()[name = tensor<string, []>("op_578"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_579_cast_fp16 = reshape(shape = var_578, x = tensor_35_cast_fp16)[name = tensor<string, []>("op_579_cast_fp16")];
            tensor<int32, [4]> var_580_perm_0 = const()[name = tensor<string, []>("op_580_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_582 = const()[name = tensor<string, []>("op_582"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_580_cast_fp16 = transpose(perm = var_580_perm_0, x = var_579_cast_fp16)[name = tensor<string, []>("transpose_27")];
            tensor<fp16, [12, 77, 64]> query_states_11_cast_fp16 = reshape(shape = var_582, x = var_580_cast_fp16)[name = tensor<string, []>("query_states_11_cast_fp16")];
            tensor<int32, [3]> var_584 = const()[name = tensor<string, []>("op_584"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_564_cast_fp16 = transpose(perm = var_564_perm_0, x = var_563_cast_fp16)[name = tensor<string, []>("transpose_26")];
            tensor<fp16, [12, 77, 64]> key_states_23_cast_fp16 = reshape(shape = var_584, x = var_564_cast_fp16)[name = tensor<string, []>("key_states_23_cast_fp16")];
            tensor<int32, [3]> var_586 = const()[name = tensor<string, []>("op_586"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_571_cast_fp16 = transpose(perm = var_571_perm_0, x = var_570_cast_fp16)[name = tensor<string, []>("transpose_25")];
            tensor<fp16, [12, 77, 64]> value_states_23_cast_fp16 = reshape(shape = var_586, x = var_571_cast_fp16)[name = tensor<string, []>("value_states_23_cast_fp16")];
            tensor<bool, []> attn_weights_31_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_31_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_31_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_31_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_31_cast_fp16 = matmul(transpose_x = attn_weights_31_transpose_x_1, transpose_y = attn_weights_31_transpose_y_1, x = query_states_11_cast_fp16, y = key_states_23_cast_fp16)[name = tensor<string, []>("attn_weights_31_cast_fp16")];
            tensor<int32, [4]> var_591 = const()[name = tensor<string, []>("op_591"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_592_cast_fp16 = reshape(shape = var_591, x = attn_weights_31_cast_fp16)[name = tensor<string, []>("op_592_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_33_cast_fp16 = add(x = var_592_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_33_cast_fp16")];
            tensor<int32, [3]> var_597 = const()[name = tensor<string, []>("op_597"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_85_cast_fp16 = reshape(shape = var_597, x = attn_weights_33_cast_fp16)[name = tensor<string, []>("input_85_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_87_cast_fp16 = softmax(axis = var_5, x = input_85_cast_fp16)[name = tensor<string, []>("input_87_cast_fp16")];
            tensor<bool, []> attn_output_31_transpose_x_0 = const()[name = tensor<string, []>("attn_output_31_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_31_transpose_y_0 = const()[name = tensor<string, []>("attn_output_31_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_31_cast_fp16 = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast_fp16, y = value_states_23_cast_fp16)[name = tensor<string, []>("attn_output_31_cast_fp16")];
            tensor<int32, [4]> var_602 = const()[name = tensor<string, []>("op_602"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_33_cast_fp16 = reshape(shape = var_602, x = attn_output_31_cast_fp16)[name = tensor<string, []>("attn_output_33_cast_fp16")];
            tensor<int32, [4]> attn_output_35_perm_0 = const()[name = tensor<string, []>("attn_output_35_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_605 = const()[name = tensor<string, []>("op_605"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_35_cast_fp16 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast_fp16)[name = tensor<string, []>("transpose_24")];
            tensor<fp16, [1, 77, 768]> input_89_cast_fp16 = reshape(shape = var_605, x = attn_output_35_cast_fp16)[name = tensor<string, []>("input_89_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(103928832))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104371264))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104371456)))];
            tensor<fp16, [1, 77, 768]> linear_33_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized, x = input_89_cast_fp16)[name = tensor<string, []>("linear_33_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_91_cast_fp16 = add(x = input_83_cast_fp16, y = linear_33_cast_fp16)[name = tensor<string, []>("input_91_cast_fp16")];
            tensor<int32, [1]> input_93_axes_0 = const()[name = tensor<string, []>("input_93_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104373056)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104374656)))];
            tensor<fp16, [1, 77, 768]> input_93_cast_fp16 = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast_fp16)[name = tensor<string, []>("input_93_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(104376256))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106145792))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106145984)))];
            tensor<fp16, [1, 77, 3072]> linear_34_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized, x = input_93_cast_fp16)[name = tensor<string, []>("linear_34_cast_fp16")];
            tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_621_cast_fp16 = mul(x = linear_34_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_622_cast_fp16 = sigmoid(x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_97_cast_fp16 = mul(x = linear_34_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("input_97_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(106152192))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(107921728))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(107921920)))];
            tensor<fp16, [1, 77, 768]> linear_35_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized, x = input_97_cast_fp16)[name = tensor<string, []>("linear_35_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_99_cast_fp16 = add(x = input_91_cast_fp16, y = linear_35_cast_fp16)[name = tensor<string, []>("input_99_cast_fp16")];
            tensor<int32, [1]> hidden_states_37_axes_0 = const()[name = tensor<string, []>("hidden_states_37_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(107923520)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(107925120)))];
            tensor<fp16, [1, 77, 768]> hidden_states_37_cast_fp16 = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast_fp16)[name = tensor<string, []>("hidden_states_37_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(107926720))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108369152))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108369344)))];
            tensor<fp16, [1, 77, 768]> linear_36_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_37_cast_fp16)[name = tensor<string, []>("linear_36_cast_fp16")];
            tensor<fp16, []> var_647_to_fp16 = const()[name = tensor<string, []>("op_647_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_41_cast_fp16 = mul(x = linear_36_cast_fp16, y = var_647_to_fp16)[name = tensor<string, []>("tensor_41_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108370944))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108813376))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108813568)))];
            tensor<fp16, [1, 77, 768]> linear_37_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_37_cast_fp16)[name = tensor<string, []>("linear_37_cast_fp16")];
            tensor<int32, [4]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_653_cast_fp16 = reshape(shape = var_652, x = linear_37_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")];
            tensor<int32, [4]> var_654_perm_0 = const()[name = tensor<string, []>("op_654_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(108815168))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109257600))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109257792)))];
            tensor<fp16, [1, 77, 768]> linear_38_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_37_cast_fp16)[name = tensor<string, []>("linear_38_cast_fp16")];
            tensor<int32, [4]> var_659 = const()[name = tensor<string, []>("op_659"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_660_cast_fp16 = reshape(shape = var_659, x = linear_38_cast_fp16)[name = tensor<string, []>("op_660_cast_fp16")];
            tensor<int32, [4]> var_661_perm_0 = const()[name = tensor<string, []>("op_661_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_668 = const()[name = tensor<string, []>("op_668"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_669_cast_fp16 = reshape(shape = var_668, x = tensor_41_cast_fp16)[name = tensor<string, []>("op_669_cast_fp16")];
            tensor<int32, [4]> var_670_perm_0 = const()[name = tensor<string, []>("op_670_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_672 = const()[name = tensor<string, []>("op_672"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_670_cast_fp16 = transpose(perm = var_670_perm_0, x = var_669_cast_fp16)[name = tensor<string, []>("transpose_23")];
            tensor<fp16, [12, 77, 64]> query_states_13_cast_fp16 = reshape(shape = var_672, x = var_670_cast_fp16)[name = tensor<string, []>("query_states_13_cast_fp16")];
            tensor<int32, [3]> var_674 = const()[name = tensor<string, []>("op_674"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_654_cast_fp16 = transpose(perm = var_654_perm_0, x = var_653_cast_fp16)[name = tensor<string, []>("transpose_22")];
            tensor<fp16, [12, 77, 64]> key_states_27_cast_fp16 = reshape(shape = var_674, x = var_654_cast_fp16)[name = tensor<string, []>("key_states_27_cast_fp16")];
            tensor<int32, [3]> var_676 = const()[name = tensor<string, []>("op_676"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_661_cast_fp16 = transpose(perm = var_661_perm_0, x = var_660_cast_fp16)[name = tensor<string, []>("transpose_21")];
            tensor<fp16, [12, 77, 64]> value_states_27_cast_fp16 = reshape(shape = var_676, x = var_661_cast_fp16)[name = tensor<string, []>("value_states_27_cast_fp16")];
            tensor<bool, []> attn_weights_37_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_37_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_37_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_37_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_37_cast_fp16 = matmul(transpose_x = attn_weights_37_transpose_x_1, transpose_y = attn_weights_37_transpose_y_1, x = query_states_13_cast_fp16, y = key_states_27_cast_fp16)[name = tensor<string, []>("attn_weights_37_cast_fp16")];
            tensor<int32, [4]> var_681 = const()[name = tensor<string, []>("op_681"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_682_cast_fp16 = reshape(shape = var_681, x = attn_weights_37_cast_fp16)[name = tensor<string, []>("op_682_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_39_cast_fp16 = add(x = var_682_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_39_cast_fp16")];
            tensor<int32, [3]> var_687 = const()[name = tensor<string, []>("op_687"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_101_cast_fp16 = reshape(shape = var_687, x = attn_weights_39_cast_fp16)[name = tensor<string, []>("input_101_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_103_cast_fp16 = softmax(axis = var_5, x = input_101_cast_fp16)[name = tensor<string, []>("input_103_cast_fp16")];
            tensor<bool, []> attn_output_37_transpose_x_0 = const()[name = tensor<string, []>("attn_output_37_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_37_transpose_y_0 = const()[name = tensor<string, []>("attn_output_37_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_37_cast_fp16 = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103_cast_fp16, y = value_states_27_cast_fp16)[name = tensor<string, []>("attn_output_37_cast_fp16")];
            tensor<int32, [4]> var_692 = const()[name = tensor<string, []>("op_692"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_39_cast_fp16 = reshape(shape = var_692, x = attn_output_37_cast_fp16)[name = tensor<string, []>("attn_output_39_cast_fp16")];
            tensor<int32, [4]> attn_output_41_perm_0 = const()[name = tensor<string, []>("attn_output_41_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_695 = const()[name = tensor<string, []>("op_695"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_41_cast_fp16 = transpose(perm = attn_output_41_perm_0, x = attn_output_39_cast_fp16)[name = tensor<string, []>("transpose_20")];
            tensor<fp16, [1, 77, 768]> input_105_cast_fp16 = reshape(shape = var_695, x = attn_output_41_cast_fp16)[name = tensor<string, []>("input_105_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109259392))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109701824))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109702016)))];
            tensor<fp16, [1, 77, 768]> linear_39_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized, x = input_105_cast_fp16)[name = tensor<string, []>("linear_39_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_107_cast_fp16 = add(x = input_99_cast_fp16, y = linear_39_cast_fp16)[name = tensor<string, []>("input_107_cast_fp16")];
            tensor<int32, [1]> input_109_axes_0 = const()[name = tensor<string, []>("input_109_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109703616)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109705216)))];
            tensor<fp16, [1, 77, 768]> input_109_cast_fp16 = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16, x = input_107_cast_fp16)[name = tensor<string, []>("input_109_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109706816))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111476352))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111476544)))];
            tensor<fp16, [1, 77, 3072]> linear_40_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized, x = input_109_cast_fp16)[name = tensor<string, []>("linear_40_cast_fp16")];
            tensor<fp16, []> var_710_to_fp16 = const()[name = tensor<string, []>("op_710_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_711_cast_fp16 = mul(x = linear_40_cast_fp16, y = var_710_to_fp16)[name = tensor<string, []>("op_711_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_712_cast_fp16 = sigmoid(x = var_711_cast_fp16)[name = tensor<string, []>("op_712_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_113_cast_fp16 = mul(x = linear_40_cast_fp16, y = var_712_cast_fp16)[name = tensor<string, []>("input_113_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(111482752))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113252288))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113252480)))];
            tensor<fp16, [1, 77, 768]> linear_41_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized, x = input_113_cast_fp16)[name = tensor<string, []>("linear_41_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_115_cast_fp16 = add(x = input_107_cast_fp16, y = linear_41_cast_fp16)[name = tensor<string, []>("input_115_cast_fp16")];
            tensor<int32, [1]> hidden_states_43_axes_0 = const()[name = tensor<string, []>("hidden_states_43_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113254080)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113255680)))];
            tensor<fp16, [1, 77, 768]> hidden_states_43_cast_fp16 = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16, x = input_115_cast_fp16)[name = tensor<string, []>("hidden_states_43_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113257280))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113699712))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113699904)))];
            tensor<fp16, [1, 77, 768]> linear_42_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_43_cast_fp16)[name = tensor<string, []>("linear_42_cast_fp16")];
            tensor<fp16, []> var_737_to_fp16 = const()[name = tensor<string, []>("op_737_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_47_cast_fp16 = mul(x = linear_42_cast_fp16, y = var_737_to_fp16)[name = tensor<string, []>("tensor_47_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(113701504))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114143936))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114144128)))];
            tensor<fp16, [1, 77, 768]> linear_43_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_43_cast_fp16)[name = tensor<string, []>("linear_43_cast_fp16")];
            tensor<int32, [4]> var_742 = const()[name = tensor<string, []>("op_742"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_743_cast_fp16 = reshape(shape = var_742, x = linear_43_cast_fp16)[name = tensor<string, []>("op_743_cast_fp16")];
            tensor<int32, [4]> var_744_perm_0 = const()[name = tensor<string, []>("op_744_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114145728))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114588160))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114588352)))];
            tensor<fp16, [1, 77, 768]> linear_44_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_43_cast_fp16)[name = tensor<string, []>("linear_44_cast_fp16")];
            tensor<int32, [4]> var_749 = const()[name = tensor<string, []>("op_749"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_750_cast_fp16 = reshape(shape = var_749, x = linear_44_cast_fp16)[name = tensor<string, []>("op_750_cast_fp16")];
            tensor<int32, [4]> var_751_perm_0 = const()[name = tensor<string, []>("op_751_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_758 = const()[name = tensor<string, []>("op_758"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_759_cast_fp16 = reshape(shape = var_758, x = tensor_47_cast_fp16)[name = tensor<string, []>("op_759_cast_fp16")];
            tensor<int32, [4]> var_760_perm_0 = const()[name = tensor<string, []>("op_760_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_762 = const()[name = tensor<string, []>("op_762"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_760_cast_fp16 = transpose(perm = var_760_perm_0, x = var_759_cast_fp16)[name = tensor<string, []>("transpose_19")];
            tensor<fp16, [12, 77, 64]> query_states_15_cast_fp16 = reshape(shape = var_762, x = var_760_cast_fp16)[name = tensor<string, []>("query_states_15_cast_fp16")];
            tensor<int32, [3]> var_764 = const()[name = tensor<string, []>("op_764"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_744_cast_fp16 = transpose(perm = var_744_perm_0, x = var_743_cast_fp16)[name = tensor<string, []>("transpose_18")];
            tensor<fp16, [12, 77, 64]> key_states_31_cast_fp16 = reshape(shape = var_764, x = var_744_cast_fp16)[name = tensor<string, []>("key_states_31_cast_fp16")];
            tensor<int32, [3]> var_766 = const()[name = tensor<string, []>("op_766"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_751_cast_fp16 = transpose(perm = var_751_perm_0, x = var_750_cast_fp16)[name = tensor<string, []>("transpose_17")];
            tensor<fp16, [12, 77, 64]> value_states_31_cast_fp16 = reshape(shape = var_766, x = var_751_cast_fp16)[name = tensor<string, []>("value_states_31_cast_fp16")];
            tensor<bool, []> attn_weights_43_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_43_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_43_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_43_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_43_cast_fp16 = matmul(transpose_x = attn_weights_43_transpose_x_1, transpose_y = attn_weights_43_transpose_y_1, x = query_states_15_cast_fp16, y = key_states_31_cast_fp16)[name = tensor<string, []>("attn_weights_43_cast_fp16")];
            tensor<int32, [4]> var_771 = const()[name = tensor<string, []>("op_771"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_772_cast_fp16 = reshape(shape = var_771, x = attn_weights_43_cast_fp16)[name = tensor<string, []>("op_772_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_45_cast_fp16 = add(x = var_772_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_45_cast_fp16")];
            tensor<int32, [3]> var_777 = const()[name = tensor<string, []>("op_777"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_117_cast_fp16 = reshape(shape = var_777, x = attn_weights_45_cast_fp16)[name = tensor<string, []>("input_117_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_119_cast_fp16 = softmax(axis = var_5, x = input_117_cast_fp16)[name = tensor<string, []>("input_119_cast_fp16")];
            tensor<bool, []> attn_output_43_transpose_x_0 = const()[name = tensor<string, []>("attn_output_43_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_43_transpose_y_0 = const()[name = tensor<string, []>("attn_output_43_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_43_cast_fp16 = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119_cast_fp16, y = value_states_31_cast_fp16)[name = tensor<string, []>("attn_output_43_cast_fp16")];
            tensor<int32, [4]> var_782 = const()[name = tensor<string, []>("op_782"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_45_cast_fp16 = reshape(shape = var_782, x = attn_output_43_cast_fp16)[name = tensor<string, []>("attn_output_45_cast_fp16")];
            tensor<int32, [4]> attn_output_47_perm_0 = const()[name = tensor<string, []>("attn_output_47_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_785 = const()[name = tensor<string, []>("op_785"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_47_cast_fp16 = transpose(perm = attn_output_47_perm_0, x = attn_output_45_cast_fp16)[name = tensor<string, []>("transpose_16")];
            tensor<fp16, [1, 77, 768]> input_121_cast_fp16 = reshape(shape = var_785, x = attn_output_47_cast_fp16)[name = tensor<string, []>("input_121_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(114589952))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115032384))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115032576)))];
            tensor<fp16, [1, 77, 768]> linear_45_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized, x = input_121_cast_fp16)[name = tensor<string, []>("linear_45_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_123_cast_fp16 = add(x = input_115_cast_fp16, y = linear_45_cast_fp16)[name = tensor<string, []>("input_123_cast_fp16")];
            tensor<int32, [1]> input_125_axes_0 = const()[name = tensor<string, []>("input_125_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115034176)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115035776)))];
            tensor<fp16, [1, 77, 768]> input_125_cast_fp16 = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16, x = input_123_cast_fp16)[name = tensor<string, []>("input_125_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(115037376))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(116806912))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(116807104)))];
            tensor<fp16, [1, 77, 3072]> linear_46_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized, x = input_125_cast_fp16)[name = tensor<string, []>("linear_46_cast_fp16")];
            tensor<fp16, []> var_800_to_fp16 = const()[name = tensor<string, []>("op_800_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_801_cast_fp16 = mul(x = linear_46_cast_fp16, y = var_800_to_fp16)[name = tensor<string, []>("op_801_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_802_cast_fp16 = sigmoid(x = var_801_cast_fp16)[name = tensor<string, []>("op_802_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_129_cast_fp16 = mul(x = linear_46_cast_fp16, y = var_802_cast_fp16)[name = tensor<string, []>("input_129_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(116813312))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118582848))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118583040)))];
            tensor<fp16, [1, 77, 768]> linear_47_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized, x = input_129_cast_fp16)[name = tensor<string, []>("linear_47_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_131_cast_fp16 = add(x = input_123_cast_fp16, y = linear_47_cast_fp16)[name = tensor<string, []>("input_131_cast_fp16")];
            tensor<int32, [1]> hidden_states_49_axes_0 = const()[name = tensor<string, []>("hidden_states_49_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118584640)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118586240)))];
            tensor<fp16, [1, 77, 768]> hidden_states_49_cast_fp16 = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16, x = input_131_cast_fp16)[name = tensor<string, []>("hidden_states_49_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(118587840))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119030272))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119030464)))];
            tensor<fp16, [1, 77, 768]> linear_48_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_49_cast_fp16)[name = tensor<string, []>("linear_48_cast_fp16")];
            tensor<fp16, []> var_827_to_fp16 = const()[name = tensor<string, []>("op_827_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_53_cast_fp16 = mul(x = linear_48_cast_fp16, y = var_827_to_fp16)[name = tensor<string, []>("tensor_53_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119032064))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119474496))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119474688)))];
            tensor<fp16, [1, 77, 768]> linear_49_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_49_cast_fp16)[name = tensor<string, []>("linear_49_cast_fp16")];
            tensor<int32, [4]> var_832 = const()[name = tensor<string, []>("op_832"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_833_cast_fp16 = reshape(shape = var_832, x = linear_49_cast_fp16)[name = tensor<string, []>("op_833_cast_fp16")];
            tensor<int32, [4]> var_834_perm_0 = const()[name = tensor<string, []>("op_834_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119476288))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119918720))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119918912)))];
            tensor<fp16, [1, 77, 768]> linear_50_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_49_cast_fp16)[name = tensor<string, []>("linear_50_cast_fp16")];
            tensor<int32, [4]> var_839 = const()[name = tensor<string, []>("op_839"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_840_cast_fp16 = reshape(shape = var_839, x = linear_50_cast_fp16)[name = tensor<string, []>("op_840_cast_fp16")];
            tensor<int32, [4]> var_841_perm_0 = const()[name = tensor<string, []>("op_841_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_848 = const()[name = tensor<string, []>("op_848"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_849_cast_fp16 = reshape(shape = var_848, x = tensor_53_cast_fp16)[name = tensor<string, []>("op_849_cast_fp16")];
            tensor<int32, [4]> var_850_perm_0 = const()[name = tensor<string, []>("op_850_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_852 = const()[name = tensor<string, []>("op_852"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_850_cast_fp16 = transpose(perm = var_850_perm_0, x = var_849_cast_fp16)[name = tensor<string, []>("transpose_15")];
            tensor<fp16, [12, 77, 64]> query_states_17_cast_fp16 = reshape(shape = var_852, x = var_850_cast_fp16)[name = tensor<string, []>("query_states_17_cast_fp16")];
            tensor<int32, [3]> var_854 = const()[name = tensor<string, []>("op_854"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_834_cast_fp16 = transpose(perm = var_834_perm_0, x = var_833_cast_fp16)[name = tensor<string, []>("transpose_14")];
            tensor<fp16, [12, 77, 64]> key_states_35_cast_fp16 = reshape(shape = var_854, x = var_834_cast_fp16)[name = tensor<string, []>("key_states_35_cast_fp16")];
            tensor<int32, [3]> var_856 = const()[name = tensor<string, []>("op_856"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_841_cast_fp16 = transpose(perm = var_841_perm_0, x = var_840_cast_fp16)[name = tensor<string, []>("transpose_13")];
            tensor<fp16, [12, 77, 64]> value_states_35_cast_fp16 = reshape(shape = var_856, x = var_841_cast_fp16)[name = tensor<string, []>("value_states_35_cast_fp16")];
            tensor<bool, []> attn_weights_49_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_49_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_49_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_49_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_49_cast_fp16 = matmul(transpose_x = attn_weights_49_transpose_x_1, transpose_y = attn_weights_49_transpose_y_1, x = query_states_17_cast_fp16, y = key_states_35_cast_fp16)[name = tensor<string, []>("attn_weights_49_cast_fp16")];
            tensor<int32, [4]> var_861 = const()[name = tensor<string, []>("op_861"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_862_cast_fp16 = reshape(shape = var_861, x = attn_weights_49_cast_fp16)[name = tensor<string, []>("op_862_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_51_cast_fp16 = add(x = var_862_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_51_cast_fp16")];
            tensor<int32, [3]> var_867 = const()[name = tensor<string, []>("op_867"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_133_cast_fp16 = reshape(shape = var_867, x = attn_weights_51_cast_fp16)[name = tensor<string, []>("input_133_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_135_cast_fp16 = softmax(axis = var_5, x = input_133_cast_fp16)[name = tensor<string, []>("input_135_cast_fp16")];
            tensor<bool, []> attn_output_49_transpose_x_0 = const()[name = tensor<string, []>("attn_output_49_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_49_transpose_y_0 = const()[name = tensor<string, []>("attn_output_49_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_49_cast_fp16 = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135_cast_fp16, y = value_states_35_cast_fp16)[name = tensor<string, []>("attn_output_49_cast_fp16")];
            tensor<int32, [4]> var_872 = const()[name = tensor<string, []>("op_872"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_51_cast_fp16 = reshape(shape = var_872, x = attn_output_49_cast_fp16)[name = tensor<string, []>("attn_output_51_cast_fp16")];
            tensor<int32, [4]> attn_output_53_perm_0 = const()[name = tensor<string, []>("attn_output_53_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_875 = const()[name = tensor<string, []>("op_875"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_53_cast_fp16 = transpose(perm = attn_output_53_perm_0, x = attn_output_51_cast_fp16)[name = tensor<string, []>("transpose_12")];
            tensor<fp16, [1, 77, 768]> input_137_cast_fp16 = reshape(shape = var_875, x = attn_output_53_cast_fp16)[name = tensor<string, []>("input_137_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119920512))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120362944))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120363136)))];
            tensor<fp16, [1, 77, 768]> linear_51_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized, x = input_137_cast_fp16)[name = tensor<string, []>("linear_51_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_139_cast_fp16 = add(x = input_131_cast_fp16, y = linear_51_cast_fp16)[name = tensor<string, []>("input_139_cast_fp16")];
            tensor<int32, [1]> input_141_axes_0 = const()[name = tensor<string, []>("input_141_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120364736)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120366336)))];
            tensor<fp16, [1, 77, 768]> input_141_cast_fp16 = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16, x = input_139_cast_fp16)[name = tensor<string, []>("input_141_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(120367936))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122137472))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122137664)))];
            tensor<fp16, [1, 77, 3072]> linear_52_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized, x = input_141_cast_fp16)[name = tensor<string, []>("linear_52_cast_fp16")];
            tensor<fp16, []> var_890_to_fp16 = const()[name = tensor<string, []>("op_890_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_891_cast_fp16 = mul(x = linear_52_cast_fp16, y = var_890_to_fp16)[name = tensor<string, []>("op_891_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_892_cast_fp16 = sigmoid(x = var_891_cast_fp16)[name = tensor<string, []>("op_892_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_145_cast_fp16 = mul(x = linear_52_cast_fp16, y = var_892_cast_fp16)[name = tensor<string, []>("input_145_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122143872))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(123913408))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(123913600)))];
            tensor<fp16, [1, 77, 768]> linear_53_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized, x = input_145_cast_fp16)[name = tensor<string, []>("linear_53_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_147_cast_fp16 = add(x = input_139_cast_fp16, y = linear_53_cast_fp16)[name = tensor<string, []>("input_147_cast_fp16")];
            tensor<int32, [1]> hidden_states_55_axes_0 = const()[name = tensor<string, []>("hidden_states_55_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(123915200)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(123916800)))];
            tensor<fp16, [1, 77, 768]> hidden_states_55_cast_fp16 = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16, x = input_147_cast_fp16)[name = tensor<string, []>("hidden_states_55_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(123918400))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124360832))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124361024)))];
            tensor<fp16, [1, 77, 768]> linear_54_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_55_cast_fp16)[name = tensor<string, []>("linear_54_cast_fp16")];
            tensor<fp16, []> var_917_to_fp16 = const()[name = tensor<string, []>("op_917_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_59_cast_fp16 = mul(x = linear_54_cast_fp16, y = var_917_to_fp16)[name = tensor<string, []>("tensor_59_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124362624))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124805056))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124805248)))];
            tensor<fp16, [1, 77, 768]> linear_55_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_55_cast_fp16)[name = tensor<string, []>("linear_55_cast_fp16")];
            tensor<int32, [4]> var_922 = const()[name = tensor<string, []>("op_922"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_923_cast_fp16 = reshape(shape = var_922, x = linear_55_cast_fp16)[name = tensor<string, []>("op_923_cast_fp16")];
            tensor<int32, [4]> var_924_perm_0 = const()[name = tensor<string, []>("op_924_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(124806848))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125249280))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125249472)))];
            tensor<fp16, [1, 77, 768]> linear_56_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_55_cast_fp16)[name = tensor<string, []>("linear_56_cast_fp16")];
            tensor<int32, [4]> var_929 = const()[name = tensor<string, []>("op_929"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_930_cast_fp16 = reshape(shape = var_929, x = linear_56_cast_fp16)[name = tensor<string, []>("op_930_cast_fp16")];
            tensor<int32, [4]> var_931_perm_0 = const()[name = tensor<string, []>("op_931_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_938 = const()[name = tensor<string, []>("op_938"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_939_cast_fp16 = reshape(shape = var_938, x = tensor_59_cast_fp16)[name = tensor<string, []>("op_939_cast_fp16")];
            tensor<int32, [4]> var_940_perm_0 = const()[name = tensor<string, []>("op_940_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_942 = const()[name = tensor<string, []>("op_942"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_940_cast_fp16 = transpose(perm = var_940_perm_0, x = var_939_cast_fp16)[name = tensor<string, []>("transpose_11")];
            tensor<fp16, [12, 77, 64]> query_states_19_cast_fp16 = reshape(shape = var_942, x = var_940_cast_fp16)[name = tensor<string, []>("query_states_19_cast_fp16")];
            tensor<int32, [3]> var_944 = const()[name = tensor<string, []>("op_944"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_924_cast_fp16 = transpose(perm = var_924_perm_0, x = var_923_cast_fp16)[name = tensor<string, []>("transpose_10")];
            tensor<fp16, [12, 77, 64]> key_states_39_cast_fp16 = reshape(shape = var_944, x = var_924_cast_fp16)[name = tensor<string, []>("key_states_39_cast_fp16")];
            tensor<int32, [3]> var_946 = const()[name = tensor<string, []>("op_946"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_931_cast_fp16 = transpose(perm = var_931_perm_0, x = var_930_cast_fp16)[name = tensor<string, []>("transpose_9")];
            tensor<fp16, [12, 77, 64]> value_states_39_cast_fp16 = reshape(shape = var_946, x = var_931_cast_fp16)[name = tensor<string, []>("value_states_39_cast_fp16")];
            tensor<bool, []> attn_weights_55_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_55_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_55_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_55_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_55_cast_fp16 = matmul(transpose_x = attn_weights_55_transpose_x_1, transpose_y = attn_weights_55_transpose_y_1, x = query_states_19_cast_fp16, y = key_states_39_cast_fp16)[name = tensor<string, []>("attn_weights_55_cast_fp16")];
            tensor<int32, [4]> var_951 = const()[name = tensor<string, []>("op_951"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_952_cast_fp16 = reshape(shape = var_951, x = attn_weights_55_cast_fp16)[name = tensor<string, []>("op_952_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_57_cast_fp16 = add(x = var_952_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_57_cast_fp16")];
            tensor<int32, [3]> var_957 = const()[name = tensor<string, []>("op_957"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_149_cast_fp16 = reshape(shape = var_957, x = attn_weights_57_cast_fp16)[name = tensor<string, []>("input_149_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_151_cast_fp16 = softmax(axis = var_5, x = input_149_cast_fp16)[name = tensor<string, []>("input_151_cast_fp16")];
            tensor<bool, []> attn_output_55_transpose_x_0 = const()[name = tensor<string, []>("attn_output_55_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_55_transpose_y_0 = const()[name = tensor<string, []>("attn_output_55_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_55_cast_fp16 = matmul(transpose_x = attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151_cast_fp16, y = value_states_39_cast_fp16)[name = tensor<string, []>("attn_output_55_cast_fp16")];
            tensor<int32, [4]> var_962 = const()[name = tensor<string, []>("op_962"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_57_cast_fp16 = reshape(shape = var_962, x = attn_output_55_cast_fp16)[name = tensor<string, []>("attn_output_57_cast_fp16")];
            tensor<int32, [4]> attn_output_59_perm_0 = const()[name = tensor<string, []>("attn_output_59_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_965 = const()[name = tensor<string, []>("op_965"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_59_cast_fp16 = transpose(perm = attn_output_59_perm_0, x = attn_output_57_cast_fp16)[name = tensor<string, []>("transpose_8")];
            tensor<fp16, [1, 77, 768]> input_153_cast_fp16 = reshape(shape = var_965, x = attn_output_59_cast_fp16)[name = tensor<string, []>("input_153_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125251072))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125693504))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125693696)))];
            tensor<fp16, [1, 77, 768]> linear_57_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized, x = input_153_cast_fp16)[name = tensor<string, []>("linear_57_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_155_cast_fp16 = add(x = input_147_cast_fp16, y = linear_57_cast_fp16)[name = tensor<string, []>("input_155_cast_fp16")];
            tensor<int32, [1]> input_157_axes_0 = const()[name = tensor<string, []>("input_157_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125695296)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125696896)))];
            tensor<fp16, [1, 77, 768]> input_157_cast_fp16 = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16, x = input_155_cast_fp16)[name = tensor<string, []>("input_157_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(125698496))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127468032))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127468224)))];
            tensor<fp16, [1, 77, 3072]> linear_58_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized, x = input_157_cast_fp16)[name = tensor<string, []>("linear_58_cast_fp16")];
            tensor<fp16, []> var_980_to_fp16 = const()[name = tensor<string, []>("op_980_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_981_cast_fp16 = mul(x = linear_58_cast_fp16, y = var_980_to_fp16)[name = tensor<string, []>("op_981_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_982_cast_fp16 = sigmoid(x = var_981_cast_fp16)[name = tensor<string, []>("op_982_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_161_cast_fp16 = mul(x = linear_58_cast_fp16, y = var_982_cast_fp16)[name = tensor<string, []>("input_161_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(127474432))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129243968))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129244160)))];
            tensor<fp16, [1, 77, 768]> linear_59_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized, x = input_161_cast_fp16)[name = tensor<string, []>("linear_59_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_163_cast_fp16 = add(x = input_155_cast_fp16, y = linear_59_cast_fp16)[name = tensor<string, []>("input_163_cast_fp16")];
            tensor<int32, [1]> hidden_states_61_axes_0 = const()[name = tensor<string, []>("hidden_states_61_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129245760)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129247360)))];
            tensor<fp16, [1, 77, 768]> hidden_states_61_cast_fp16 = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16, x = input_163_cast_fp16)[name = tensor<string, []>("hidden_states_61_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129248960))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129691392))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129691584)))];
            tensor<fp16, [1, 77, 768]> linear_60_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_61_cast_fp16)[name = tensor<string, []>("linear_60_cast_fp16")];
            tensor<fp16, []> var_1007_to_fp16 = const()[name = tensor<string, []>("op_1007_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_65_cast_fp16 = mul(x = linear_60_cast_fp16, y = var_1007_to_fp16)[name = tensor<string, []>("tensor_65_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(129693184))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130135616))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130135808)))];
            tensor<fp16, [1, 77, 768]> linear_61_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_61_cast_fp16)[name = tensor<string, []>("linear_61_cast_fp16")];
            tensor<int32, [4]> var_1012 = const()[name = tensor<string, []>("op_1012"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1013_cast_fp16 = reshape(shape = var_1012, x = linear_61_cast_fp16)[name = tensor<string, []>("op_1013_cast_fp16")];
            tensor<int32, [4]> var_1014_perm_0 = const()[name = tensor<string, []>("op_1014_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130137408))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130579840))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130580032)))];
            tensor<fp16, [1, 77, 768]> linear_62_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_61_cast_fp16)[name = tensor<string, []>("linear_62_cast_fp16")];
            tensor<int32, [4]> var_1019 = const()[name = tensor<string, []>("op_1019"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1020_cast_fp16 = reshape(shape = var_1019, x = linear_62_cast_fp16)[name = tensor<string, []>("op_1020_cast_fp16")];
            tensor<int32, [4]> var_1021_perm_0 = const()[name = tensor<string, []>("op_1021_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_1028 = const()[name = tensor<string, []>("op_1028"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1029_cast_fp16 = reshape(shape = var_1028, x = tensor_65_cast_fp16)[name = tensor<string, []>("op_1029_cast_fp16")];
            tensor<int32, [4]> var_1030_perm_0 = const()[name = tensor<string, []>("op_1030_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_1032 = const()[name = tensor<string, []>("op_1032"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1030_cast_fp16 = transpose(perm = var_1030_perm_0, x = var_1029_cast_fp16)[name = tensor<string, []>("transpose_7")];
            tensor<fp16, [12, 77, 64]> query_states_21_cast_fp16 = reshape(shape = var_1032, x = var_1030_cast_fp16)[name = tensor<string, []>("query_states_21_cast_fp16")];
            tensor<int32, [3]> var_1034 = const()[name = tensor<string, []>("op_1034"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1014_cast_fp16 = transpose(perm = var_1014_perm_0, x = var_1013_cast_fp16)[name = tensor<string, []>("transpose_6")];
            tensor<fp16, [12, 77, 64]> key_states_43_cast_fp16 = reshape(shape = var_1034, x = var_1014_cast_fp16)[name = tensor<string, []>("key_states_43_cast_fp16")];
            tensor<int32, [3]> var_1036 = const()[name = tensor<string, []>("op_1036"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1021_cast_fp16 = transpose(perm = var_1021_perm_0, x = var_1020_cast_fp16)[name = tensor<string, []>("transpose_5")];
            tensor<fp16, [12, 77, 64]> value_states_43_cast_fp16 = reshape(shape = var_1036, x = var_1021_cast_fp16)[name = tensor<string, []>("value_states_43_cast_fp16")];
            tensor<bool, []> attn_weights_61_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_61_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_61_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_61_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_61_cast_fp16 = matmul(transpose_x = attn_weights_61_transpose_x_1, transpose_y = attn_weights_61_transpose_y_1, x = query_states_21_cast_fp16, y = key_states_43_cast_fp16)[name = tensor<string, []>("attn_weights_61_cast_fp16")];
            tensor<int32, [4]> var_1041 = const()[name = tensor<string, []>("op_1041"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_1042_cast_fp16 = reshape(shape = var_1041, x = attn_weights_61_cast_fp16)[name = tensor<string, []>("op_1042_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_63_cast_fp16 = add(x = var_1042_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_63_cast_fp16")];
            tensor<int32, [3]> var_1047 = const()[name = tensor<string, []>("op_1047"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_165_cast_fp16 = reshape(shape = var_1047, x = attn_weights_63_cast_fp16)[name = tensor<string, []>("input_165_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_167_cast_fp16 = softmax(axis = var_5, x = input_165_cast_fp16)[name = tensor<string, []>("input_167_cast_fp16")];
            tensor<bool, []> attn_output_61_transpose_x_0 = const()[name = tensor<string, []>("attn_output_61_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_61_transpose_y_0 = const()[name = tensor<string, []>("attn_output_61_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_61_cast_fp16 = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast_fp16, y = value_states_43_cast_fp16)[name = tensor<string, []>("attn_output_61_cast_fp16")];
            tensor<int32, [4]> var_1052 = const()[name = tensor<string, []>("op_1052"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_63_cast_fp16 = reshape(shape = var_1052, x = attn_output_61_cast_fp16)[name = tensor<string, []>("attn_output_63_cast_fp16")];
            tensor<int32, [4]> attn_output_65_perm_0 = const()[name = tensor<string, []>("attn_output_65_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_1055 = const()[name = tensor<string, []>("op_1055"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_65_cast_fp16 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast_fp16)[name = tensor<string, []>("transpose_4")];
            tensor<fp16, [1, 77, 768]> input_169_cast_fp16 = reshape(shape = var_1055, x = attn_output_65_cast_fp16)[name = tensor<string, []>("input_169_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(130581632))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131024064))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131024256)))];
            tensor<fp16, [1, 77, 768]> linear_63_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized, x = input_169_cast_fp16)[name = tensor<string, []>("linear_63_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_171_cast_fp16 = add(x = input_163_cast_fp16, y = linear_63_cast_fp16)[name = tensor<string, []>("input_171_cast_fp16")];
            tensor<int32, [1]> input_173_axes_0 = const()[name = tensor<string, []>("input_173_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131025856)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131027456)))];
            tensor<fp16, [1, 77, 768]> input_173_cast_fp16 = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast_fp16)[name = tensor<string, []>("input_173_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(131029056))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132798592))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132798784)))];
            tensor<fp16, [1, 77, 3072]> linear_64_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized, x = input_173_cast_fp16)[name = tensor<string, []>("linear_64_cast_fp16")];
            tensor<fp16, []> var_1070_to_fp16 = const()[name = tensor<string, []>("op_1070_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_1071_cast_fp16 = mul(x = linear_64_cast_fp16, y = var_1070_to_fp16)[name = tensor<string, []>("op_1071_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_1072_cast_fp16 = sigmoid(x = var_1071_cast_fp16)[name = tensor<string, []>("op_1072_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_177_cast_fp16 = mul(x = linear_64_cast_fp16, y = var_1072_cast_fp16)[name = tensor<string, []>("input_177_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132804992))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134574528))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134574720)))];
            tensor<fp16, [1, 77, 768]> linear_65_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized, x = input_177_cast_fp16)[name = tensor<string, []>("linear_65_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_179_cast_fp16 = add(x = input_171_cast_fp16, y = linear_65_cast_fp16)[name = tensor<string, []>("input_179_cast_fp16")];
            tensor<string, []> input_179_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("input_179_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<int32, [1]> hidden_states_67_axes_0 = const()[name = tensor<string, []>("hidden_states_67_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134576320)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134577920)))];
            tensor<fp16, [1, 77, 768]> hidden_states_67_cast_fp16 = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast_fp16)[name = tensor<string, []>("hidden_states_67_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134579520))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135021952))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135022144)))];
            tensor<fp16, [1, 77, 768]> linear_66_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_67_cast_fp16)[name = tensor<string, []>("linear_66_cast_fp16")];
            tensor<fp16, []> var_1097_to_fp16 = const()[name = tensor<string, []>("op_1097_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
            tensor<fp16, [1, 77, 768]> tensor_cast_fp16 = mul(x = linear_66_cast_fp16, y = var_1097_to_fp16)[name = tensor<string, []>("tensor_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135023744))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135466176))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135466368)))];
            tensor<fp16, [1, 77, 768]> linear_67_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_67_cast_fp16)[name = tensor<string, []>("linear_67_cast_fp16")];
            tensor<int32, [4]> var_1102 = const()[name = tensor<string, []>("op_1102"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1103_cast_fp16 = reshape(shape = var_1102, x = linear_67_cast_fp16)[name = tensor<string, []>("op_1103_cast_fp16")];
            tensor<int32, [4]> var_1104_perm_0 = const()[name = tensor<string, []>("op_1104_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135467968))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135910400))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135910592)))];
            tensor<fp16, [1, 77, 768]> linear_68_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_67_cast_fp16)[name = tensor<string, []>("linear_68_cast_fp16")];
            tensor<int32, [4]> var_1109 = const()[name = tensor<string, []>("op_1109"), val = tensor<int32, [4]>([1, -1, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1110_cast_fp16 = reshape(shape = var_1109, x = linear_68_cast_fp16)[name = tensor<string, []>("op_1110_cast_fp16")];
            tensor<int32, [4]> var_1111_perm_0 = const()[name = tensor<string, []>("op_1111_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [4]> var_1118 = const()[name = tensor<string, []>("op_1118"), val = tensor<int32, [4]>([1, 77, 12, 64])];
            tensor<fp16, [1, 77, 12, 64]> var_1119_cast_fp16 = reshape(shape = var_1118, x = tensor_cast_fp16)[name = tensor<string, []>("op_1119_cast_fp16")];
            tensor<int32, [4]> var_1120_perm_0 = const()[name = tensor<string, []>("op_1120_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_1122 = const()[name = tensor<string, []>("op_1122"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1120_cast_fp16 = transpose(perm = var_1120_perm_0, x = var_1119_cast_fp16)[name = tensor<string, []>("transpose_3")];
            tensor<fp16, [12, 77, 64]> query_states_cast_fp16 = reshape(shape = var_1122, x = var_1120_cast_fp16)[name = tensor<string, []>("query_states_cast_fp16")];
            tensor<int32, [3]> var_1124 = const()[name = tensor<string, []>("op_1124"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1104_cast_fp16 = transpose(perm = var_1104_perm_0, x = var_1103_cast_fp16)[name = tensor<string, []>("transpose_2")];
            tensor<fp16, [12, 77, 64]> key_states_cast_fp16 = reshape(shape = var_1124, x = var_1104_cast_fp16)[name = tensor<string, []>("key_states_cast_fp16")];
            tensor<int32, [3]> var_1126 = const()[name = tensor<string, []>("op_1126"), val = tensor<int32, [3]>([12, -1, 64])];
            tensor<fp16, [1, 12, 77, 64]> var_1111_cast_fp16 = transpose(perm = var_1111_perm_0, x = var_1110_cast_fp16)[name = tensor<string, []>("transpose_1")];
            tensor<fp16, [12, 77, 64]> value_states_cast_fp16 = reshape(shape = var_1126, x = var_1111_cast_fp16)[name = tensor<string, []>("value_states_cast_fp16")];
            tensor<bool, []> attn_weights_67_transpose_x_1 = const()[name = tensor<string, []>("attn_weights_67_transpose_x_1"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_weights_67_transpose_y_1 = const()[name = tensor<string, []>("attn_weights_67_transpose_y_1"), val = tensor<bool, []>(true)];
            tensor<fp16, [12, 77, 77]> attn_weights_67_cast_fp16 = matmul(transpose_x = attn_weights_67_transpose_x_1, transpose_y = attn_weights_67_transpose_y_1, x = query_states_cast_fp16, y = key_states_cast_fp16)[name = tensor<string, []>("attn_weights_67_cast_fp16")];
            tensor<int32, [4]> var_1131 = const()[name = tensor<string, []>("op_1131"), val = tensor<int32, [4]>([1, 12, 77, 77])];
            tensor<fp16, [1, 12, 77, 77]> var_1132_cast_fp16 = reshape(shape = var_1131, x = attn_weights_67_cast_fp16)[name = tensor<string, []>("op_1132_cast_fp16")];
            tensor<fp16, [1, 12, 77, 77]> attn_weights_69_cast_fp16 = add(x = var_1132_cast_fp16, y = op_56_to_fp16_palettized)[name = tensor<string, []>("attn_weights_69_cast_fp16")];
            tensor<int32, [3]> var_1137 = const()[name = tensor<string, []>("op_1137"), val = tensor<int32, [3]>([12, 77, 77])];
            tensor<fp16, [12, 77, 77]> input_181_cast_fp16 = reshape(shape = var_1137, x = attn_weights_69_cast_fp16)[name = tensor<string, []>("input_181_cast_fp16")];
            tensor<fp16, [12, 77, 77]> input_183_cast_fp16 = softmax(axis = var_5, x = input_181_cast_fp16)[name = tensor<string, []>("input_183_cast_fp16")];
            tensor<bool, []> attn_output_67_transpose_x_0 = const()[name = tensor<string, []>("attn_output_67_transpose_x_0"), val = tensor<bool, []>(false)];
            tensor<bool, []> attn_output_67_transpose_y_0 = const()[name = tensor<string, []>("attn_output_67_transpose_y_0"), val = tensor<bool, []>(false)];
            tensor<fp16, [12, 77, 64]> attn_output_67_cast_fp16 = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast_fp16, y = value_states_cast_fp16)[name = tensor<string, []>("attn_output_67_cast_fp16")];
            tensor<int32, [4]> var_1142 = const()[name = tensor<string, []>("op_1142"), val = tensor<int32, [4]>([1, 12, 77, 64])];
            tensor<fp16, [1, 12, 77, 64]> attn_output_69_cast_fp16 = reshape(shape = var_1142, x = attn_output_67_cast_fp16)[name = tensor<string, []>("attn_output_69_cast_fp16")];
            tensor<int32, [4]> attn_output_perm_0 = const()[name = tensor<string, []>("attn_output_perm_0"), val = tensor<int32, [4]>([0, 2, 1, 3])];
            tensor<int32, [3]> var_1145 = const()[name = tensor<string, []>("op_1145"), val = tensor<int32, [3]>([1, 77, 768])];
            tensor<fp16, [1, 77, 12, 64]> attn_output_cast_fp16 = transpose(perm = attn_output_perm_0, x = attn_output_69_cast_fp16)[name = tensor<string, []>("transpose_0")];
            tensor<fp16, [1, 77, 768]> input_185_cast_fp16 = reshape(shape = var_1145, x = attn_output_cast_fp16)[name = tensor<string, []>("input_185_cast_fp16")];
            tensor<fp16, [768, 768]> text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(135912192))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(136354624))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 768])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(136354816)))];
            tensor<fp16, [1, 77, 768]> linear_69_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized, x = input_185_cast_fp16)[name = tensor<string, []>("linear_69_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_187_cast_fp16 = add(x = input_179_cast_fp16, y = linear_69_cast_fp16)[name = tensor<string, []>("input_187_cast_fp16")];
            tensor<int32, [1]> input_189_axes_0 = const()[name = tensor<string, []>("input_189_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(136356416)))];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(136358016)))];
            tensor<fp16, [1, 77, 768]> input_189_cast_fp16 = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast_fp16)[name = tensor<string, []>("input_189_cast_fp16")];
            tensor<fp16, [3072, 768]> text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(136359616))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(138129152))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([3072, 768])];
            tensor<fp16, [3072]> text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16"), val = tensor<fp16, [3072]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(138129344)))];
            tensor<fp16, [1, 77, 3072]> linear_70_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized, x = input_189_cast_fp16)[name = tensor<string, []>("linear_70_cast_fp16")];
            tensor<fp16, []> var_1160_to_fp16 = const()[name = tensor<string, []>("op_1160_to_fp16"), val = tensor<fp16, []>(0x1.b3cp+0)];
            tensor<fp16, [1, 77, 3072]> var_1161_cast_fp16 = mul(x = linear_70_cast_fp16, y = var_1160_to_fp16)[name = tensor<string, []>("op_1161_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> var_1162_cast_fp16 = sigmoid(x = var_1161_cast_fp16)[name = tensor<string, []>("op_1162_cast_fp16")];
            tensor<fp16, [1, 77, 3072]> input_193_cast_fp16 = mul(x = linear_70_cast_fp16, y = var_1162_cast_fp16)[name = tensor<string, []>("input_193_cast_fp16")];
            tensor<fp16, [768, 3072]> text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1769472]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(138135552))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(139905088))), name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([768, 3072])];
            tensor<fp16, [768]> text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(139905280)))];
            tensor<fp16, [1, 77, 768]> linear_71_cast_fp16 = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized, x = input_193_cast_fp16)[name = tensor<string, []>("linear_71_cast_fp16")];
            tensor<fp16, [1, 77, 768]> input_cast_fp16 = add(x = input_187_cast_fp16, y = linear_71_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
            tensor<int32, [1]> last_hidden_state_axes_0 = const()[name = tensor<string, []>("last_hidden_state_axes_0"), val = tensor<int32, [1]>([-1])];
            tensor<fp16, [768]> text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(139906880)))];
            tensor<fp16, [768]> text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor<string, []>("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor<fp16, [768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(139908480)))];
            tensor<fp16, [1, 77, 768]> last_hidden_state_cast_fp16 = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("last_hidden_state_cast_fp16")];
            tensor<int32, [1]> var_1176 = const()[name = tensor<string, []>("op_1176"), val = tensor<int32, [1]>([0])];
            tensor<int32, [1]> var_1178 = reduce_argmax(axis = var_5, keep_dims = var_6, x = cast_1)[name = tensor<string, []>("op_1178")];
            tensor<int32, []> stack_0_axis_0 = const()[name = tensor<string, []>("stack_0_axis_0"), val = tensor<int32, []>(1)];
            tensor<int32, [1, 2]> stack_0 = stack(axis = stack_0_axis_0, values = (var_1176, var_1178))[name = tensor<string, []>("stack_0")];
            tensor<int32, []> var_1180_transpose_batch_dims_0 = const()[name = tensor<string, []>("op_1180_transpose_batch_dims_0"), val = tensor<int32, []>(0)];
            tensor<fp16, [1, 768]> var_1180_transpose_cast_fp16 = gather_nd(batch_dims = var_1180_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_cast_fp16)[name = tensor<string, []>("op_1180_transpose_cast_fp16")];
            tensor<string, []> var_1180_cast_fp16_to_fp32_dtype_0 = const()[name = tensor<string, []>("op_1180_cast_fp16_to_fp32_dtype_0"), val = tensor<string, []>("fp32")];
            tensor<fp32, [1, 77, 768]> hidden_embeds = cast(dtype = input_179_cast_fp16_to_fp32_dtype_0, x = input_179_cast_fp16)[name = tensor<string, []>("cast_0")];
            tensor<fp32, [1, 768]> pooled_outputs = cast(dtype = var_1180_cast_fp16_to_fp32_dtype_0, x = var_1180_transpose_cast_fp16)[name = tensor<string, []>("cast_1")];
        } -> (hidden_embeds, pooled_outputs);
}