- 1.18 kB
- all-MiniLM-L6-v2_pytorch_model.pth (90.9 MB, LFS)
  Detected Pickle imports (4): torch.FloatStorage, torch._utils._rebuild_tensor_v2, torch.LongStorage, collections.OrderedDict
- all-distilroberta-v1_pytorch_model.pth (329 MB, LFS)
  Detected Pickle imports (4): torch.FloatStorage, torch._utils._rebuild_tensor_v2, collections.OrderedDict, torch.LongStorage
- all-mpnet-base-v1_pytorch_model.pth (438 MB, LFS)
  Detected Pickle imports (4): collections.OrderedDict, torch._utils._rebuild_tensor_v2, torch.LongStorage, torch.FloatStorage
- qall-MiniLM-L6-v2_pytorch_model.pth (58.9 MB, LFS)
  Detected Pickle imports (166): torch.FloatStorage, torch.LongStorage, torch.DoubleStorage, torch.QInt8Storage, torch.per_tensor_affine, torch._utils._rebuild_tensor_v2, torch._utils._rebuild_qtensor, collections.OrderedDict, plus TorchScript-mangled (__torch__.*.___torch_mangle_*) module classes, one copy per scripted submodule: sentence_transformers SentenceTransformer, Transformer, Pooling, Normalize; transformers BertModel, BertEncoder, BertEmbeddings, BertPooler, BertLayer, BertAttention, BertSelfAttention, BertSelfOutput, BertIntermediate, BertOutput; torch.nn quantized dynamic Linear, quantized LinearPackedParams, LayerNorm, Dropout, Embedding, ModuleList, Tanh
- qall-distilroberta-v1_pytorch_model.pth (200 MB, LFS)
  Detected Pickle imports (166): torch.FloatStorage, torch.LongStorage, torch.DoubleStorage, torch.QInt8Storage, torch.per_tensor_affine, torch._utils._rebuild_tensor_v2, torch._utils._rebuild_qtensor, collections.OrderedDict, plus TorchScript-mangled module classes: sentence_transformers SentenceTransformer, Transformer, Pooling, Normalize; transformers RobertaModel, RobertaEncoder, RobertaEmbeddings, RobertaPooler, RobertaLayer, RobertaAttention, RobertaSelfAttention, RobertaSelfOutput, RobertaIntermediate, RobertaOutput; torch.nn quantized dynamic Linear, quantized LinearPackedParams, LayerNorm, Dropout, Embedding, ModuleList, Tanh
- qall-mpnet-base-v1_pytorch_model.pth (182 MB, LFS)
  Detected Pickle imports (292): torch.FloatStorage, torch.LongStorage, torch.DoubleStorage, torch.QInt8Storage, torch.per_tensor_affine, torch._utils._rebuild_tensor_v2, torch._utils._rebuild_qtensor, collections.OrderedDict, plus TorchScript-mangled module classes: sentence_transformers SentenceTransformer, Transformer, Pooling, Normalize; transformers MPNetModel, MPNetEncoder, MPNetEmbeddings, MPNetPooler, MPNetLayer, MPNetAttention, MPNetSelfAttention, MPNetIntermediate, MPNetOutput; torch.nn quantized dynamic Linear, quantized LinearPackedParams, LayerNorm, Dropout, Embedding, ModuleList, Tanh
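
Imports such as torch.nn.quantized.dynamic.modules.linear.Linear, LinearPackedParams, torch.QInt8Storage and the __torch__.*.___torch_mangle_* classes suggest the q*-prefixed files are serialized TorchScript modules of dynamically quantized models rather than bare state dicts, so they would normally be opened with torch.jit.load. A hedged sketch; the path and the inspection calls are assumptions, not taken from this repository:

```python
import torch

# Assumption: the q*-prefixed .pth files are TorchScript archives of a
# dynamically quantized SentenceTransformer, so torch.jit.load (rather
# than a plain state_dict load) is the appropriate entry point.
scripted = torch.jit.load("qall-mpnet-base-v1_pytorch_model.pth",
                          map_location="cpu")
scripted.eval()

# The exact forward signature depends on how the model was scripted or
# traced; the recovered TorchScript code and submodule names show what
# inputs the archive expects.
print(scripted.code)
for name, child in scripted.named_children():
    print(name, type(child).__name__)
```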