No virus detected
Detected Pickle imports (166) (an inspection sketch follows the list)
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaModel",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1115.Dropout",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1146.Dropout",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1159.LayerNorm",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1157.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1135.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1231.RobertaAttention",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1127.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1239.RobertaOutput",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1194.RobertaLayer",
- "torch.QInt8Storage",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1209.LinearPackedParams",
- "__torch__.sentence_transformers.models.Transformer.___torch_mangle_1245.Transformer",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1238.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1200.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1222.LinearPackedParams",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1201.Dropout",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1205.LayerNorm",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1154.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1227.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1211.RobertaIntermediate",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1142.RobertaIntermediate",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1224.Dropout",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1138.RobertaSelfOutput",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1136.LayerNorm",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1214.LayerNorm",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1235.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1151.LinearPackedParams",
- "__torch__.torch.nn.modules.sparse.___torch_mangle_1105.Embedding",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1172.LinearPackedParams",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1169.Dropout",
- "__torch__.torch.nn.modules.container.___torch_mangle_1241.ModuleList",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1191.LayerNorm",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1156.RobertaSelfAttention",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1139.RobertaAttention",
- "__torch__.sentence_transformers.SentenceTransformer.___torch_mangle_1248.SentenceTransformer",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1176.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1122.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1148.RobertaLayer",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1162.RobertaAttention",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1175.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1134.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1174.LinearPackedParams",
- "__torch__.torch.nn.modules.sparse.___torch_mangle_1106.Embedding",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1182.LayerNorm",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1126.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1193.RobertaOutput",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1202.RobertaSelfAttention",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1228.LayerNorm",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaAttention",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1236.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaPooler",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1229.Dropout",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1207.RobertaSelfOutput",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1153.LinearPackedParams",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1132.Dropout",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1137.Dropout",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1216.RobertaOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1177.Linear",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1108.Dropout",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1168.LayerNorm",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaEmbeddings",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1121.Linear",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1192.Dropout",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1125.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1131.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1243.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1184.RobertaSelfOutput",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1189.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1197.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1186.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1187.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1204.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1109.LinearPackedParams",
- "torch._utils._rebuild_qtensor",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1219.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1163.LinearPackedParams",
- "__torch__.sentence_transformers.models.Normalize.___torch_mangle_1247.Normalize",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1173.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1232.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1166.LinearPackedParams",
- "torch.LongStorage",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1203.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1140.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1116.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaEncoder",
- "torch.FloatStorage",
- "torch.per_tensor_affine",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1225.RobertaSelfAttention",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1141.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1242.LinearPackedParams",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1178.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1198.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1240.RobertaLayer",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1218.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1230.RobertaSelfOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1213.Linear",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1237.LayerNorm",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1195.LinearPackedParams",
- "__torch__.torch.nn.modules.sparse.___torch_mangle_1104.Embedding",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1155.Dropout",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1199.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1114.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaIntermediate",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1181.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1208.RobertaAttention",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1113.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1133.RobertaSelfAttention",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1149.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1120.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1221.Linear",
- "__torch__.torch.nn.modules.activation.___torch_mangle_1244.Tanh",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1170.RobertaOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1190.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1171.RobertaLayer",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1160.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1123.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1196.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1147.RobertaOutput",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaLayer",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1152.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1130.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1129.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1223.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1210.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1212.LinearPackedParams",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1112.Linear",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1124.LayerNorm",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1150.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1165.RobertaIntermediate",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1107.LayerNorm",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1118.LayerNorm",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaSelfOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1117.Linear",
- "__torch__.transformers.models.roberta.modeling_roberta.RobertaSelfAttention",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1220.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1143.LinearPackedParams",
- "__torch__.sentence_transformers.models.Pooling.___torch_mangle_1246.Pooling",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1188.RobertaIntermediate",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1206.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1158.Linear",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1167.Linear",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1183.Dropout",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1217.RobertaLayer",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1180.LinearPackedParams",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1128.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1234.RobertaIntermediate",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1161.RobertaSelfOutput",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1233.Linear",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1119.Dropout",
- "__torch__.torch.nn.modules.dropout.___torch_mangle_1215.Dropout",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1144.Linear",
- "__torch__.torch.nn.modules.normalization.___torch_mangle_1145.LayerNorm",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1226.LinearPackedParams",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1179.RobertaSelfAttention",
- "__torch__.transformers.models.roberta.modeling_roberta.___torch_mangle_1185.RobertaAttention",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1164.Linear",
- "torch._utils._rebuild_tensor_v2",
- "__torch__.torch.nn.quantized.dynamic.modules.linear.___torch_mangle_1110.Linear",
- "__torch__.torch.nn.quantized.modules.linear.___torch_mangle_1111.LinearPackedParams",
- "collections.OrderedDict",
- "torch._utils._rebuild_tensor_v2",
- "torch.DoubleStorage",
- "collections.OrderedDict",
- "torch.LongStorage"
Git LFS Details
- SHA256: 971c9ec4a197fe06a9618921476a4bf4562168d549c5fe09c373702f1c00a14c
- Pointer size: 134 bytes
- Size of remote file: 200 MB
Git Large File Storage (LFS) replaces large files with text pointers inside Git, while storing the file contents on a remote server.
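Concretely, what Git tracks in place of this 200 MB blob is a three-line text pointer built from the fields above. A sketch of that pointer follows; the `size` field holds the exact byte count, which the page rounds to 200 MB, so it is left as a placeholder here (the 134-byte pointer size implies a nine-digit count).

```
version https://git-lfs.github.com/spec/v1
oid sha256:971c9ec4a197fe06a9618921476a4bf4562168d549c5fe09c373702f1c00a14c
size <exact byte count in bytes>
```

When the repository is cloned with LFS installed, `git lfs pull` resolves pointers like this one and fetches the real file contents from the LFS server.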