LukasHug committed
Commit 5571213
1 Parent(s): 8820cae

Update v-lol-trains.py

Files changed (1)
  1. v-lol-trains.py +47 -27
v-lol-trains.py CHANGED
@@ -31,7 +31,6 @@ _HOMEPAGE = "https://huggingface.co/datasets/LukasHug/v-lol-trains/"
 
 _LICENSE = "cc-by-4.0"
 _IMAGES_URL = "https://huggingface.co/datasets/LukasHug/v-lol-trains/resolve/main/data"
-# _DIR = './data'
 _DIR = _IMAGES_URL
 # _URL_DATA = {
 #     "V-LoL-Trains-TheoryX": f"{_DIR}/V-LoL-Trains-TheoryX.zip",
@@ -53,28 +52,43 @@ _URL_DATA = {
     "V-LoL-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
     "V-LoL-Blocks-Numerical": f"{_DIR}/SimpleObjects_numerical_MichalskiTrains_base_scene_len_2-4.zip",
     "V-LoL-Blocks-Complex": f"{_DIR}/SimpleObjects_complex_MichalskiTrains_base_scene_len_2-4.zip",
-    "V-LoL-Trains-TheoryX-len7": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7.zip",
-    "V-LoL-Trains-Numerical-len7": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7.zip",
-    "V-LoL-Trains-Complex-len7": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7.zip",
-    "V-LoL-Random-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_2-4.zip",
-    "V-LoL-Random-Trains-TheoryX": f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_2-4.zip",
+    "V-LoL-Trains-TheoryX-len7":
+        {'train': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
+         'test': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7.zip"},
+    "V-LoL-Trains-Numerical-len7":
+        {'train': f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_2-4.zip",
+         'test': f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7.zip"},
+    "V-LoL-Trains-Complex-len7":
+        {'train': f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_2-4.zip",
+         'test': f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7.zip"},
+    "V-LoL-Random-Blocks-TheoryX":
+        {'train': f"{_DIR}/SimpleObjects_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
+         'test': f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_7.zip"},
+    "V-LoL-Random-Trains-TheoryX":
+        {'train': f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_2-4.zip",
+         'test': f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_7.zip"},
+    # "V-LoL-Trains-TheoryX-len7": f"{_DIR}/Trains_theoryx_MichalskiTrains_base_scene_len_7.zip",
+    # "V-LoL-Trains-Numerical-len7": f"{_DIR}/Trains_numerical_MichalskiTrains_base_scene_len_7.zip",
+    # "V-LoL-Trains-Complex-len7": f"{_DIR}/Trains_complex_MichalskiTrains_base_scene_len_7.zip",
+    # "V-LoL-Random-Blocks-TheoryX": f"{_DIR}/SimpleObjects_theoryx_RandomTrains_base_scene_len_2-4.zip",
+    # "V-LoL-Random-Trains-TheoryX": f"{_DIR}/Trains_theoryx_RandomTrains_base_scene_len_2-4.zip",
 }
 
 _NAMES = ["westbound", "eastbound"]
-
-
 class VLoLConfig(datasets.BuilderConfig):
     """Builder Config for Food-101"""
 
     def __init__(self, data_url, **kwargs):
         """BuilderConfig for Food-101.
         Args:
-            data_url: `string`, url to download the zip file from.
             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
             **kwargs: keyword arguments forwarded to super.
         """
         super(VLoLConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.data_url = data_url
+        if isinstance(data_url, dict):
+            self.metadata_urls = data_url
+        else:
+            self.metadata_urls = {'train': data_url, 'test': None}
 
 
 class vloltrains(datasets.GeneratorBasedBuilder):
@@ -104,24 +118,22 @@ class vloltrains(datasets.GeneratorBasedBuilder):
             task_templates=ImageClassification(image_column="image", label_column="label"),
         )
 
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        archive_path = os.path.join(dl_manager.download_and_extract(self.config.data_url), self.config.data_url.split('/')[-1].split('.')[0])
+    def get_data(self, dl_manager, url):
+        archive_path = os.path.join(dl_manager.download_and_extract(url), url.split('/')[-1].split('.')[0])
         # print containg folders
         print(os.listdir(archive_path))
         image_dir = os.path.join(archive_path, "images")
         metadata_pth = os.path.join(archive_path, "all_scenes", "all_scenes.json")
-        self.images, self.y, self.trains, self.masks = [], [], [], []
+        images, y, trains, masks = [], [], [], []
         # ds settings
         # load data
         with open(metadata_pth, 'r') as f:
             all_scenes = json.load(f)
             for scene in all_scenes['scenes']:
-                self.images.append(scene['image_filename'])
+                images.append(scene['image_filename'])
                 train = scene['train']
-                y = int(train.split(' ')[0] == 'east')
-                self.y.append(y)
-                # self.depths.append(scene['depth_map_filename'])
+                y.append(int(train.split(' ')[0] == 'east'))
+                # depths.append(scene['depth_map_filename'])
                 # if 'train' in scene:
                 #     # new json data format
                 #     train = scene['train']
@@ -133,24 +145,32 @@ class vloltrains(datasets.GeneratorBasedBuilder):
                 #     # old json data format
                 #     train = scene['m_train']
                 #     train = jsonpickle.decode(train)
-                #     # self.trains.append(train.replace('michalski_trains.m_train.', 'm_train.'))
+                #     # trains.append(train.replace('michalski_trains.m_train.', 'm_train.'))
                 #     # text = train.to_txt()
                 #     # t1 = MichalskiTrain.from_text(text, train_vis)
                 #     lab = int(train.get_label() == 'east')
-                #     self.y.append(lab)
-                #     self.trains.append(train)
-                #     self.masks.append(scene['car_masks'])
-        # split y and images into train and test
-        self.y_train, self.y_test, self.images_train, self.images_test = train_test_split(self.y, self.images,
-                                                                                           test_size=0.2, random_state=0)
+                #     y.append(lab)
+                #     trains.append(train)
+                #     masks.append(scene['car_masks'])
+        return image_dir, y, images
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        if self.config.metadata_urls['test'] is None:
+            image_dir, y, images = self.get_data(dl_manager, self.config.metadata_urls['train'])
+            image_dir_train, image_dir_test = image_dir, image_dir
+            y_train, y_test, images_train, images_test = train_test_split(y, images, test_size=0.2, random_state=0)
+        else:
+            image_dir_train, y_train, images_train = self.get_data(dl_manager, self.config.metadata_urls['train'])
+            image_dir_test, y_test, images_test = self.get_data(dl_manager, self.config.metadata_urls['test'])
         return [
            datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"image_dir": image_dir, "labels": self.y_train, "images": self.images_train}
+                gen_kwargs={"image_dir": image_dir_train, "labels": y_train, "images": images_train}
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"image_dir": image_dir, "labels": self.y_test, "images": self.images_test}
+                gen_kwargs={"image_dir": image_dir_test, "labels": y_test, "images": images_test}
             ),
         ]
 
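
As a usage sketch (not part of the committed file): with the per-split archives introduced above, the new len-7 configurations should load with a dedicated test split instead of the previous 80/20 split of a single archive. The repo id and config name below are taken from _HOMEPAGE and _URL_DATA in the script; the trust_remote_code flag is an assumption for recent datasets releases, since this dataset is loaded through a script.

from datasets import load_dataset

# Assumed usage: config names correspond to the keys of _URL_DATA above.
ds = load_dataset(
    "LukasHug/v-lol-trains",
    "V-LoL-Trains-TheoryX-len7",
    trust_remote_code=True,  # assumption: needed on newer datasets versions for script-based datasets
)

# train split: len 2-4 MichalskiTrains archive; test split: len 7 archive
print(ds)
print(ds["train"][0]["label"])  # 0 = westbound, 1 = eastbound (see _NAMES)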