alessandro trinca tornidor committed
Commit 951f1c4 · 1 Parent(s): 2640499

[feat] add optional embedding_key argument to LISAForCausalLM.evaluate() method
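In short: evaluate() gains two optional keyword arguments, model_logger (any logging-like object; when None it falls back to the stdlib logging module) and embedding_key (a string key into a new module-level embedding_dict cache of SAM image embeddings). A sketch of the updated call shape, mirroring the updated call site in app_helpers.py below (model, tokenizer and the preprocessed inputs are assumed to be already in scope):

output_ids, pred_masks = model.evaluate(
    image_clip,
    image,
    input_ids,
    resize_list,
    original_size_list,
    max_new_tokens=512,
    tokenizer=tokenizer,
    model_logger=internal_logger,  # optional: defaults to the logging module
    embedding_key=embedding_key,   # optional: reuse cached SAM image embeddings
)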
lisa_on_cuda/LISA.py CHANGED
@@ -7,13 +7,15 @@ import torch.nn.functional as F
 from .llava.model.language_model.llava_llama import (LlavaLlamaForCausalLM, LlavaLlamaModel)
 from .segment_anything import build_sam_vit_h
 
+embedding_dict = {}
+
 
 def dice_loss(
-    inputs: torch.Tensor,
-    targets: torch.Tensor,
-    num_masks: float,
-    scale=1000, # 100000.0,
-    eps=1e-6,
+        inputs: torch.Tensor,
+        targets: torch.Tensor,
+        num_masks: float,
+        scale=1000, # 100000.0,
+        eps=1e-6,
 ):
     """
     Compute the DICE loss, similar to generalized IOU for masks
@@ -35,9 +37,9 @@ def dice_loss(
 
 
 def sigmoid_ce_loss(
-    inputs: torch.Tensor,
-    targets: torch.Tensor,
-    num_masks: float,
+        inputs: torch.Tensor,
+        targets: torch.Tensor,
+        num_masks: float,
 ):
     """
     Args:
@@ -56,9 +58,9 @@ def sigmoid_ce_loss(
 
 class LisaMetaModel:
     def __init__(
-        self,
-        config,
-        **kwargs,
+            self,
+            config,
+            **kwargs,
     ):
         super(LisaMetaModel, self).__init__(config)
 
@@ -98,9 +100,9 @@ class LisaMetaModel:
 
 class LisaModel(LisaMetaModel, LlavaLlamaModel):
     def __init__(
-        self,
-        config,
-        **kwargs,
+            self,
+            config,
+            **kwargs,
     ):
         super(LisaModel, self).__init__(config, **kwargs)
 
@@ -117,9 +119,9 @@ class LisaModel(LisaMetaModel, LlavaLlamaModel):
 
 class LISAForCausalLM(LlavaLlamaForCausalLM):
     def __init__(
-        self,
-        config,
-        **kwargs,
+            self,
+            config,
+            **kwargs,
     ):
         if not hasattr(config, "train_mask_decoder"):
             config.mm_use_im_start_end = kwargs.pop("use_mm_start_end", True)
@@ -131,7 +133,7 @@ class LISAForCausalLM(LlavaLlamaForCausalLM):
             self.bce_loss_weight = kwargs.pop("bce_loss_weight", None)
         else:
             config.mm_vision_tower = config.vision_tower
-
+
         self.seg_token_idx = kwargs.pop("seg_token_idx")
 
         super().__init__(config)
@@ -162,18 +164,18 @@ class LISAForCausalLM(LlavaLlamaForCausalLM):
         return self.model_forward(**kwargs)
 
     def model_forward(
-        self,
-        images: torch.FloatTensor,
-        images_clip: torch.FloatTensor,
-        input_ids: torch.LongTensor,
-        labels: torch.LongTensor,
-        attention_masks: torch.LongTensor,
-        offset: torch.LongTensor,
-        masks_list: List[torch.FloatTensor],
-        label_list: List[torch.Tensor],
-        resize_list: List[tuple],
-        inference: bool = False,
-        **kwargs,
+            self,
+            images: torch.FloatTensor,
+            images_clip: torch.FloatTensor,
+            input_ids: torch.LongTensor,
+            labels: torch.LongTensor,
+            attention_masks: torch.LongTensor,
+            offset: torch.LongTensor,
+            masks_list: List[torch.FloatTensor],
+            label_list: List[torch.Tensor],
+            resize_list: List[tuple],
+            inference: bool = False,
+            **kwargs,
     ):
         image_embeddings = self.get_visual_embs(images)
         batch_size = image_embeddings.shape[0]
@@ -309,17 +311,17 @@ class LISAForCausalLM(LlavaLlamaForCausalLM):
             pred_mask = pred_masks[batch_idx]
 
             assert (
-                gt_mask.shape[0] == pred_mask.shape[0]
+                    gt_mask.shape[0] == pred_mask.shape[0]
             ), "gt_mask.shape: {}, pred_mask.shape: {}".format(
                 gt_mask.shape, pred_mask.shape
             )
             mask_bce_loss += (
-                sigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
-                * gt_mask.shape[0]
+                    sigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
+                    * gt_mask.shape[0]
             )
             mask_dice_loss += (
-                dice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
-                * gt_mask.shape[0]
+                    dice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
+                    * gt_mask.shape[0]
             )
             num_masks += gt_mask.shape[0]
 
@@ -338,16 +340,22 @@ class LISAForCausalLM(LlavaLlamaForCausalLM):
         }
 
     def evaluate(
-        self,
-        images_clip,
-        images,
-        input_ids,
-        resize_list,
-        original_size_list,
-        max_new_tokens=32,
-        tokenizer=None,
+            self,
+            images_clip,
+            images,
+            input_ids,
+            resize_list,
+            original_size_list,
+            max_new_tokens=32,
+            tokenizer=None,
+            model_logger=None,
+            embedding_key=None
     ):
         with torch.no_grad():
+            if model_logger is None:
+                import logging
+                model_logger = logging
+            model_logger.debug("start output generation...")
             outputs = self.generate(
                 images=images_clip,
                 input_ids=input_ids,
@@ -356,11 +364,13 @@
                 output_hidden_states=True,
                 return_dict_in_generate=True,
             )
+            model_logger.debug("done output generation...")
            output_hidden_states = outputs.hidden_states[-1]
            output_ids = outputs.sequences
 
            seg_token_mask = output_ids[:, 1:] == self.seg_token_idx
            # hack for IMAGE_TOKEN_INDEX (we suppose that there is only one image, and it is in the front)
+            model_logger.debug(f"start torch.cat to seg_token_mask...")
            seg_token_mask = torch.cat(
                [
                    torch.zeros((seg_token_mask.shape[0], 255)).bool().cuda(),
@@ -368,20 +378,25 @@
                ],
                dim=1,
            )
+            model_logger.debug("done torch.cat to seg_token_mask...")
 
            hidden_states = []
 
            assert len(self.model.text_hidden_fcs) == 1
            hidden_states.append(self.model.text_hidden_fcs[0](output_hidden_states))
 
+            model_logger.debug("start torch.stack to last_hidden_state...")
            last_hidden_state = torch.stack(hidden_states, dim=-1).sum(dim=-1)
+            model_logger.debug("done torch.stack to last_hidden_state...")
            pred_embeddings = last_hidden_state[seg_token_mask]
 
            seg_token_counts = seg_token_mask.int().sum(-1)  # [bs, ]
            seg_token_offset = seg_token_counts.cumsum(-1)
+            model_logger.debug(f"start torch.cat to seg_token_offset...")
            seg_token_offset = torch.cat(
                [torch.zeros(1).long().cuda(), seg_token_offset], dim=0
            )
+            model_logger.debug("done torch.cat to seg_token_offset...")
 
            pred_embeddings_ = []
            for i in range(len(seg_token_offset) - 1):
@@ -389,11 +404,25 @@
                pred_embeddings_.append(pred_embeddings[start_i:end_i])
            pred_embeddings = pred_embeddings_
 
-            image_embeddings = self.get_visual_embs(images)
+            model_logger.debug(f"start get_visual_embs to image_embeddings with embedding_key {embedding_key}.")
+
+            if embedding_key is None:
+                image_embeddings = self.get_visual_embs(images)
+            else:
+                try:
+                    image_embeddings = embedding_dict[embedding_key]
+                except KeyError:
+                    model_logger.debug(f"embedding_key {embedding_key} not in embedding_dict, creating embedding now!")
+                    image_embeddings = self.get_visual_embs(images)
+                    embedding_dict[embedding_key] = image_embeddings
+                    model_logger.debug(f"image embedding added in embedding_dict with embedding_key {embedding_key}!")
+
+            model_logger.debug("done get_visual_embs to image_embeddings...")
 
            multimask_output = False
            pred_masks = []
            for i in range(len(pred_embeddings)):
+                model_logger.debug(f"start ({i}nth time) visual_model.prompt_encoder to sparse/dense")
                (
                    sparse_embeddings,
                    dense_embeddings,
@@ -403,8 +432,9 @@
                    masks=None,
                    text_embeds=pred_embeddings[i].unsqueeze(1),
                )
-
+                model_logger.debug(f"done ({i}nth) visual_model.prompt_encoder to sparse/dense, start sparse2sparse")
                sparse_embeddings = sparse_embeddings.to(pred_embeddings[i].dtype)
+                model_logger.debug(f"done ({i}nth) sparse2sparse, start visual_model.mask_decoder")
                low_res_masks, iou_predictions = self.model.visual_model.mask_decoder(
                    image_embeddings=image_embeddings[i].unsqueeze(0),
                    image_pe=self.model.visual_model.prompt_encoder.get_dense_pe(),
@@ -412,11 +442,14 @@
                    dense_prompt_embeddings=dense_embeddings,
                    multimask_output=multimask_output,
                )
+                model_logger.debug(f"done ({i}nth) visual_model.mask_decoder, start postprocess_masks")
                pred_mask = self.model.visual_model.postprocess_masks(
                    low_res_masks,
                    input_size=resize_list[i],
                    original_size=original_size_list[i],
                )
+                model_logger.debug(f"done ({i}nth) postprocess_masks")
                pred_masks.append(pred_mask[:, 0])
 
+        model_logger.debug(f"env evaluate! ")
        return output_ids, pred_masks
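The evaluate() change above memoizes the expensive SAM visual-encoder pass in the module-level embedding_dict, keyed by the caller-supplied embedding_key. A minimal self-contained sketch of the same caching pattern, where compute_embedding is a hypothetical stand-in for self.get_visual_embs:

import torch

embedding_dict = {}  # module-level cache, keyed by a caller-supplied string


def compute_embedding(images: torch.Tensor) -> torch.Tensor:
    # hypothetical stand-in for the expensive SAM visual encoder forward pass
    return images.mean(dim=(-2, -1))


def get_image_embeddings(images: torch.Tensor, embedding_key: str = None) -> torch.Tensor:
    if embedding_key is None:
        return compute_embedding(images)  # no key: recompute on every call
    try:
        return embedding_dict[embedding_key]  # cache hit: skip the encoder
    except KeyError:
        embedding_dict[embedding_key] = compute_embedding(images)
        return embedding_dict[embedding_key]


batch = torch.rand(1, 3, 1024, 1024)
first = get_image_embeddings(batch, embedding_key="demo")   # computes and stores
second = get_image_embeddings(batch, embedding_key="demo")  # served from the cache
assert torch.equal(first, second)

Note the cache is unbounded and trusts the key: reuse is only safe because the caller derives embedding_key from the image content itself (see get_hash_array in app_helpers.py below).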
lisa_on_cuda/utils/app_helpers.py CHANGED
@@ -211,8 +211,12 @@ def get_inference_model_by_args(args_to_parse):
     no_seg_out = placeholders["no_seg_out"]
 
     @session_logger.set_uuid_logging
-    def inference(input_str: str, input_image: str | np.ndarray, internal_logger: logging = None):
-
+    def inference(
+            input_str: str,
+            input_image: str | np.ndarray,
+            internal_logger: logging = None,
+            embedding_key: str = None
+    ):
         if internal_logger is None:
             internal_logger = app_logger
 
@@ -255,7 +259,7 @@
         image_np = cv2.imread(input_image)
         image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
         original_size_list = [image_np.shape[:2]]
-
+        app_logger.debug("start clip_image_processor.preprocess")
         image_clip = (
             clip_image_processor.preprocess(image_np, return_tensors="pt")[
                 "pixel_values"
@@ -263,24 +267,27 @@
             .unsqueeze(0)
             .cuda()
         )
+        app_logger.debug("done clip_image_processor.preprocess")
         internal_logger.info(f"image_clip type: {type(image_clip)}.")
         image_clip = set_image_precision_by_args(image_clip, args_to_parse.precision)
 
         image = transform.apply_image(image_np)
         resize_list = [image.shape[:2]]
 
+        internal_logger.debug(f"starting preprocess image: {type(image_clip)}.")
         image = (
             preprocess(torch.from_numpy(image).permute(2, 0, 1).contiguous())
             .unsqueeze(0)
             .cuda()
         )
-        internal_logger.info(f"image_clip type: {type(image_clip)}.")
+        internal_logger.info(f"done preprocess image:{type(image)}, image_clip type: {type(image_clip)}.")
         image = set_image_precision_by_args(image, args_to_parse.precision)
 
         input_ids = tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
         input_ids = input_ids.unsqueeze(0).cuda()
 
-        internal_logger.info("start model evaluation...")
+        embedding_key = get_hash_array(embedding_key, image, internal_logger)
+        internal_logger.info(f"start model evaluation with embedding_key {embedding_key}.")
         output_ids, pred_masks = model.evaluate(
             image_clip,
             image,
@@ -289,6 +296,8 @@
             original_size_list,
             max_new_tokens=512,
             tokenizer=tokenizer,
+            model_logger=internal_logger,
+            embedding_key=embedding_key
         )
         internal_logger.info("model evaluation done, start token decoding...")
         output_ids = output_ids[0][output_ids[0] != utils.IMAGE_TOKEN_INDEX]
@@ -347,6 +356,25 @@ def get_gradio_interface(
     )
 
 
+def get_hash_array(embedding_key: str, arr: np.ndarray | torch.Tensor, model_logger: logging):
+    from base64 import b64encode
+    from hashlib import sha256
+
+    model_logger.debug(f"embedding_key {embedding_key} is None? {embedding_key is None}.")
+    if embedding_key is None:
+        img2hash = arr
+        if isinstance(arr, torch.Tensor):
+            model_logger.debug("images variable is a Tensor, start converting back to numpy")
+            img2hash = arr.numpy(force=True)
+            model_logger.debug("done Tensor converted back to numpy")
+        model_logger.debug("start image hashing")
+        img2hash_fn = sha256(img2hash)
+        embedding_key = b64encode(img2hash_fn.digest())
+        embedding_key = embedding_key.decode("utf-8")
+        model_logger.debug(f"done image hashing, now embedding_key is {embedding_key}.")
+    return embedding_key
+
+
 if __name__ == '__main__':
     parsed_args = parse_args([])
     print("arrrrg:", parsed_args)
scripts/baremetal_entrypoint.sh ADDED
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+if [ -z "${WORKDIR}" ];
+then
+  WORKDIR=$1
+fi
+
+if [ -z "${XDG_CACHE_HOME}" ];
+then
+  XDG_CACHE_HOME=$HOME/.cache
+fi
+
+echo "WORKDIR: ${WORKDIR} ..."
+echo "XDG_CACHE_HOME: ${XDG_CACHE_HOME} ..."
+
+cd ${WORKDIR}
+
+if [ ! -f "${WORKDIR}/.env_source" ];
+then
+  echo "missing ${WORKDIR}/.env_source file, exit now..."
+  exit 1
+fi
+
+source ${WORKDIR}/.env_source
+echo "FOLDERS_MAP: ${FOLDERS_MAP} ..."
+
+which python
+python --version
+python ${WORKDIR}/scripts/create_folders_and_variables_if_not_exists.py
+
+cd ${WORKDIR}/static
+npm install -g npm pnpm
+pnpm install
+pnpm build
+pnpm tailwindcss -i ${WORKDIR}/static/src/input.css -o ${WORKDIR}/static/dist/output.css
+cd ${WORKDIR}
+
+chmod +x ${WORKDIR}/scripts/entrypoint.sh
+bash ${WORKDIR}/scripts/entrypoint.sh "baremetal"
+
+exit 0
scripts/create_folders_and_variables_if_not_exists.py ADDED
@@ -0,0 +1,51 @@
+import json
+import logging
+import os
+from pathlib import Path
+
+
+def stats_pathname(pathname: Path | str):
+    current_pathname = Path(pathname)
+    return current_pathname.is_dir()
+
+
+def create_folder_if_not_exists(pathname: Path | str):
+    current_pathname = Path(pathname)
+    try:
+        print(f"Pathname exists? {current_pathname.exists()}, That's a folder? {current_pathname.is_dir()}...")
+        logging.info(f"Pathname exists? {current_pathname.exists()}, That's a folder? {current_pathname.is_dir()}...")
+        current_pathname.unlink(missing_ok=True)
+    except PermissionError as pe:
+        print(f"permission denied on removing pathname before folder creation:{pe}.")
+        logging.error(f"permission denied on removing pathname before folder creation:{pe}.")
+    except IsADirectoryError as errdir:
+        print(f"that's a directory:{errdir}.")
+        logging.error(f"that's a directory:{errdir}.")
+
+    print(f"Creating pathname: {current_pathname} ...")
+    logging.info(f"Creating pathname: {current_pathname} ...")
+    current_pathname.mkdir(mode=0o770, parents=True, exist_ok=True)
+
+    print(f"assertion: pathname exists and is a folder: {current_pathname} ...")
+    logging.info(f"assertion: pathname exists and is a folder: {current_pathname} ...")
+    assert current_pathname.is_dir()
+
+
+if __name__ == '__main__':
+    folders_string = os.getenv("FOLDERS_MAP")
+    try:
+        folders_dict = json.loads(folders_string)
+        for folder_env_ref, folder_env_path in folders_dict.items():
+            print(f"folder_env_ref:{folder_env_ref}, folder_env_path:{folder_env_path}.")
+            logging.info(f"folder_env_ref:{folder_env_ref}, folder_env_path:{folder_env_path}.")
+            create_folder_if_not_exists(folder_env_path)
+            print("========")
+            assert os.getenv(folder_env_ref) == folder_env_path
+    except (json.JSONDecodeError, TypeError) as jde:
+        print(f"jde:{jde}.")
+        logging.error(f"jde:{jde}.")
+        print("double check your variables, e.g. for mispelling like 'FOLDER_MAP'...")
+        logging.info("double check your variables, e.g. for mispelling like 'FOLDER_MAP' instead than 'FOLDERS_MAP'...")
+        for k_env, v_env in dict(os.environ).items():
+            print(f"{k_env}, v_env:{v_env}.")
+            logging.info(f"{k_env}, v_env:{v_env}.")
scripts/entrypoint.sh CHANGED
@@ -1,7 +1,11 @@
 #!/usr/bin/env bash
 
-WORKDIR="/var/task"
-XDG_CACHE_HOME="/data"
+if [ -z "$1" ];
+then
+  echo "use no \$1 variable, set WORKDIR and XDG_CACHE_HOME as for docker container mode"
+  WORKDIR="/var/task"
+  XDG_CACHE_HOME="/data"
+fi
 MPLCONFIGDIR=${XDG_CACHE_HOME}/.cache/matplotlib
 TRANSFORMERS_CACHE=${XDG_CACHE_HOME}/.cache/transformers
 FASTAPI_STATIC=${XDG_CACHE_HOME}/static
@@ -45,13 +49,22 @@ echo "WORKDIR - /var/task"
 ls -l ${WORKDIR}
 
 echo "XDG_CACHE_HOME - /data"
-find ${XDG_CACHE_HOME}
+if [ -z "$1" ];
+then
+  echo "use no \$1 variable, show folder ${XDG_CACHE_HOME} content"
+  find ${XDG_CACHE_HOME}
+fi
 
 CUDA_VISIBLE_DEVICES=$(nvidia-smi --query-gpu=memory.free,index --format=csv,nounits,noheader | sort -nr | head -1 | awk '{ print $NF }')
 echo "calculated CUDA_VISIBLE_DEVICES env variable: ${CUDA_VISIBLE_DEVICES}."
 export CUDA_VISIBLE_DEVICES
 
-echo "running command 'uvicorn app.main:app --host 0.0.0.0 --port 7860'..."
-uvicorn app.main:app --host 0.0.0.0 --port 7860
+PYTHONFILE="lisa_on_cuda.app.main"
+if [ -z "$1" ];
+then
+  PYTHONFILE="app.main"
+fi
+echo "running command 'uvicorn ${PYTHONFILE}:app --host 0.0.0.0 --port 7860'..."
+uvicorn ${PYTHONFILE}:app --host 0.0.0.0 --port 7860
 
 exit 0