alessandro trinca tornidor committed on
Commit cc47f6d · 1 Parent(s): 374ff81

feat: start refactor using samgis-core = 3.0.8

app.py CHANGED
@@ -1,23 +1,47 @@
+import json
 import logging
 import os
 import sys
 from pathlib import Path

 import gradio as gr
+import structlog
 import uvicorn
+from dotenv import load_dotenv
 from fastapi import FastAPI
 from fastapi.responses import FileResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
+from samgis_core.utilities import create_folders_if_not_exists
+from samgis_core.utilities import frontend_builder
+from samgis_core.utilities.session_logger import setup_logging
 from spaces import GPU as SPACES_GPU

+from lisa_on_cuda.utils import app_helpers
 from lisa_on_cuda import routes
-from lisa_on_cuda.utils import app_helpers, session_logger, utils, create_folders_and_variables_if_not_exists
-from lisa_on_cuda.utils import frontend_builder


-LOGLEVEL = os.getenv('LOGLEVEL', 'INFO').upper()
-session_logger.change_logging(LOGLEVEL)
+load_dotenv()
+project_root_folder = Path(globals().get("__file__", "./_")).absolute().parent
+workdir = os.getenv("WORKDIR", project_root_folder)
+model_folder = Path(project_root_folder / "machine_learning_models")
+
+log_level = os.getenv("LOG_LEVEL", "INFO")
+setup_logging(log_level=log_level)
+app_logger = structlog.stdlib.get_logger()
+app_logger.info(f"PROJECT_ROOT_FOLDER:{project_root_folder}, WORKDIR:{workdir}.")
+
+folders_map = os.getenv("FOLDERS_MAP", "{}")
+markdown_text = os.getenv("MARKDOWN_TEXT", "")
+examples_text_list = os.getenv("EXAMPLES_TEXT_LIST", "").split("\n")
+example_body = json.loads(os.getenv("EXAMPLE_BODY", "{}"))
+mount_gradio_app = bool(os.getenv("MOUNT_GRADIO_APP", ""))
+
+static_dist_folder = Path(project_root_folder) / "static" / "dist"
+input_css_path = os.getenv("INPUT_CSS_PATH", "src/input.css")
+
+
+create_folders_if_not_exists.folders_creation(folders_map)

 CUSTOM_GRADIO_PATH = "/"
 CUSTOM_STATIC_PATH = "/static"
@@ -25,37 +49,33 @@ FASTAPI_TITLE = "lisa_app"
 app = FastAPI(title=FASTAPI_TITLE, version="1.0")
 app.include_router(routes.router)

-fastapi_dict_folder = Path(utils.FASTAPI_STATIC) / "dist"
 frontend_builder.build_frontend(
-    project_root_folder=frontend_builder.env_project_root_folder,
-    input_css_path=frontend_builder.env_input_css_path,
-    output_dist_folder=fastapi_dict_folder
+    project_root_folder=workdir,
+    input_css_path=input_css_path,
+    output_dist_folder=static_dist_folder
 )
-create_folders_and_variables_if_not_exists.folders_creation()
-
-logging.info("build_frontend ok!")
+app_logger.info("build_frontend ok!")

-os.makedirs(utils.FASTAPI_STATIC, exist_ok=True)
-app.mount(CUSTOM_STATIC_PATH, StaticFiles(directory=fastapi_dict_folder), name="static")
+app.mount(CUSTOM_STATIC_PATH, StaticFiles(directory=static_dist_folder), name="static")


 @app.get(CUSTOM_STATIC_PATH)
 async def static() -> FileResponse:
-    return FileResponse(path=str(fastapi_dict_folder / "index.html"), media_type="text/html")
+    return FileResponse(path=str(static_dist_folder / "index.html"), media_type="text/html")


 templates = Jinja2Templates(directory="templates")


-app_helpers.app_logger.info(f"sys.argv:{sys.argv}.")
+app_logger.info(f"sys.argv:{sys.argv}.")
 args = app_helpers.parse_args([])
-app_helpers.app_logger.info(f"prepared default arguments:{args}.")
+app_logger.info(f"prepared default arguments:{args}.")
 inference_fn = app_helpers.get_inference_model_by_args(args, inference_decorator=SPACES_GPU)
-app_helpers.app_logger.info(f"prepared inference_fn function:{inference_fn.__name__}, creating gradio interface...")
-io = app_helpers.get_gradio_interface(inference_fn)
-app_helpers.app_logger.info("created gradio interface")
+app_logger.info(f"prepared inference_fn function:{inference_fn.__name__}, creating gradio interface...")
+io = app_helpers.get_gradio_interface(inference_fn, args=args)
+app_logger.info("created gradio interface")
 app = gr.mount_gradio_app(app, io, path=CUSTOM_GRADIO_PATH)
-app_helpers.app_logger.info("mounted gradio app within fastapi")
+app_logger.info("mounted gradio app within fastapi")


 if __name__ == '__main__':
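
With this refactor app.py no longer hard-codes its paths and logging setup; it reads them from the process environment (optionally via a .env file loaded by python-dotenv) and delegates the helpers to samgis-core. As a quick reference, a minimal sketch of the variables the new module reads follows; the names are taken from the diff above, while every value is a placeholder chosen only for illustration, not the configuration actually used by the Space.

# Minimal sketch: variable names come from the diff above, values are illustrative placeholders.
import json
import os

os.environ.setdefault("WORKDIR", "/app")                  # falls back to the app.py parent folder
os.environ.setdefault("LOG_LEVEL", "INFO")                # passed to samgis_core setup_logging()
os.environ.setdefault("FOLDERS_MAP", "{}")                # read as a string and handed to folders_creation()
os.environ.setdefault("MARKDOWN_TEXT", "")                # free-form text, defaults to empty
os.environ.setdefault("EXAMPLES_TEXT_LIST", "")           # newline-separated entries, split on "\n"
os.environ.setdefault("EXAMPLE_BODY", json.dumps({}))     # must be valid JSON, parsed with json.loads()
os.environ.setdefault("MOUNT_GRADIO_APP", "")             # any non-empty string becomes True via bool()
os.environ.setdefault("INPUT_CSS_PATH", "src/input.css")  # css entry point for frontend_builder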
lisa_on_cuda/utils/app_helpers.py CHANGED
@@ -409,8 +409,15 @@ def get_inference_model_by_args(args_to_parse, internal_logger0: logging = None,

 @session_logger.set_uuid_logging
 def get_gradio_interface(
-        fn_inference: Callable
+        fn_inference: Callable,
+        args: str = None
 ):
+    description_and_demo_parameters = constants.description
+    if args is not None:
+        description_and_demo_parameters += "\n"
+        args_dict = {arg: getattr(args, arg) for arg in vars(args)}
+        for arg_k, arg_v in args_dict.items():
+            description_and_demo_parameters += "\n".join(f"{arg_k}: {arg_v};")
     return gr.Interface(
         fn_inference,
         inputs=[
@@ -423,7 +430,7 @@ def get_gradio_interface(
             gr.Textbox(lines=1, placeholder=None, label="Text Output")
         ],
         title=constants.title,
-        description=constants.description,
+        description=description_and_demo_parameters,
         article=constants.article,
         examples=constants.examples,
         allow_flagging="auto"
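
A note on the new description logic: str.join is applied to a single formatted string, so "\n".join(f"{arg_k}: {arg_v};") interleaves a newline between every character of that string rather than between parameters. A self-contained sketch of the per-parameter listing the description presumably aims for (a hypothetical helper, not part of this commit) could look like this:

# Hypothetical sketch, not part of this commit: one "key: value;" line per parsed argument.
from argparse import Namespace


def format_demo_parameters(description: str, args: Namespace) -> str:
    args_dict = {arg: getattr(args, arg) for arg in vars(args)}
    # join over a generator of per-parameter strings, so "\n" lands between parameters
    parameters = "\n".join(f"{arg_k}: {arg_v};" for arg_k, arg_v in args_dict.items())
    return f"{description}\n{parameters}"


# example: format_demo_parameters("demo", Namespace(precision="fp16", load_in_4bit=True))
# returns "demo\nprecision: fp16;\nload_in_4bit: True;"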
lisa_on_cuda/utils/constants.py CHANGED
@@ -20,14 +20,12 @@ examples = [
 output_labels = ["Segmentation Output"]

 title = "LISA: Reasoning Segmentation via Large Language Model"
-
 description = """
 <font size=4>
-This is the online demo of LISA. \n
+This is the online demo of LISA... \n
 If multiple users are using it at the same time, they will enter a queue, which may delay some time. \n
 **Note**: **Different prompts can lead to significantly varied results**. \n
 **Note**: Please try to **standardize** your input text prompts to **avoid ambiguity**, and also pay attention to whether the **punctuations** of the input are correct. \n
-**Note**: Current model is **LISA-13B-llama2-v0-explanatory**, and 4-bit quantization may impair text-generation quality. \n
 **Usage**: <br>
 &ensp;(1) To let LISA **segment something**, input prompt like: "Can you segment xxx in this image?", "What is xxx in this image? Please output segmentation mask."; <br>
 &ensp;(2) To let LISA **output an explanation**, input prompt like: "What is xxx in this image? Please output segmentation mask and explain why."; <br>
@@ -36,6 +34,8 @@ Hope you can enjoy our work!
 </font>
 """

+demo_parameters = """This demo uses these parameters:"""
+
 article = """
 <p style='text-align: center'>
 <a href='https://arxiv.org/abs/2308.00692' target='_blank'>