bofenghuang committed
Commit efac88a · 1 Parent(s): 4dc5e51
Files changed (1)
  1. run_demo_openai.py +10 -5
run_demo_openai.py CHANGED
@@ -3,6 +3,7 @@ import warnings
 
 import gradio as gr
 import pytube as pt
+import psutil
 import torch
 import whisper
 from huggingface_hub import hf_hub_download, model_info
@@ -42,6 +43,13 @@ logger.info(f"Model will be loaded on device `{device}`")
 cached_models = {}
 
 
+def _print_memory_info():
+    memory = psutil.virtual_memory()
+    logger.info(
+        f"Memory: {memory.total / (1024 ** 3):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 ** 3):.2f}GB"
+    )
+
+
 def print_cuda_memory_info():
     used_mem, tot_mem = torch.cuda.mem_get_info()
     logger.info(
@@ -50,11 +58,8 @@ def print_cuda_memory_info():
 
 
 def print_memory_info():
-    # todo
-    if device == "cpu":
-        pass
-    else:
-        print_cuda_memory_info()
+    _print_memory_info()
+    print_cuda_memory_info()
 
 
 def maybe_load_cached_pipeline(model_name):
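
For context, a minimal standalone sketch of the memory logging this commit introduces, assuming psutil and torch are installed. The logger setup here is an assumption added for self-containment, and unlike the commit's print_memory_info(), the CUDA read is guarded with torch.cuda.is_available() so the sketch also runs on CPU-only hosts.

import logging

import psutil
import torch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def print_memory_info():
    # Host RAM via psutil, as in the commit's _print_memory_info()
    memory = psutil.virtual_memory()
    logger.info(
        f"Memory: {memory.total / (1024 ** 3):.2f}GB, used: {memory.percent}%, "
        f"available: {memory.available / (1024 ** 3):.2f}GB"
    )
    # Extra safeguard not present in the commit, which calls
    # print_cuda_memory_info() unconditionally.
    if torch.cuda.is_available():
        # torch.cuda.mem_get_info() returns (free, total) bytes for the current device
        free_mem, tot_mem = torch.cuda.mem_get_info()
        logger.info(
            f"CUDA memory: {free_mem / (1024 ** 3):.2f}GB free of "
            f"{tot_mem / (1024 ** 3):.2f}GB total"
        )


print_memory_info()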