Hjgugugjhuhjggg committed on
Commit 37f7b00 (verified)
1 Parent(s): 3d6c891

Update app.py

Files changed (1):
  app.py +1 -10
app.py CHANGED

@@ -56,7 +56,6 @@ def download_model_from_huggingface(model_name):
     try:
         response = requests.get(url, headers=headers)
         if response.status_code == 200:
-            # Links to the model files
             model_files = [
                 "pytorch_model.bin",
                 "config.json",
@@ -85,17 +84,13 @@ async def predict(request: DownloadModelRequest):
         "model.safetensors",
     ]
 
-    # Check whether the model files are already in GCS
     model_files_exist = all(gcs_handler.file_exists(f"{model_prefix}/{file}") for file in model_files)
 
     if not model_files_exist:
-        # Download the model if it does not exist
         download_model_from_huggingface(model_prefix)
 
-    # Download the required files
     model_files_streams = {file: gcs_handler.download_file(f"{model_prefix}/{file}") for file in model_files if gcs_handler.file_exists(f"{model_prefix}/{file}")}
 
-    # Make sure the essential files are present
     config_stream = model_files_streams.get("config.json")
     tokenizer_stream = model_files_streams.get("tokenizer.json")
     model_stream = model_files_streams.get("pytorch_model.bin")
@@ -103,7 +98,6 @@ async def predict(request: DownloadModelRequest):
     if not config_stream or not tokenizer_stream or not model_stream:
         raise HTTPException(status_code=500, detail="Required model files missing.")
 
-    # Save the files to a temporary directory
     with tempfile.TemporaryDirectory() as tmp_dir:
         config_path = os.path.join(tmp_dir, "config.json")
         tokenizer_path = os.path.join(tmp_dir, "tokenizer.json")
@@ -116,14 +110,11 @@ async def predict(request: DownloadModelRequest):
         with open(model_path, 'wb') as f:
             f.write(model_stream.read())
 
-        # Load the model and tokenizer from the temporary files
-        model = AutoModelForCausalLM.from_pretrained(tmp_dir)
+        model = AutoModelForCausalLM.from_pretrained(tmp_dir, from_tf=False)
         tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
 
-        # Create a pipeline for the desired task
         pipeline_ = pipeline(request.pipeline_task, model=model, tokenizer=tokenizer)
 
-        # Run the prediction
         result = pipeline_(request.input_text)
 
         return {"response": result}
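The first hunk only drops a comment inside download_model_from_huggingface, but the surrounding shape (a requests.get probe followed by a status_code == 200 check and a list of expected weight files) is worth seeing whole. A minimal sketch under stated assumptions: the HF_BASE constant, the /api/models probe URL, and the token parameter are illustrative, since app.py builds its url and headers outside the lines shown.

import requests

HF_BASE = "https://huggingface.co"  # assumption: app.py's `url` is built from something like this

def download_model_files(model_name, token=None):
    # Probe the repo first, mirroring the try / requests.get /
    # status_code == 200 shape of download_model_from_huggingface.
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    try:
        response = requests.get(f"{HF_BASE}/api/models/{model_name}", headers=headers)
        if response.status_code == 200:
            model_files = [
                "pytorch_model.bin",
                "config.json",
                "tokenizer.json",
                "model.safetensors",
            ]
            for file in model_files:
                # The Hub serves raw files from its resolve endpoint.
                r = requests.get(f"{HF_BASE}/{model_name}/resolve/main/{file}", headers=headers)
                if r.status_code == 200:
                    with open(file, "wb") as f:
                        f.write(r.content)
    except requests.RequestException as exc:
        print(f"Download failed: {exc}")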
 
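The substantive change is in the last hunk: the model is now loaded with an explicit from_tf=False, telling from_pretrained to expect PyTorch weights rather than a TensorFlow checkpoint. A hedged sketch of the resulting load-and-predict path with the GCS plumbing abstracted away (the load_and_predict name and its arguments are illustrative, not from app.py):

import os
import tempfile

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def load_and_predict(model_files_streams, pipeline_task, input_text):
    # model_files_streams maps filenames ("config.json", ...) to binary
    # file-like objects; in app.py these come from gcs_handler.download_file.
    with tempfile.TemporaryDirectory() as tmp_dir:
        for name, stream in model_files_streams.items():
            with open(os.path.join(tmp_dir, name), "wb") as f:
                f.write(stream.read())

        # from_tf=False is the one line this commit adds; it makes the
        # PyTorch-weights expectation explicit (False is also the default).
        model = AutoModelForCausalLM.from_pretrained(tmp_dir, from_tf=False)
        tokenizer = AutoTokenizer.from_pretrained(tmp_dir)

        pipeline_ = pipeline(pipeline_task, model=model, tokenizer=tokenizer)
        return pipeline_(input_text)

Since from_tf already defaults to False in transformers, the added argument documents intent more than it changes behavior. Note also that everything happens inside the tempfile.TemporaryDirectory context, so the extracted files are deleted when the request returns and every call pays the full GCS download and model-load cost again.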