DHEIVER committed on
Commit
2f030e9
·
verified ·
1 Parent(s): aad7110

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -72
app.py CHANGED
@@ -13,15 +13,10 @@ from langchain.memory import ConversationBufferMemory
13
  from transformers import AutoTokenizer, pipeline
14
 
15
  # ===================================================================
16
- # CONFIGURAÇÃO RADICAL DE HARDWARE
17
  # ===================================================================
18
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
19
  TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
20
- MAX_MEMORY = "16GB" if DEVICE == "cpu" else None
21
-
22
- # ===================================================================
23
- # LISTA DE MODELOS OTIMIZADOS
24
- # ===================================================================
25
  LLM_MODELS = {
26
  "TinyLlama-1.1B-Chat": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
27
  "Phi-2": "microsoft/phi-2",
@@ -30,38 +25,43 @@ LLM_MODELS = {
30
  }
31
 
32
  # ===================================================================
33
- # NÚCLEO DO SISTEMA
34
  # ===================================================================
35
- class DocumentProcessor:
36
  @staticmethod
37
- def load_and_split(files, chunk_size=512, chunk_overlap=64):
38
- """Carrega e processa documentos com paralelismo extremo"""
 
 
 
39
  try:
40
  loaders = [PyPDFLoader(file.name) for file in files]
41
- return [page for loader in loaders for page in loader.load_and_split(
42
- RecursiveCharacterTextSplitter(
43
- chunk_size=chunk_size,
44
- chunk_overlap=chunk_overlap,
45
- separators=["\n\n", "\n", " ", ""]
46
- )
47
- )]
48
  except Exception as e:
49
- raise RuntimeError(f"FALHA CRÍTICA NO PROCESSAMENTO: {str(e)}")
50
 
51
- class VectorDBManager:
52
  @staticmethod
53
- def create(splits):
54
- """Cria vetorização com aceleração de hardware"""
 
 
 
55
  return Chroma.from_documents(
56
  documents=splits,
57
  embedding=HuggingFaceEmbeddings(),
58
- persist_directory="./chroma_db"
59
  )
60
 
61
- class LLMEngine:
62
  @staticmethod
63
- def initialize(model_name, temp=0.7, max_tokens=512):
64
- """Inicialização agressiva do modelo com otimizações de baixo nível"""
65
  try:
66
  tokenizer = AutoTokenizer.from_pretrained(LLM_MODELS[model_name])
67
 
@@ -80,86 +80,104 @@ class LLMEngine:
80
 
81
  return HuggingFacePipeline(pipeline=pipe)
82
  except KeyError:
83
- raise ValueError("MODELO NÃO SUPORTADO!")
84
  except Exception as e:
85
- raise RuntimeError(f"FALHA NUCLEAR NO MODELO: {str(e)}")
86
 
87
  # ===================================================================
88
  # INTERFACE DE COMBATE
89
  # ===================================================================
90
- def create_war_interface():
91
- with gr.Blocks(theme=gr.themes.Soft(), title="⚔️ PDF Assault v1.0") as warzone:
92
- state = gr.State({"db": None, "llm": None})
 
 
 
 
 
93
 
94
- # Zona de Upload
95
  with gr.Row(variant="panel"):
96
- file_upload = gr.Files(label="DOCUMENTOS ALVO", file_types=[".pdf"])
97
- process_btn = gr.Button("ATAQUE!", variant="stop")
98
-
99
- # Controles Táticos
100
- with gr.Row(variant="compact"):
101
- model_selector = gr.Dropdown(list(LLM_MODELS.keys()), label="ARMA PRINCIPAL", value="TinyLlama-1.1B-Chat")
102
- temp_slider = gr.Slider(0, 1, 0.7, label="POTÊNCIA DE FOGO")
103
- deploy_btn = gr.Button("DEPLOY MODELO", variant="primary")
104
-
105
- # Campo de Batalha
106
- chatbot = gr.Chatbot(height=600, label="ZONA DE OPERAÇÕES")
107
- msg_input = gr.Textbox(label="COMANDO DE ATAQUE", placeholder="Insira o alvo...")
108
 
109
- # Sistema de Logs
110
- combat_log = gr.Textbox(label="RELATÓRIO DE COMBATE", interactive=False)
 
 
111
 
112
- # ===== Operações Militares =====
113
- @process_btn.click(inputs=[file_upload], outputs=[state, combat_log])
114
- def assault_documents(files):
115
  try:
116
- splits = DocumentProcessor.load_and_split(files)
117
- db = VectorDBManager.create(splits)
118
- return {"db": db, "llm": None}, "✅ DOCUMENTOS CAPTURADOS!"
 
 
 
 
 
119
  except Exception as e:
120
- return state.value, f"☠️ FALHA CATACLÍSMICA: {str(e)}"
121
 
122
- @deploy_btn.click(inputs=[model_selector, temp_slider, state], outputs=[state, combat_log])
123
- def deploy_model(model, temp, current_state):
124
  try:
125
- llm = LLMEngine.initialize(model, temp)
 
 
 
126
  current_state["llm"] = ConversationalRetrievalChain.from_llm(
127
  llm=llm,
128
- retriever=current_state["db"].as_retriever(),
129
  memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
130
  return_source_documents=True
131
  )
132
- return current_state, f"🚀 {model} PRONTO PARA COMBATE!"
 
133
  except Exception as e:
134
- return current_state, f"💥 FALHA NO DEPLOY: {str(e)}"
135
 
136
  @msg_input.submit(inputs=[msg_input, chatbot, state], outputs=[msg_input, chatbot])
137
- def execute_combat(command, history, state):
138
- if not state["llm"]:
139
- return command, history + [(command, "⚠️ MODELO NÃO DEPLOYADO!")]
140
 
141
  try:
142
  result = state["llm"]({"question": command, "chat_history": history})
143
- response = f"🎯 RESPOSTA:\n{result['answer']}\n\n"
144
- response += "📌 INTEL:\n" + "\n".join(
145
- f"Página {doc.metadata['page']+1}: {doc.page_content[:75]}..."
146
  for doc in result["source_documents"][:3]
147
  )
148
- return "", history + [(command, response)]
 
 
149
  except Exception as e:
150
- return command, history + [(command, f"☢️ FALHA CRÍTICA: {str(e)}")]
 
 
 
 
151
 
152
- return warzone
153
 
154
  # ===================================================================
155
  # INICIALIZAÇÃO DO SISTEMA
156
  # ===================================================================
157
  if __name__ == "__main__":
158
- interface = create_war_interface()
159
- interface.launch(
160
  server_name="0.0.0.0",
161
  server_port=7860,
162
- share=False,
163
- auth=("admin", "combat123"),
164
- show_error=True
165
  )
 
13
  from transformers import AutoTokenizer, pipeline
14
 
15
  # ===================================================================
16
+ # CONFIGURAÇÃO DE COMBATE
17
  # ===================================================================
18
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
19
  TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
 
 
 
 
 
20
  LLM_MODELS = {
21
  "TinyLlama-1.1B-Chat": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
22
  "Phi-2": "microsoft/phi-2",
 
25
  }
26
 
27
  # ===================================================================
28
+ # NÚCLEO DE OPERAÇÕES ESPECIAIS
29
  # ===================================================================
30
class TacticalDocumentProcessor:
    """Loads uploaded PDF files and splits them into retrieval-sized chunks."""

    @staticmethod
    def neutralize_documents(files, chunk_size=512, chunk_overlap=64):
        """Split the uploaded PDFs into overlapping text chunks.

        Args:
            files: Gradio file objects exposing a ``.name`` filesystem path.
            chunk_size: Maximum characters per chunk.
            chunk_overlap: Characters shared between consecutive chunks.

        Returns:
            A flat list of document chunks from all files, in upload order.

        Raises:
            ValueError: If no files were provided.
            RuntimeError: If loading or splitting fails.
        """
        if not files:
            raise ValueError("ALVO NÃO IDENTIFICADO")

        try:
            loaders = [PyPDFLoader(file.name) for file in files]
            # BUGFIX: the original separator "\. " is an invalid string escape
            # that Python keeps as a literal backslash-dot-space, and
            # RecursiveCharacterTextSplitter matches separators literally by
            # default — so it could never split anything. Use a plain ". " to
            # actually break on sentence boundaries.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=["\n\n", "\n", ". ", " ", ""],
            )
            return [page for loader in loaders for page in loader.load_and_split(splitter)]
        except Exception as e:
            # Chain the original cause so the traceback stays diagnosable.
            raise RuntimeError(f"FALHA NA OPERAÇÃO: {str(e)}") from e
47
 
48
class VectorStrikeSystem:
    """Wraps creation of the persistent Chroma vector index."""

    @staticmethod
    def deploy_vector_db(splits):
        """Embed the given document chunks and persist them to ./combat_db.

        Raises ValueError when *splits* is empty; returns the Chroma store.
        """
        if not splits:
            raise ValueError("NENHUMA INTELECÇÃO DISPONÍVEL")

        embedder = HuggingFaceEmbeddings()
        store = Chroma.from_documents(
            documents=splits,
            embedding=embedder,
            persist_directory="./combat_db",
        )
        return store
60
 
61
+ class LLMWeaponsSystem:
62
  @staticmethod
63
+ def activate_weapon(model_name, temp=0.7, max_tokens=512):
64
+ """Ativação de armamento cognitivo"""
65
  try:
66
  tokenizer = AutoTokenizer.from_pretrained(LLM_MODELS[model_name])
67
 
 
80
 
81
  return HuggingFacePipeline(pipeline=pipe)
82
  except KeyError:
83
+ raise ValueError("ARMA NÃO CATALOGADA")
84
  except Exception as e:
85
+ raise RuntimeError(f"FALHA NO SISTEMA DE ARMAMENTO: {str(e)}")
86
 
87
  # ===================================================================
88
  # INTERFACE DE COMBATE
89
  # ===================================================================
90
def deploy_combat_interface():
    """Build and return the Gradio Blocks UI for the PDF chat application.

    The UI has three stages, tracked in a shared ``gr.State`` dict:
    1. process PDFs into a vector DB (``doc_status``),
    2. activate an LLM + retrieval chain on top of it (``model_status``),
    3. chat against the chain.

    Returns:
        The configured ``gr.Blocks`` instance (caller is expected to ``launch`` it).
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="🔥 WARBOT v2.0") as interface:
        # Shared session state: vector DB, retrieval chain, and stage flags.
        state = gr.State({
            "db": None,
            "llm": None,
            "doc_status": False,
            "model_status": False
        })

        # Tactical control zone: upload/process on the left, model on the right.
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                file_upload = gr.Files(label="CARREGAMENTO DE ALVOS", file_types=[".pdf"])
                process_btn = gr.Button("INICIAR PROCESSAMENTO", variant="stop")
                process_log = gr.Textbox(label="RELATÓRIO DE PROCESSAMENTO", interactive=False)

            with gr.Column(scale=1):
                model_selector = gr.Dropdown(list(LLM_MODELS.keys()), label="SELECIONE O ARMAMENTO", value="TinyLlama-1.1B-Chat")
                temp_slider = gr.Slider(0, 1, 0.7, label="NÍVEL DE AGRESSIVIDADE")
                deploy_btn = gr.Button("ATIVAR ARMAMENTO", variant="primary")
                deploy_log = gr.Textbox(label="STATUS DO ARMAMENTO", interactive=False)

        # Main chat area.
        chatbot = gr.Chatbot(height=650, label="ZONA DE ENGENHARIA COGNITIVA")
        msg_input = gr.Textbox(label="INSIRA COMANDO DE ATAQUE", placeholder="Aguardando ordens...")
        clear_btn = gr.Button("LIMPAR CAMPO DE BATALHA")

        # ===== Event handlers =====
        # BUGFIX: the original handler took only [file_upload] and, on failure,
        # returned ``state.value`` — the State component's *initial* value —
        # which wiped any previously built DB whenever a re-upload failed.
        # Pass the current state in and return it unchanged on error instead.
        @process_btn.click(inputs=[file_upload, state], outputs=[state, process_log])
        def execute_processing(files, current_state):
            """Stage 1: split the PDFs and build the vector DB."""
            try:
                splits = TacticalDocumentProcessor.neutralize_documents(files)
                db = VectorStrikeSystem.deploy_vector_db(splits)
                # Fresh state: a new DB invalidates any previously deployed chain.
                return {
                    "db": db,
                    "llm": None,
                    "doc_status": True,
                    "model_status": False
                }, "✅ ALVOS PROCESSADOS COM SUCESSO"
            except Exception as e:
                return current_state, f"☠️ FALHA CRÍTICA: {str(e)}"

        @deploy_btn.click(inputs=[model_selector, temp_slider, state], outputs=[state, deploy_log])
        def deploy_weapon(model, temp, current_state):
            """Stage 2: load the selected LLM and wire it to the retriever."""
            try:
                if not current_state["doc_status"]:
                    raise RuntimeError("ALVOS NÃO PROCESSADOS! EXECUTE A FASE 1")

                llm = LLMWeaponsSystem.activate_weapon(model, temp)
                current_state["llm"] = ConversationalRetrievalChain.from_llm(
                    llm=llm,
                    # k=3 keeps the prompt small enough for the 1–3B models used here.
                    retriever=current_state["db"].as_retriever(search_kwargs={"k": 3}),
                    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
                    return_source_documents=True
                )
                current_state["model_status"] = True
                return current_state, f"🚀 {model} PRONTO PARA ENGAGEMENT"
            except Exception as e:
                return current_state, f"💥 FALHA NO ARMAMENTO: {str(e)}"

        @msg_input.submit(inputs=[msg_input, chatbot, state], outputs=[msg_input, chatbot])
        def execute_engagement(command, history, current_state):
            """Stage 3: run one question through the chain and show sources."""
            # Renamed the parameter (was ``state``) so it no longer shadows the
            # gr.State component defined above; Gradio passes inputs positionally.
            if not current_state["model_status"]:
                return command, history + [(command, "⚠️ ARMAMENTO NÃO ATIVADO")]

            try:
                result = current_state["llm"]({"question": command, "chat_history": history})
                # Cite up to three source chunks with page numbers (0-based in
                # PyPDF metadata, hence the +1).
                intel_report = "\n".join(
                    f"🔍 Pg {doc.metadata['page']+1}: {doc.page_content[:100]}..."
                    for doc in result["source_documents"][:3]
                )
                return "", history + [
                    (command, f"🎯 RESPOSTA:\n{result['answer']}\n\n📡 INTELIGÊNCIA:\n{intel_report}")
                ]
            except Exception as e:
                return command, history + [(command, f"☢️ FALHA OPERACIONAL: {str(e)}")]

        @clear_btn.click(inputs=[], outputs=[chatbot])
        def clear_battlefield():
            """Clear the visible chat transcript (chain memory is untouched)."""
            return []

    return interface
172
 
173
# ===================================================================
# SYSTEM STARTUP
# ===================================================================
if __name__ == "__main__":
    import os  # local import: keeps the file's top-level import block untouched

    combat_system = deploy_combat_interface()
    combat_system.launch(
        server_name="0.0.0.0",
        server_port=7860,
        # SECURITY: credentials were hard-coded in source. Allow overriding
        # via environment variables; the original values remain the defaults
        # so existing deployments keep working.
        auth=(
            os.environ.get("WARBOT_USER", "commander"),
            os.environ.get("WARBOT_PASS", "tactical123"),
        ),
        # NOTE(review): share=True opens a public Gradio tunnel in addition to
        # binding 0.0.0.0 — confirm public exposure behind basic auth is intended.
        share=True
    )