Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -13,15 +13,10 @@ from langchain.memory import ConversationBufferMemory
|
|
13 |
from transformers import AutoTokenizer, pipeline
|
14 |
|
15 |
# ===================================================================
|
16 |
-
# CONFIGURAÇÃO
|
17 |
# ===================================================================
|
18 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
19 |
TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
|
20 |
-
MAX_MEMORY = "16GB" if DEVICE == "cpu" else None
|
21 |
-
|
22 |
-
# ===================================================================
|
23 |
-
# LISTA DE MODELOS OTIMIZADOS
|
24 |
-
# ===================================================================
|
25 |
LLM_MODELS = {
|
26 |
"TinyLlama-1.1B-Chat": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
|
27 |
"Phi-2": "microsoft/phi-2",
|
@@ -30,38 +25,43 @@ LLM_MODELS = {
|
|
30 |
}
|
31 |
|
32 |
# ===================================================================
|
33 |
-
# NÚCLEO
|
34 |
# ===================================================================
|
35 |
-
class
|
36 |
@staticmethod
|
37 |
-
def
|
38 |
-
"""
|
|
|
|
|
|
|
39 |
try:
|
40 |
loaders = [PyPDFLoader(file.name) for file in files]
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
)]
|
48 |
except Exception as e:
|
49 |
-
raise RuntimeError(f"FALHA
|
50 |
|
51 |
-
class
|
52 |
@staticmethod
|
53 |
-
def
|
54 |
-
"""
|
|
|
|
|
|
|
55 |
return Chroma.from_documents(
|
56 |
documents=splits,
|
57 |
embedding=HuggingFaceEmbeddings(),
|
58 |
-
persist_directory="./
|
59 |
)
|
60 |
|
61 |
-
class
|
62 |
@staticmethod
|
63 |
-
def
|
64 |
-
"""
|
65 |
try:
|
66 |
tokenizer = AutoTokenizer.from_pretrained(LLM_MODELS[model_name])
|
67 |
|
@@ -80,86 +80,104 @@ class LLMEngine:
|
|
80 |
|
81 |
return HuggingFacePipeline(pipeline=pipe)
|
82 |
except KeyError:
|
83 |
-
raise ValueError("
|
84 |
except Exception as e:
|
85 |
-
raise RuntimeError(f"FALHA
|
86 |
|
87 |
# ===================================================================
|
88 |
# INTERFACE DE COMBATE
|
89 |
# ===================================================================
|
90 |
-
def
|
91 |
-
with gr.Blocks(theme=gr.themes.Soft(), title="
|
92 |
-
state = gr.State({
|
|
|
|
|
|
|
|
|
|
|
93 |
|
94 |
-
# Zona de
|
95 |
with gr.Row(variant="panel"):
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
chatbot = gr.Chatbot(height=600, label="ZONA DE OPERAÇÕES")
|
107 |
-
msg_input = gr.Textbox(label="COMANDO DE ATAQUE", placeholder="Insira o alvo...")
|
108 |
|
109 |
-
#
|
110 |
-
|
|
|
|
|
111 |
|
112 |
-
# =====
|
113 |
-
@process_btn.click(inputs=[file_upload], outputs=[state,
|
114 |
-
def
|
115 |
try:
|
116 |
-
splits =
|
117 |
-
db =
|
118 |
-
return {
|
|
|
|
|
|
|
|
|
|
|
119 |
except Exception as e:
|
120 |
-
return state.value, f"☠️ FALHA
|
121 |
|
122 |
-
@deploy_btn.click(inputs=[model_selector, temp_slider, state], outputs=[state,
|
123 |
-
def
|
124 |
try:
|
125 |
-
|
|
|
|
|
|
|
126 |
current_state["llm"] = ConversationalRetrievalChain.from_llm(
|
127 |
llm=llm,
|
128 |
-
retriever=current_state["db"].as_retriever(),
|
129 |
memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
|
130 |
return_source_documents=True
|
131 |
)
|
132 |
-
|
|
|
133 |
except Exception as e:
|
134 |
-
return current_state, f"💥 FALHA NO
|
135 |
|
136 |
@msg_input.submit(inputs=[msg_input, chatbot, state], outputs=[msg_input, chatbot])
|
137 |
-
def
|
138 |
-
if not state["
|
139 |
-
return command, history + [(command, "⚠️
|
140 |
|
141 |
try:
|
142 |
result = state["llm"]({"question": command, "chat_history": history})
|
143 |
-
|
144 |
-
|
145 |
-
f"Página {doc.metadata['page']+1}: {doc.page_content[:75]}..."
|
146 |
for doc in result["source_documents"][:3]
|
147 |
)
|
148 |
-
return "", history + [
|
|
|
|
|
149 |
except Exception as e:
|
150 |
-
return command, history + [(command, f"☢️ FALHA
|
|
|
|
|
|
|
|
|
151 |
|
152 |
-
return
|
153 |
|
154 |
# ===================================================================
|
155 |
# INICIALIZAÇÃO DO SISTEMA
|
156 |
# ===================================================================
|
157 |
if __name__ == "__main__":
|
158 |
-
|
159 |
-
|
160 |
server_name="0.0.0.0",
|
161 |
server_port=7860,
|
162 |
-
|
163 |
-
|
164 |
-
show_error=True
|
165 |
)
|
|
|
13 |
from transformers import AutoTokenizer, pipeline
|
14 |
|
15 |
# ===================================================================
|
16 |
+
# CONFIGURAÇÃO DE COMBATE
|
17 |
# ===================================================================
|
18 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
19 |
TORCH_DTYPE = torch.bfloat16 if DEVICE == "cuda" else torch.float32
|
|
|
|
|
|
|
|
|
|
|
20 |
LLM_MODELS = {
|
21 |
"TinyLlama-1.1B-Chat": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
|
22 |
"Phi-2": "microsoft/phi-2",
|
|
|
25 |
}
|
26 |
|
27 |
# ===================================================================
|
28 |
+
# NÚCLEO DE OPERAÇÕES ESPECIAIS
|
29 |
# ===================================================================
|
30 |
+
class TacticalDocumentProcessor:
    """Loads uploaded PDF files and splits them into overlapping text chunks."""

    @staticmethod
    def neutralize_documents(files, chunk_size=512, chunk_overlap=64):
        """Load the uploaded PDFs and split each page into retrieval chunks.

        Args:
            files: iterable of uploaded file objects exposing a ``.name`` path
                (as provided by ``gr.Files``).
            chunk_size: maximum characters per chunk.
            chunk_overlap: characters shared between consecutive chunks.

        Returns:
            Flat list of split Document pages from all files.

        Raises:
            ValueError: when no files were provided.
            RuntimeError: when loading or splitting fails.
        """
        if not files:
            raise ValueError("ALVO NÃO IDENTIFICADO")

        try:
            loaders = [PyPDFLoader(file.name) for file in files]
            # FIX: the separator was written as "\. " — an invalid escape
            # (SyntaxWarning on Python 3.12+) that, because this splitter
            # treats separators as LITERAL strings by default, looked for a
            # backslash-dot-space sequence and never matched a sentence
            # boundary. The plain ". " is what was intended.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=["\n\n", "\n", ". ", " ", ""],
            )
            return [
                page
                for loader in loaders
                for page in loader.load_and_split(splitter)
            ]
        except Exception as e:
            # Chain the original exception so the traceback keeps the cause.
            raise RuntimeError(f"FALHA NA OPERAÇÃO: {str(e)}") from e
|
47 |
|
48 |
+
class VectorStrikeSystem:
    """Builds the persistent Chroma vector store from document chunks."""

    @staticmethod
    def deploy_vector_db(splits, persist_directory="./combat_db"):
        """Create a Chroma vector database over the given document splits.

        Args:
            splits: non-empty list of split Documents (see
                ``TacticalDocumentProcessor.neutralize_documents``).
            persist_directory: where Chroma persists the index. Generalized
                from the previously hard-coded ``"./combat_db"``; the default
                preserves the original behavior.

        Returns:
            A ``Chroma`` vector store built from *splits*.

        Raises:
            ValueError: when *splits* is empty or None.
        """
        if not splits:
            raise ValueError("NENHUMA INTELECÇÃO DISPONÍVEL")

        # NOTE(review): HuggingFaceEmbeddings() with no arguments downloads
        # its default sentence-transformers model on first use — requires
        # network access at runtime. Confirm this is acceptable for the
        # deployment environment.
        return Chroma.from_documents(
            documents=splits,
            embedding=HuggingFaceEmbeddings(),
            persist_directory=persist_directory,
        )
|
60 |
|
61 |
+
class LLMWeaponsSystem:
|
62 |
@staticmethod
|
63 |
+
def activate_weapon(model_name, temp=0.7, max_tokens=512):
|
64 |
+
"""Ativação de armamento cognitivo"""
|
65 |
try:
|
66 |
tokenizer = AutoTokenizer.from_pretrained(LLM_MODELS[model_name])
|
67 |
|
|
|
80 |
|
81 |
return HuggingFacePipeline(pipeline=pipe)
|
82 |
except KeyError:
|
83 |
+
raise ValueError("ARMA NÃO CATALOGADA")
|
84 |
except Exception as e:
|
85 |
+
raise RuntimeError(f"FALHA NO SISTEMA DE ARMAMENTO: {str(e)}")
|
86 |
|
87 |
# ===================================================================
|
88 |
# INTERFACE DE COMBATE
|
89 |
# ===================================================================
|
90 |
+
def deploy_combat_interface():
    """Build and return the Gradio Blocks chat interface.

    The UI runs in three phases:
      1. Upload PDFs and process them into a Chroma vector DB.
      2. Select and activate an LLM, wiring it into a retrieval chain.
      3. Chat against the documents, with source citations per answer.

    Per-session pipeline state is kept in a ``gr.State`` dict.

    Returns:
        The constructed ``gr.Blocks`` interface (not yet launched).
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="🔥 WARBOT v2.0") as interface:
        state = gr.State({
            "db": None,            # Chroma vector store once docs are processed
            "llm": None,           # ConversationalRetrievalChain once deployed
            "doc_status": False,   # phase 1 complete?
            "model_status": False, # phase 2 complete?
        })

        # Zona de Controle Tático
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                file_upload = gr.Files(label="CARREGAMENTO DE ALVOS", file_types=[".pdf"])
                process_btn = gr.Button("INICIAR PROCESSAMENTO", variant="stop")
                process_log = gr.Textbox(label="RELATÓRIO DE PROCESSAMENTO", interactive=False)

            with gr.Column(scale=1):
                model_selector = gr.Dropdown(
                    list(LLM_MODELS.keys()),
                    label="SELECIONE O ARMAMENTO",
                    value="TinyLlama-1.1B-Chat",
                )
                temp_slider = gr.Slider(0, 1, 0.7, label="NÍVEL DE AGRESSIVIDADE")
                deploy_btn = gr.Button("ATIVAR ARMAMENTO", variant="primary")
                deploy_log = gr.Textbox(label="STATUS DO ARMAMENTO", interactive=False)

        # Campo de Batalha Principal
        chatbot = gr.Chatbot(height=650, label="ZONA DE ENGENHARIA COGNITIVA")
        msg_input = gr.Textbox(label="INSIRA COMANDO DE ATAQUE", placeholder="Aguardando ordens...")
        clear_btn = gr.Button("LIMPAR CAMPO DE BATALHA")

        # ===== OPERAÇÕES TÁTICAS =====

        # FIX: the session state is now an event INPUT. The previous failure
        # path returned ``state.value`` — the component's app-level default —
        # which silently reset the session (discarding any previously built
        # DB) whenever processing failed.
        @process_btn.click(inputs=[file_upload, state], outputs=[state, process_log])
        def execute_processing(files, current_state):
            try:
                splits = TacticalDocumentProcessor.neutralize_documents(files)
                db = VectorStrikeSystem.deploy_vector_db(splits)
                # A fresh DB invalidates any previously deployed chain.
                return {
                    "db": db,
                    "llm": None,
                    "doc_status": True,
                    "model_status": False,
                }, "✅ ALVOS PROCESSADOS COM SUCESSO"
            except Exception as e:
                # Preserve the session state untouched on failure.
                return current_state, f"☠️ FALHA CRÍTICA: {str(e)}"

        @deploy_btn.click(inputs=[model_selector, temp_slider, state], outputs=[state, deploy_log])
        def deploy_weapon(model, temp, current_state):
            try:
                if not current_state["doc_status"]:
                    raise RuntimeError("ALVOS NÃO PROCESSADOS! EXECUTE A FASE 1")

                llm = LLMWeaponsSystem.activate_weapon(model, temp)
                current_state["llm"] = ConversationalRetrievalChain.from_llm(
                    llm=llm,
                    # k=3 keeps the prompt small enough for the 1–3B models.
                    retriever=current_state["db"].as_retriever(search_kwargs={"k": 3}),
                    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
                    return_source_documents=True,
                )
                current_state["model_status"] = True
                return current_state, f"🚀 {model} PRONTO PARA ENGAGEMENT"
            except Exception as e:
                return current_state, f"💥 FALHA NO ARMAMENTO: {str(e)}"

        @msg_input.submit(inputs=[msg_input, chatbot, state], outputs=[msg_input, chatbot])
        def execute_engagement(command, history, state):
            if not state["model_status"]:
                # Keep the typed command so the user can retry after deploying.
                return command, history + [(command, "⚠️ ARMAMENTO NÃO ATIVADO")]

            try:
                result = state["llm"]({"question": command, "chat_history": history})
                # NOTE(review): assumes every source Document carries a
                # numeric 'page' metadata key (true for PyPDFLoader output) —
                # confirm if other loaders are ever added.
                intel_report = "\n".join(
                    f"🔍 Pg {doc.metadata['page']+1}: {doc.page_content[:100]}..."
                    for doc in result["source_documents"][:3]
                )
                return "", history + [
                    (command, f"🎯 RESPOSTA:\n{result['answer']}\n\n📡 INTELIGÊNCIA:\n{intel_report}")
                ]
            except Exception as e:
                return command, history + [(command, f"☢️ FALHA OPERACIONAL: {str(e)}")]

        @clear_btn.click(inputs=[], outputs=[chatbot])
        def clear_battlefield():
            # Clears the visible transcript only; the chain's internal
            # ConversationBufferMemory is intentionally left untouched.
            return []

    return interface
|
172 |
|
173 |
# ===================================================================
|
174 |
# INICIALIZAÇÃO DO SISTEMA
|
175 |
# ===================================================================
|
176 |
if __name__ == "__main__":
    import os

    # Build the UI and expose it on all interfaces, port 7860 (the standard
    # Hugging Face Spaces port).
    combat_system = deploy_combat_interface()
    combat_system.launch(
        server_name="0.0.0.0",
        server_port=7860,
        # SECURITY: credentials were hard-coded in source. They can now be
        # overridden via environment variables; the original values remain
        # the defaults for backward compatibility, but should be rotated.
        auth=(
            os.environ.get("WARBOT_USER", "commander"),
            os.environ.get("WARBOT_PASS", "tactical123"),
        ),
        share=True,
        show_error=True,
    )
|