Spaces:
Running
acecalisto3
committed on
Commit • d2213e9
1 Parent(s): 83d7dd2
Update app.py
app.py
CHANGED
@@ -1,12 +1,43 @@
 import os
 import subprocess
 import streamlit as st
-from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoModel
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
 import black
 from pylint import lint
 from io import StringIO
 import openai
 import sys
+import torch
+
+# Load pre-trained RAG retriever
+rag_retriever = RagRetriever.from_pretrained("facebook/rag-base")
+
+# Load pre-trained chat model
+chat_model = AutoModelForSeq2SeqLM.from_pretrained("google/chat-model-base")
+
+# Load tokenizer
+tokenizer = AutoTokenizer.from_pretrained("google/chat-model-base")
+
+def process_input(user_input):
+    # Input pipeline: Tokenize and preprocess user input
+    input_ids = tokenizer(user_input, return_tensors="pt").input_ids
+    attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
+
+    # RAG model: Generate response
+    with torch.no_grad():
+        output = rag_retriever(input_ids, attention_mask=attention_mask)
+        response = output.generator_outputs[0].sequences[0]
+
+    # Chat model: Refine response
+    chat_input = tokenizer(response, return_tensors="pt")
+    chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
+    chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
+    with torch.no_grad():
+        chat_output = chat_model(**chat_input)
+        refined_response = chat_output.sequences[0]
+
+    # Output pipeline: Return final response
+    return refined_response

 # Set your OpenAI API key here
 openai.api_key = "YOUR_OPENAI_API_KEY"
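Note: in the transformers API, RagRetriever on its own only fetches passages; text generation normally goes through a RAG model that wraps the retriever, rather than calling the retriever directly as process_input does above. The checkpoint names in this commit ("facebook/rag-base", "google/chat-model-base") are kept as-is from the diff. For reference only, a minimal retrieve-and-generate sketch using the public facebook/rag-sequence-nq checkpoint with a dummy index (the rag_answer helper and all names below are illustrative, not part of this commit):

import torch
from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration

# Public RAG checkpoint; use_dummy_dataset avoids downloading the full wiki index.
rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
)
rag_model = RagSequenceForGeneration.from_pretrained(
    "facebook/rag-sequence-nq", retriever=retriever
)

def rag_answer(question: str) -> str:
    # Tokenize the question; the model retrieves passages and generates an answer.
    inputs = rag_tokenizer(question, return_tensors="pt")
    with torch.no_grad():
        generated = rag_model.generate(input_ids=inputs["input_ids"])
    return rag_tokenizer.batch_decode(generated, skip_special_tokens=True)[0]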
@@ -355,6 +386,7 @@ elif app_mode == "Workspace Chat App":
     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
     if st.button("Add Code"):
         add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
+        st.session_state.terminal_history.append((f"Add Code: {code_to_add}", add_code_status))
         st.success(add_code_status)

     # Terminal Interface with Project Context
@@ -362,6 +394,7 @@ elif app_mode == "Workspace Chat App":
     terminal_input = st.text_input("Enter a command within the workspace:")
     if st.button("Run Command"):
         terminal_output = terminal_interface(terminal_input, project_name)
+        st.session_state.terminal_history.append((terminal_input, terminal_output))
         st.code(terminal_output, language="bash")

     # Chat Interface for Guidance
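Note: both new st.session_state.terminal_history.append(...) calls assume the history list already exists in session state; that initialization is not visible in this diff. A minimal sketch of the usual Streamlit guard (key name taken from the diff, placement assumed):

import streamlit as st

# Create the shared history list once per session so later .append() calls
# do not fail on a missing key.
if "terminal_history" not in st.session_state:
    st.session_state.terminal_history = []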