Commit 561ac7c by Carlos Salgado (1 parent: 80d18e0)

call username variable in workflow, display model name

Files changed:
- .github/workflows/hugging_face.yml (+3 -4)
- app.py (+4 -3)
- scripts.py (+2 -1)
.github/workflows/hugging_face.yml  CHANGED

@@ -12,11 +12,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
-          lfs: true
-      - name: Navigate to frontend directory
-        run: cd ./frontend
+          lfs: true
       - name: Push to hub
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
-
+          HF_USER: ${{ variables.HF_USER }}
+        run: git push https://$HF_USER:$HF_TOKEN@huggingface.co/spaces/$HF_USER/docverifyrag main
 
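Minor aside: GitHub Actions does not define a "variables" context; repository and organization configuration variables are read through the "vars" context, so the new env line would normally be written HF_USER: ${{ vars.HF_USER }}. As committed, ${{ variables.HF_USER }} is likely to fail workflow validation with an unrecognized named-value error.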
app.py  CHANGED

@@ -1,9 +1,9 @@
-import streamlit as st
 import io
 import os
+import streamlit as st
 import tempfile
 
-from scripts import generate_metadata, ingest
+from scripts import generate_metadata, ingest, model_name
 
 
 st.title('DocVerifyRAG')

@@ -19,8 +19,9 @@ if uploaded_file is not None:
     st.write(f'Created temporary file {file_path}')
 
     docs = ingest(file_path)
+    st.write('## Querying Together.ai API')
     metadata = generate_metadata(docs)
-    st.write('##
+    st.write(f'## Metadata Generated by {model_name}')
     st.write(metadata)
 
     # Clean up the temporary file
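For orientation, here is a minimal sketch of the upload flow these hunks sit inside after the commit. Only the lines shown in the hunks are verbatim; the file uploader and the tempfile handling are assumptions inferred from the visible names (uploaded_file, file_path, the cleanup comment):

import io
import os
import tempfile

import streamlit as st

from scripts import generate_metadata, ingest, model_name

st.title('DocVerifyRAG')

# Assumed uploader widget; it sits outside the diff hunks
uploaded_file = st.file_uploader('Upload a document')

if uploaded_file is not None:
    # Assumed: persist the upload so ingest() can read it from a path
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(uploaded_file.getvalue())
        file_path = tmp.name
    st.write(f'Created temporary file {file_path}')

    docs = ingest(file_path)
    st.write('## Querying Together.ai API')
    metadata = generate_metadata(docs)
    st.write(f'## Metadata Generated by {model_name}')
    st.write(metadata)

    # Clean up the temporary file
    os.remove(file_path)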
scripts.py  CHANGED

@@ -72,8 +72,9 @@ def generate_metadata(docs):
     )
 
     # Call the LLM with the JSON schema
+    model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
     chat_completion = client.chat.completions.create(
-        model=
+        model=model_name,
         messages=[
             {
                 "role": "system",
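One caveat worth flagging: the hunk indentation places the model_name assignment inside generate_metadata, while app.py now does "from scripts import model_name", which only resolves if the name exists at module scope; a function-local assignment would make that import raise ImportError when app.py loads. A minimal sketch of the module-level arrangement, with the Together client setup assumed (the diff only shows the create() call on an existing client object):

import os

from together import Together  # assumed SDK; the diff only shows `client`

# Module scope, so `from scripts import model_name` works in app.py
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"

client = Together(api_key=os.environ["TOGETHER_API_KEY"])  # assumed initialization

def generate_metadata(docs):
    # Call the LLM with the JSON schema
    chat_completion = client.chat.completions.create(
        model=model_name,
        messages=[
            {"role": "system", "content": "Extract document metadata as JSON."},  # placeholder prompt
            {"role": "user", "content": str(docs)},  # placeholder payload
        ],
    )
    return chat_completion.choices[0].message.content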