try transitioning to HF Docker space!
- Dockerfile +17 -0
- README.md +2 -3
- app_utils/config.py +3 -9
- requirements.txt +4 -4
Dockerfile
ADDED
@@ -0,0 +1,17 @@
+FROM deepset/haystack:base-cpu-v1.23.0
+
+COPY requirements.txt .
+RUN pip install -r requirements.txt
+
+# copy only the application files in /app
+# Streamlit does not allow running an app from the root directory
+COPY Rock_fact_checker.py app/
+COPY pages app/pages
+COPY app_utils app/app_utils
+COPY data app/data
+
+WORKDIR app
+
+EXPOSE 8501
+
+ENTRYPOINT ["streamlit", "run", "Rock_fact_checker.py"]
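
A minimal sketch of building and running this image locally, to mirror what the Space will do (the "fact-checker" tag is illustrative, not part of the commit):

    # build the image from the repository root (tag name is an assumption)
    docker build -t fact-checker .
    # run it, mapping the Streamlit port exposed above
    docker run -p 8501:8501 fact-checker
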
README.md
CHANGED
@@ -3,9 +3,8 @@ title: Fact Checking rocks!
 emoji: 🎸
 colorFrom: purple
 colorTo: blue
-sdk: streamlit
-sdk_version: 1.19.0
-app_file: Rock_fact_checker.py
+sdk: docker
+app_port: 8501
 pinned: true
 models: [sentence-transformers/msmarco-distilbert-base-tas-b, microsoft/deberta-v2-xlarge-mnli, google/flan-t5-large]
 tags: [fact-checking, rock, natural language inference, dense retrieval, large language models, haystack, neural search]
app_utils/config.py
CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import os
 
 INDEX_DIR = "data/index"
 STATEMENTS_PATH = "data/statements.txt"
@@ -9,17 +9,11 @@ RETRIEVER_TOP_K = 5
 
 # In HF Space, we use microsoft/deberta-v2-xlarge-mnli
 # for local testing, a smaller model is better
-try:
-    NLI_MODEL = st.secrets["NLI_MODEL"]
-except:
-    NLI_MODEL = "valhalla/distilbart-mnli-12-1"
+NLI_MODEL = os.environ.get("NLI_MODEL", "valhalla/distilbart-mnli-12-1")
 print(f"Used NLI model: {NLI_MODEL}")
 
 
 # In HF Space, we use google/flan-t5-large
 # for local testing, a smaller model is better
-try:
-    PROMPT_MODEL = st.secrets["PROMPT_MODEL"]
-except:
-    PROMPT_MODEL = "google/flan-t5-small"
+PROMPT_MODEL = os.environ.get("PROMPT_MODEL", "google/flan-t5-small")
 print(f"Used Prompt model: {PROMPT_MODEL}")
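
Since the models are now read with os.environ.get, the Space can override the small local defaults through environment variables instead of Streamlit secrets. A hedged sketch of a local run (the image tag is the same assumption as above; the model names come from the comments in config.py):

    # override both model defaults at container start
    docker run -p 8501:8501 \
      -e NLI_MODEL=microsoft/deberta-v2-xlarge-mnli \
      -e PROMPT_MODEL=google/flan-t5-large \
      fact-checker
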
requirements.txt
CHANGED
@@ -1,10 +1,10 @@
-farm-haystack[faiss,inference]==1.23.0
+# we now use the Haystack Docker image, so the following line is not needed anymore
+# farm-haystack[faiss,inference]==1.23.0
+
 haystack-entailment-checker
 plotly==5.14.1
 pydantic<2
 
-
-# uncomment for local installation
-# streamlit==1.19.0
+streamlit==1.19.0
 
 altair<5
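
With farm-haystack commented out here (the Docker base image now provides it), running the app outside Docker requires installing it separately. A sketch, assuming the same pin as the commented line above:

    # install haystack first, then the remaining requirements
    pip install "farm-haystack[faiss,inference]==1.23.0"
    pip install -r requirements.txt
    streamlit run Rock_fact_checker.py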