Update assessment3_maria_maraki.py

assessment3_maria_maraki.py CHANGED (+6 -11)
@@ -7,7 +7,9 @@ Original file is located at
     https://colab.research.google.com/drive/1jm_hI8O4Y0HgNNdWLnkLBIjlzSaGwwBS
 """
 
-
+###########################################################################################################################################################
+# The provided code has undergone minor adjustments from its original source (Colab environment) to ensure compatibility with the Hugging Face ecosystem.
+###########################################################################################################################################################
 
 """Since the dataset **emails.csv** in the [Enron Email Dataset](https://www.kaggle.com/datasets/wcukierski/enron-email-dataset/code) was too big, I split the original dataset into smaller .csv files and then chose one of the split files: ***emails_subset.csv***
 
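The splitting step mentioned in that docstring is not part of this diff, so the exact code is unknown; below is a minimal sketch of one way such a subset could be produced, assuming pandas and the original Kaggle file name emails.csv:

```python
import pandas as pd

# Stream the large CSV in fixed-size chunks instead of loading it all at once,
# writing each chunk out as its own smaller file.
for i, chunk in enumerate(pd.read_csv('emails.csv', chunksize=50_000)):
    chunk.to_csv(f'emails_part_{i}.csv', index=False)
# Any one part (e.g. emails_part_0.csv) could then serve as emails_subset.csv.
```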
@@ -51,10 +53,7 @@ pd.set_option('display.max_columns',None,
               'display.max_colwidth',None
 )
 
-
-drive.mount('/content/drive')
-
-email_data = pd.read_csv('/content/drive/MyDrive/data/emails_subset.csv')
+email_data = pd.read_csv('emails_subset.csv')
 email_data.head()
 
 
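Dropping drive.mount() is the right move outside Colab, but the bare relative path now assumes emails_subset.csv was uploaded to the Space's repository root. A hedged sketch of a fail-fast guard (the check itself is not in the commit):

```python
import os
import pandas as pd

# Give a clearer error than pandas' default if the data file is missing.
if not os.path.exists('emails_subset.csv'):
    raise FileNotFoundError('emails_subset.csv not found; upload it to the Space root.')
email_data = pd.read_csv('emails_subset.csv')
```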
@@ -83,7 +82,7 @@ from langchain.document_loaders import DirectoryLoader
 import warnings
 warnings.filterwarnings('ignore')
 
-openAI_embeddings = OpenAIEmbeddings(openai_api_key=
+openAI_embeddings = OpenAIEmbeddings(openai_api_key=os.environ.get('OPENAI_API_KEY'))
 
 content = []
 for item in email_data.message:
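Moving the key out of the source is the right call; on Hugging Face Spaces a secret named OPENAI_API_KEY set in the Space settings is exposed as an environment variable at runtime. Note that the new line relies on os already being imported earlier in the file (the hunk does not add the import), and that os.environ.get() silently returns None when the variable is absent. A sketch of a stricter lookup, which is an assumption rather than part of the commit:

```python
import os

# Fail fast with an actionable message instead of passing None downstream.
api_key = os.environ.get('OPENAI_API_KEY')
if not api_key:
    raise RuntimeError('OPENAI_API_KEY is not set; add it as a Space secret.')
```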
@@ -116,7 +115,7 @@ The fine-tuning task kept crushing my notebook and I had to restart so I stored
 
 """# Gradio Interface that answers questions related to the case"""
 
-email_data_retrieval = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=
+email_data_retrieval = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=os.environ.get('OPENAI_API_KEY'),
                                                               temperature=0.6,
                                                               top_p=0.5,
                                                               max_tokens=500),
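The hunk cuts off before the chain's retriever and chain_type arguments, so those remain unknown here. For context, this is how a RetrievalQA chain built this way is typically queried in the legacy LangChain API (the question is a made-up example):

```python
# `email_data_retrieval` is the chain assembled above.
answer = email_data_retrieval.run('Who was involved in the off-book partnerships?')
print(answer)
```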
@@ -135,7 +134,3 @@ iface = gradio.Interface(
 
 iface.launch()
 
-
-
-
-
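Only the opening of gradio.Interface( and the closing iface.launch() are visible in this diff, so the wiring in between is assumed. A minimal sketch of how the chain and the interface plausibly connect:

```python
import gradio

def answer_question(question):
    # Delegate to the RetrievalQA chain built above; the function name and
    # single-text-box layout are assumptions, not part of the commit.
    return email_data_retrieval.run(question)

iface = gradio.Interface(fn=answer_question, inputs='text', outputs='text')
iface.launch()  # On Spaces, launch() without share=True is sufficient.
```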