Joekd608 committed on
Commit
492375f
1 Parent(s): 7e6f807
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -10,17 +10,17 @@ st.title("Text GenAI Model")
10
  st.subheader("Answer Random Questions Using Hugging Face Models")
11
 
12
  # Fetch Hugging Face token from Streamlit Secrets
13
- HF_TOKEN = secret.HF_TOKEN
14
- access_token_read = st.secrets[HF_TOKEN] # Ensure this is set in your Streamlit Cloud Secrets
15
 
16
- # Free up GPU memory (if using GPU)
17
- torch.cuda.empty_cache()
18
 
19
- # Set environment variable to avoid fragmentation
20
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
21
 
22
- # Login to Hugging Face Hub using the access token
23
- login(token=access_token_read)
24
 
25
  # Initialize the text generation pipeline with GPT-2 model
26
  pipe = pipeline("text-generation", model="distilbert/distilgpt2") # Using CPU
 
10
  st.subheader("Answer Random Questions Using Hugging Face Models")
11
 
12
  # Fetch Hugging Face token from Streamlit Secrets
13
+ # HF_TOKEN = secret.HF_TOKEN
14
+ # access_token_read = st.secrets[HF_TOKEN] # Ensure this is set in your Streamlit Cloud Secrets
15
 
16
+ # # Free up GPU memory (if using GPU)
17
+ # torch.cuda.empty_cache()
18
 
19
+ # # Set environment variable to avoid fragmentation
20
+ # os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
21
 
22
+ # # Login to Hugging Face Hub using the access token
23
+ # login(token=access_token_read)
24
 
25
  # Initialize the text generation pipeline with GPT-2 model
26
  pipe = pipeline("text-generation", model="distilbert/distilgpt2") # Using CPU