acecalisto3 committed
Commit 2a5ea3c
1 Parent(s): ae4ec08

Update app.py

Files changed (1)
  1. app.py +15 -5
app.py CHANGED
@@ -26,8 +26,15 @@ def chat_interface(input_text):
     except EnvironmentError as e:
         return f'Error loading model: {e}'
 
+    # Truncate input text to avoid exceeding the model's maximum length
+    max_input_length = 900
+    input_ids = tokenizer.encode(input_text, return_tensors="pt")
+    if input_ids.shape[1] > max_input_length:
+        input_ids = input_ids[:, :max_input_length]
+
     # Generate chatbot response
-    response = generator(input_text, max_new_tokens=50, num_return_sequences=1, do_sample=True)[0]['generated_text']
+    outputs = model.generate(input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return response
 
 # 2. Terminal
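For context, here is how chat_interface plausibly reads once this hunk is applied. This is a minimal sketch, not the commit's actual code: the diff shows only the except clause of the loading block, so the try body and the 'gpt2' model name used here are assumptions; everything from the truncation block onward is taken verbatim from the hunk.

from transformers import AutoModelForCausalLM, AutoTokenizer

def chat_interface(input_text):
    # Assumed loading block; the diff shows only its except clause,
    # and the real model name lives outside this hunk.
    try:
        model = AutoModelForCausalLM.from_pretrained('gpt2')  # placeholder model name
        tokenizer = AutoTokenizer.from_pretrained('gpt2')
    except EnvironmentError as e:
        return f'Error loading model: {e}'

    # Truncate input text to avoid exceeding the model's maximum length
    max_input_length = 900
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, :max_input_length]

    # Generate chatbot response
    outputs = model.generate(input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

Note that slicing input_ids[:, :max_input_length] keeps only the first 900 tokens and silently drops the rest; the tokenizer can do the same in one step via tokenizer(input_text, truncation=True, max_length=900, return_tensors="pt"). Also, decoding outputs[0] returns the prompt plus the continuation, since generate prepends the input tokens to what it samples.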
@@ -112,7 +119,7 @@ def summarize_text(text):
 
 # 6. Code Generation
 def generate_code(idea):
-    """Generates code based on a given idea using the bigscience/T0_3B model.
+    """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model.
 
     Args:
         idea: The idea for the code to be generated.
@@ -122,9 +129,12 @@ def generate_code(idea):
     """
 
     # Load the code generation model
-    model_name = 'bigscience/T0_3B' # Choose your model
-    model = AutoModelForCausalLM.from_pretrained(model_name)
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model_name = 'EleutherAI/gpt-neo-2.7B'
+    try:
+        model = AutoModelForCausalLM.from_pretrained(model_name)
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+    except EnvironmentError as e:
+        return f'Error loading model: {e}'
 
     # Generate the code
     input_text = f"""
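Putting the two generate_code hunks together, the updated function plausibly starts as below. The docstring, model name, and try/except come from the diff; the diff truncates at input_text = f""", so the prompt body and the generation call that follow are placeholders, not the committed code.

from transformers import AutoModelForCausalLM, AutoTokenizer

def generate_code(idea):
    """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model."""
    # Load the code generation model
    model_name = 'EleutherAI/gpt-neo-2.7B'
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f'Error loading model: {e}'

    # Generate the code; the prompt wording below is a placeholder, since the
    # diff cuts off inside the f-string.
    input_text = f"Write Python code for the following idea:\n{idea}\n"
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    outputs = model.generate(input_ids, max_new_tokens=200, do_sample=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

Loading inside the function means the 2.7B-parameter checkpoint is re-read on every call; caching the model and tokenizer at module level would avoid that cost.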
 