Takashi Itoh committed on
Commit df7ef80
1 Parent(s): 5c275ae
Files changed (1)
  1. app.py +2 -17
app.py CHANGED
@@ -43,14 +43,6 @@ def smiles_to_image(smiles):
     return None
 
 
-# Function to get canonical SMILES
-def get_canonical_smiles(smiles):
-    mol = Chem.MolFromSmiles(smiles)
-    if mol:
-        return Chem.MolToSmiles(mol, canonical=True)
-    return None
-
-
 # Dictionary for SMILES strings and corresponding images (you can replace with your actual image paths)
 smiles_image_mapping = {
     "Mol 1": {"smiles": "C=C(C)CC(=O)NC[C@H](CO)NC(=O)C=Cc1ccc(C)c(Cl)c1", "image": "img/img1.png"},
@@ -72,9 +64,6 @@ fusion_available = ["Concat"]
 
 
 # Function to handle evaluation and logging
-def save_rep(models, dataset, task_type, eval_output):
-    return
-
 def evaluate_and_log(models, dataset, task_type, eval_output, state):
     task_dic = {'Classification': 'CLS', 'Regression': 'RGR'}
     result = f"{eval_output}"
@@ -133,16 +122,12 @@ def generate(latent_vector, mask):
     decoder_output = gen_model.generate(encoder_outputs=encoder_outputs, attention_mask=mask,
                                         max_new_tokens=64, do_sample=True, top_k=5, top_p=0.95, num_return_sequences=1)
     selfies = gen_tokenizer.batch_decode(decoder_output, skip_special_tokens=True)
-    outs = []
-    for i in selfies:
-        outs.append(sf.decoder(re.sub(r'\]\s*(.*?)\s*\[', r']\1[', i)))
-    return outs
+    return [sf.decoder(re.sub(r'\]\s*(.*?)\s*\[', r']\1[', i)) for i in selfies]
 
 
 def perturb_latent(latent_vecs, noise_scale=0.5):
-    modified_vec = torch.tensor(np.random.uniform(0, 1, latent_vecs.shape) * noise_scale,
-                                dtype=torch.float32) + latent_vecs
-    return modified_vec
+    return torch.tensor(np.random.uniform(0, 1, latent_vecs.shape) * noise_scale,
+                        dtype=torch.float32) + latent_vecs
 
 
 def encode(selfies):
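
In generate, the explicit accumulation loop is collapsed into an equivalent list comprehension. A minimal equivalence check, assuming the selfies package is imported as sf (matching the alias in app.py) and using an illustrative decoded string with stray whitespace between SELFIES tokens, which is the case the re.sub handles:

    import re
    import selfies as sf

    decoded = ["[C] [C] [O]"]  # illustrative tokenizer output, not real model output

    # Removed loop.
    outs = []
    for i in decoded:
        outs.append(sf.decoder(re.sub(r'\]\s*(.*?)\s*\[', r']\1[', i)))

    # New comprehension: the re.sub deletes whitespace between "]" and "["
    # so sf.decoder receives a well-formed SELFIES string.
    new_outs = [sf.decoder(re.sub(r'\]\s*(.*?)\s*\[', r']\1[', i)) for i in decoded]

    assert outs == new_outs == ["CCO"]  # ethanol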
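
perturb_latent likewise now returns its expression directly instead of binding modified_vec first; the arithmetic is unchanged. A small usage sketch with a hypothetical latent shape (not the model's actual dimension):

    import numpy as np
    import torch

    def perturb_latent(latent_vecs, noise_scale=0.5):
        # Additive uniform noise drawn from [0, noise_scale), cast to float32.
        return torch.tensor(np.random.uniform(0, 1, latent_vecs.shape) * noise_scale,
                            dtype=torch.float32) + latent_vecs

    latent = torch.zeros(2, 8)  # hypothetical latent batch
    perturbed = perturb_latent(latent)
    assert perturbed.shape == latent.shape
    assert (perturbed >= 0).all() and (perturbed < 0.5).all()  # zeros + [0, 0.5) noise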