rynmurdock committed
Commit e060f2d
Parent: f824106

svc instead of linearsvc -- may be much better
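In practice the swap is small: for binary labels, SVC with kernel='linear' still exposes a coef_ attribute, so the normalized preference direction can be computed exactly as before. A minimal comparison on toy data (the arrays and labels below are illustrative, not from app.py):

```python
import numpy as np
from sklearn.svm import SVC, LinearSVC

# Toy preference data: 8 embeddings, binary like/dislike labels.
X = np.random.randn(8, 16)
y = np.array([1, 0, 1, 1, 0, 0, 1, 0])

# Before: liblinear-based estimator (squared-hinge loss by default).
# dual='auto' requires scikit-learn >= 1.3.
old = LinearSVC(max_iter=50000, dual='auto', class_weight='balanced').fit(X, y)

# After: libsvm-based SVC restricted to a linear kernel; with a linear
# kernel it also exposes coef_, so the downstream code can stay the same.
new = SVC(kernel='linear', max_iter=50000, class_weight='balanced', C=.1).fit(X, y)

print(old.coef_.shape, new.coef_.shape)  # both (1, 16) for binary labels
```

The main behavioral differences are the loss and solver: LinearSVC minimizes a squared-hinge loss via liblinear, while SVC with a linear kernel solves the standard hinge-loss SVM via libsvm; the commit also tightens regularization from the default C=1.0 to C=.1.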

Files changed (1): app.py (+6 -5)
app.py CHANGED
@@ -2,7 +2,7 @@ DEVICE = 'cuda'
 
 import gradio as gr
 import numpy as np
-from sklearn.svm import LinearSVC
+from sklearn.svm import SVC
 from sklearn import preprocessing
 import pandas as pd
 
@@ -55,6 +55,7 @@ pipe.to(device=DEVICE)
 
 @spaces.GPU
 def compile_em():
+    return None# TODO add back
     pipe.unet = torch.compile(pipe.unet, mode='reduce-overhead')
     pipe.vae = torch.compile(pipe.vae, mode='reduce-overhead')
     autoencoder.model.forward = torch.compile(autoencoder.model.forward, backend='inductor', dynamic=True)
@@ -195,11 +196,11 @@ def get_coeff(embs_local, ys):
     feature_embs = scaler.transform(feature_embs)
     print(len(feature_embs), len(ys))
 
-    lin_class = LinearSVC(max_iter=50000, dual='auto', class_weight='balanced').fit(feature_embs, np.array([ys[i] for i in indices]))
-    lin_class.coef_ = torch.tensor(lin_class.coef_, dtype=torch.double)
-    lin_class.coef_ = (lin_class.coef_.flatten() / (lin_class.coef_.flatten().norm())).unsqueeze(0)
+    lin_class = SVC(max_iter=50000, kernel='linear', class_weight='balanced', C=.1).fit(feature_embs, np.array([ys[i] for i in indices]))
+    coef_ = torch.tensor(lin_class.coef_, dtype=torch.double)
+    coef_ = (coef_.flatten() / (coef_.flatten().norm())).unsqueeze(0)
 
-    return lin_class.coef_
+    return coef_
 
 # TODO add to state instead of shared across all
 glob_idx = 0
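For reference, here is how the updated coefficient extraction fits together end to end. This is a self-contained sketch: the estimator call and the normalization mirror the new get_coeff body, but the embeddings, labels, and indices are stand-ins rather than the app's real data. Unlike LinearSVC, SVC.coef_ is a read-only property, which is presumably why the commit stores the normalized direction in a separate coef_ variable instead of assigning it back onto the estimator.

```python
import numpy as np
import torch
from sklearn import preprocessing
from sklearn.svm import SVC

feature_embs = np.random.randn(10, 32)   # stand-in for the rated image embeddings
ys = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]      # stand-in like/dislike labels
indices = list(range(len(ys)))            # stand-in for the selected rating indices

# Standardize the embeddings before fitting, as in get_coeff.
scaler = preprocessing.StandardScaler().fit(feature_embs)
feature_embs = scaler.transform(feature_embs)

# Fit a linear-kernel SVC; class_weight='balanced' compensates for skewed
# like/dislike counts, and C=.1 regularizes more strongly than the default.
lin_class = SVC(max_iter=50000, kernel='linear', class_weight='balanced', C=.1).fit(
    feature_embs, np.array([ys[i] for i in indices]))

# The separating hyperplane's normal, L2-normalized, is the preference direction.
coef_ = torch.tensor(lin_class.coef_, dtype=torch.double)
coef_ = (coef_.flatten() / coef_.flatten().norm()).unsqueeze(0)
print(coef_.shape)  # torch.Size([1, 32])
```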