frances-dean committed on
Commit d2993f4 · verified · 1 Parent(s): 7173196

Update app.py

Files changed (1)
  1. app.py +14 -15
app.py CHANGED
@@ -1,12 +1,11 @@
 import gradio as gr
-import os
+#import os
 import matplotlib.pyplot as plt
 from scipy.integrate import odeint
 import torch
 from torch.utils import data
 from torch.utils.data import DataLoader, Dataset
 from torch import nn, optim
-import os
 from skimage.transform import rescale, resize
 from torch import nn, optim
 import torch.nn.functional as F
@@ -36,8 +35,8 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 sequences_all = []
 info_data_all = []
-path = '/Users/FDean/Desktop/Physics_Informed_Transfer_Learning/EchoNet-Dynamic'
-output_path = '/Users/FDean/Desktop/Physics_Informed_Transfer_Learning'
+path = 'EchoNet-Dynamic'
+output_path = ''
 
 class Echo(torchvision.datasets.VisionDataset):
     """EchoNet-Dynamic Dataset.
@@ -120,10 +119,10 @@ class Echo(torchvision.datasets.VisionDataset):
         self.fnames, self.outcome = [], []
 
         if self.split == "EXTERNAL_TEST":
-            self.fnames = sorted(os.listdir(self.external_test_location))
+            # self.fnames = sorted(os.listdir(self.external_test_location))
         else:
             # Load video-level labels
-            with open(os.path.join(self.root, "FileList.csv")) as f:
+            with open(f"{self.root}FileList.csv")) as f:
                 data = pandas.read_csv(f)
             data["Split"].map(lambda x: x.upper())
 
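Note: on the new side, the EXTERNAL_TEST branch keeps only a comment (an if block with no statement is a syntax error), and open(f"{self.root}FileList.csv")) both drops the path separator and keeps a stray closing parenthesis left over from the removed os.path.join call; the VolumeTracings.csv open further down has the same two issues. A hedged sketch of an os-free version of this block, assuming from pathlib import Path is added at the top of app.py (this is not what the commit contains):

# Sketch only: os-free replacement for this block.
if self.split == "EXTERNAL_TEST":
    # Path.iterdir() stands in for os.listdir(); .name keeps just the filename.
    self.fnames = sorted(p.name for p in Path(self.external_test_location).iterdir())
else:
    # Load video-level labels; the "/" operator supplies the missing separator.
    with open(Path(self.root) / "FileList.csv") as f:
        data = pandas.read_csv(f)
    data["Split"].map(lambda x: x.upper())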
@@ -132,7 +131,7 @@ class Echo(torchvision.datasets.VisionDataset):
 
             self.header = data.columns.tolist()
             self.fnames = data["FileName"].tolist()
-            self.fnames = [fn + ".avi" for fn in self.fnames if os.path.splitext(fn)[1] == ""]  # Assume avi if no suffix
+            # self.fnames = [fn + ".avi" for fn in self.fnames if os.path.splitext(fn)[1] == ""]  # Assume avi if no suffix
             self.outcome = data.values.tolist()
 
             # Check that files are present
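Note: commenting this line out means FileName entries without an extension no longer get an .avi suffix (the original comment suggests FileList.csv names may lack one). The same filter can be written without os.path.splitext; a sketch, assuming from pathlib import PurePath is available:

# Sketch only: keep the "assume .avi if no suffix" rule without os.
self.fnames = [fn + ".avi" for fn in self.fnames if PurePath(fn).suffix == ""]  # Assume avi if no suffix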
@@ -149,7 +148,7 @@ class Echo(torchvision.datasets.VisionDataset):
             self.frames = collections.defaultdict(list)
             self.trace = collections.defaultdict(_defaultdict_of_lists)
 
-            with open(os.path.join(self.root, "VolumeTracings.csv")) as f:
+            with open(f"{self.root}VolumeTracings.csv")) as f:
                 header = f.readline().strip().split(",")
                 assert header == ["FileName", "X1", "Y1", "X2", "Y2", "Frame"]
 
@@ -175,11 +174,11 @@ class Echo(torchvision.datasets.VisionDataset):
     def __getitem__(self, index):
         # Find filename of video
         if self.split == "EXTERNAL_TEST":
-            video = os.path.join(self.external_test_location, self.fnames[index])
+            # video = os.path.join(self.external_test_location, self.fnames[index])
         elif self.split == "CLINICAL_TEST":
-            video = os.path.join(self.root, "ProcessedStrainStudyA4c", self.fnames[index])
+            # video = os.path.join(self.root, "ProcessedStrainStudyA4c", self.fnames[index])
         else:
-            video = os.path.join(self.root, "Videos", self.fnames[index])
+            # video = os.path.join(self.root, "Videos", self.fnames[index])
 
         # Load video into np.array
         video = echonet.utils.loadvideo(video).astype(np.float32)
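Note: with all three assignments commented out, every branch body is empty (a syntax error) and video is never defined before echonet.utils.loadvideo(video) below. A hedged sketch of the same branching in the f-string style this commit uses elsewhere (an assumption, not what was committed):

# Sketch only: os-free path construction for the three cases.
if self.split == "EXTERNAL_TEST":
    video = f"{self.external_test_location}/{self.fnames[index]}"
elif self.split == "CLINICAL_TEST":
    video = f"{self.root}/ProcessedStrainStudyA4c/{self.fnames[index]}"
else:
    video = f"{self.root}/Videos/{self.fnames[index]}"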
@@ -379,10 +378,10 @@ class Interpolator(nn.Module):
 
 # Initialize the neural network
 net = Interpolator()
-net.load_state_dict(torch.load('/Users/FDean/Desktop/Physics_Informed_Transfer_Learning/final_model_weights/interp6_7param_weight.pt'))
+net.load_state_dict(torch.load('final_model_weights/interp6_7param_weight.pt'))
 print("Done loading interpolator!")
 
-weights_path = '/Users/FDean/Desktop/Physics_Informed_Transfer_Learning/final_model_weights/202_full_echonet_7param_Vloss_epoch_200_lr_0.001_weight_best_model.pt'
+weights_path = 'final_model_weights/202_full_echonet_7param_Vloss_epoch_200_lr_0.001_weight_best_model.pt'
 model = NEW3DCNN(num_parameters = 7)
 model.load_state_dict(torch.load(weights_path))
 model.to(device)
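Note: these relative paths assume final_model_weights/ sits next to app.py. Separately, torch.load restores tensors to the device they were saved from by default, so checkpoints saved on a GPU machine can fail to load on CPU-only Space hardware; passing map_location avoids that (a general PyTorch pattern, not something this commit changes):

# Sketch only: load checkpoints onto whatever device is actually available.
net.load_state_dict(torch.load('final_model_weights/interp6_7param_weight.pt', map_location=device))
model.load_state_dict(torch.load(weights_path, map_location=device))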
@@ -492,14 +491,14 @@ def pvloop_simulator_plot_only(Rm, Ra, Emax, Emin, Vd, Tc, start_v):
 
 def generate_example():
     # get random input
-    data_path = '/Users/FDean/Desktop/Physics_Informed_Transfer_Learning/EchoNet-Dynamic'
+    data_path = 'EchoNet-Dynamic'
     image_data = Echo(root = data_path, split = 'all', target_type=['Filename','LargeIndex','SmallIndex'])
     image_loaded_data = DataLoader(image_data, batch_size=1, shuffle=True)
     val_data = next(iter(image_loaded_data))
     #create_echo_clip(val_data,'test')
     val_seq = val_data[0]
     filename = val_data[1][0][0]
-    video = os.path.join(os.getcwd(), f"EchoNet-Dynamic/Videos/{filename}")
+    video = f"EchoNet-Dynamic/Videos/{filename}"
     val_tensor = torch.tensor(val_seq, dtype=torch.float32)
     results = model(val_tensor)
 
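Note: with every file now resolved relative to the working directory, a missing dataset or weights folder only surfaces deep inside Echo or torch.load. A quick startup check is one way to fail fast; a minimal sketch with illustrative paths, not part of app.py:

# Sketch only: verify the expected relative layout before building the app.
from pathlib import Path

for required in ("EchoNet-Dynamic/FileList.csv",
                 "EchoNet-Dynamic/VolumeTracings.csv",
                 "final_model_weights"):
    if not Path(required).exists():
        raise FileNotFoundError(f"Expected '{required}' in the app's working directory")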