stupidog04 committed
Commit 854af6d
1 Parent(s): 687d4c6

commit files to HF hub

README.md CHANGED
```diff
@@ -15,7 +15,7 @@ model-index:
   metrics:
   - name: Accuracy
     type: accuracy
-    value: 0.8866666555404663
+    value: 0.9196428656578064
 ---
 
 # krenzcolor_chkpt_classifier
```
pair_classification_pipeline.py CHANGED
```diff
@@ -52,5 +52,5 @@ class PairClassificationPipeline(ImageClassificationPipeline):
     def extract_split_feature(self, left_image, right_image):
         model_inputs = self.feature_extractor(images=left_image, return_tensors=self.framework)
         right_inputs = self.feature_extractor(images=right_image, return_tensors=self.framework)
-        model_inputs['pixel_values'] = torch.cat([model_inputs['pixel_values'],right_inputs['pixel_values']], dim=1)
+        model_inputs['pixel_values'] = torch.cat([right_inputs['pixel_values'], model_inputs['pixel_values']], dim=1)
         return model_inputs
```
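
The only functional code change in this commit is the order of the channel-wise concatenation: the right image's pixel values now come first. A minimal sketch of the effect (not from the repo; the batch size of 1 and the 224x224 resolution are assumptions): each preprocessed image arrives as a (1, 3, H, W) tensor, and `torch.cat` along `dim=1` stacks the pair into one six-channel input, so the swap changes only which image occupies the first three channels, not the shape.

```python
import torch

# Stand-ins for feature_extractor output; the (1, 3, 224, 224) shape is an
# assumed batch size and resolution, not taken from this repo.
left = torch.randn(1, 3, 224, 224)   # pixel_values for left_image
right = torch.randn(1, 3, 224, 224)  # pixel_values for right_image

# Before this commit: left image fills channels 0-2, right fills 3-5.
before = torch.cat([left, right], dim=1)
# After this commit: right image fills channels 0-2, left fills 3-5.
after = torch.cat([right, left], dim=1)

print(before.shape)  # torch.Size([1, 6, 224, 224])
print(after.shape)   # torch.Size([1, 6, 224, 224])
```

Because the model's first layer assigns a fixed meaning to each input channel, the channel order must match the order used during training, which would explain why pytorch_model.bin is updated in the same commit as the two pipeline files.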
pipeline.py CHANGED
```diff
@@ -52,5 +52,5 @@ class PairClassificationPipeline(ImageClassificationPipeline):
     def extract_split_feature(self, left_image, right_image):
         model_inputs = self.feature_extractor(images=left_image, return_tensors=self.framework)
         right_inputs = self.feature_extractor(images=right_image, return_tensors=self.framework)
-        model_inputs['pixel_values'] = torch.cat([model_inputs['pixel_values'],right_inputs['pixel_values']], dim=1)
+        model_inputs['pixel_values'] = torch.cat([right_inputs['pixel_values'], model_inputs['pixel_values']], dim=1)
         return model_inputs
```
pytorch_model.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca3079bf8792c93f629db09fda958a7facd2440eefeb187fcb39cafa96cf661d
+oid sha256:b6abc36104093d15f2ff2ceddd0dab10748660cce399abaecd10097400464a3b
 size 345635761
```
runs/events.out.tfevents.1667234221.sa103.14351.0 ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3acca13a82ad16456be038d21ef79542772dc06fda03610593019310260404f7
+size 1981
```