phyloforfun committed
Commit 12ea480
1 Parent(s): b2935d3

fix safety check

vouchervision/OCR_google_cloud_vision.py CHANGED
@@ -809,39 +809,42 @@ class SafetyCheck():
         return credentials
 
     def check_for_inappropriate_content(self, file_stream):
-        LEVEL = 2
-        # content = file_stream.read()
-        file_stream.seek(0)  # Reset file stream position to the beginning
-        content = file_stream.read()
-        image = vision.Image(content=content)
-        response = self.client.safe_search_detection(image=image)
-        safe = response.safe_search_annotation
-
-        likelihood_name = (
-            "UNKNOWN",
-            "VERY_UNLIKELY",
-            "UNLIKELY",
-            "POSSIBLE",
-            "LIKELY",
-            "VERY_LIKELY",
-        )
-        print("Safe search:")
-
-        print(f" adult*: {likelihood_name[safe.adult]}")
-        print(f" medical*: {likelihood_name[safe.medical]}")
-        print(f" spoofed: {likelihood_name[safe.spoof]}")
-        print(f" violence*: {likelihood_name[safe.violence]}")
-        print(f" racy: {likelihood_name[safe.racy]}")
-
-        # Check the levels of adult, violence, racy, etc. content.
-        if (safe.adult > LEVEL or
-            safe.medical > LEVEL or
-            # safe.spoof > LEVEL or
-            safe.violence > LEVEL #or
-            # safe.racy > LEVEL
-            ):
-            print("Found violation")
-            return True  # The image violates safe search guidelines.
-
-        print("Found NO violation")
-        return False  # The image is considered safe.
+        try:
+            LEVEL = 2
+            # content = file_stream.read()
+            file_stream.seek(0)  # Reset file stream position to the beginning
+            content = file_stream.read()
+            image = vision.Image(content=content)
+            response = self.client.safe_search_detection(image=image)
+            safe = response.safe_search_annotation
+
+            likelihood_name = (
+                "UNKNOWN",
+                "VERY_UNLIKELY",
+                "UNLIKELY",
+                "POSSIBLE",
+                "LIKELY",
+                "VERY_LIKELY",
+            )
+            print("Safe search:")
+
+            print(f" adult*: {likelihood_name[safe.adult]}")
+            print(f" medical*: {likelihood_name[safe.medical]}")
+            print(f" spoofed: {likelihood_name[safe.spoof]}")
+            print(f" violence*: {likelihood_name[safe.violence]}")
+            print(f" racy: {likelihood_name[safe.racy]}")
+
+            # Check the levels of adult, violence, racy, etc. content.
+            if (safe.adult > LEVEL or
+                safe.medical > LEVEL or
+                # safe.spoof > LEVEL or
+                safe.violence > LEVEL #or
+                # safe.racy > LEVEL
+                ):
+                print("Found violation")
+                return True  # The image violates safe search guidelines.
+
+            print("Found NO violation")
+            return False  # The image is considered safe.
+        except:
+            return False  # The image is considered safe. TEMPORARY FIX TODO
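
For reference, a minimal standalone sketch (not part of this commit) of the same Cloud Vision SafeSearch flow that check_for_inappropriate_content wraps; the function name is_image_safe and its arguments are hypothetical, and running it requires the google-cloud-vision package plus valid Google Cloud credentials:

    # Hypothetical illustration only -- mirrors the SafeSearch call used above.
    from google.cloud import vision

    def is_image_safe(image_path, level=2):
        client = vision.ImageAnnotatorClient()  # needs GCP application credentials
        with open(image_path, "rb") as f:
            image = vision.Image(content=f.read())
        safe = client.safe_search_detection(image=image).safe_search_annotation
        # Likelihood enum values run UNKNOWN=0 .. VERY_LIKELY=5, so level=2
        # flags POSSIBLE and above for the adult, medical, and violence categories.
        return not (safe.adult > level or safe.medical > level or safe.violence > level)

Note that the bare except in the committed change treats any API or decoding failure as "safe", which the TODO comment marks as a temporary measure.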