Shaltiel committed
Commit ca54606 · 1 Parent(s): 86e6b81

Added support for large models

Files changed (1): main_backend_lighteval.py (+10 -2)
main_backend_lighteval.py CHANGED
@@ -63,8 +63,16 @@ def run_auto_eval():
     # This needs to be done
     #instance_size, instance_type = get_instance_for_model(eval_request)
     # For GPU
-    # instance_size, instance_type = "small", "g4dn.xlarge"
-    instance_size, instance_type = "medium", "g5.2xlarge"
+    if not eval_request or eval_request.params < 0:
+        raise ValueError("Couldn't detect number of params, please make sure the metadata is available")
+    elif eval_request.params < 4:
+        instance_size, instance_type = "small", "g4dn.xlarge"
+    elif eval_request.params < 9:
+        instance_size, instance_type = "medium", "g5.2xlarge"
+    elif eval_request.params < 24:
+        instance_size, instance_type = "large", "g4dn.12xlarge"
+    else:
+        raise ValueError("Number of params too big, can't run this model")
     # For CPU
     # instance_size, instance_type = "medium", "c6i"
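The logic above replaces the hardcoded instance choice with a tiered mapping from the model's parameter count (in billions) to an AWS GPU instance. Below is a minimal standalone sketch of that mapping, factored into a helper named after the commented-out `get_instance_for_model` call in the source. The `EvalRequest` dataclass and the exact field layout here are hypothetical stand-ins; only a `params` attribute holding the parameter count in billions (negative when the metadata is missing) is assumed.

```python
# Sketch only: EvalRequest is a hypothetical stand-in for the backend's
# request object; the thresholds and instance types mirror the commit above.
from dataclasses import dataclass


@dataclass
class EvalRequest:
    model: str
    params: float = -1  # billions of parameters; -1 when metadata is unavailable


def get_instance_for_model(eval_request: EvalRequest) -> tuple[str, str]:
    """Map a model's parameter count to an (instance_size, instance_type) pair."""
    if not eval_request or eval_request.params < 0:
        raise ValueError("Couldn't detect number of params, please make sure the metadata is available")
    if eval_request.params < 4:
        return "small", "g4dn.xlarge"    # 1x T4, 16 GB GPU memory
    if eval_request.params < 9:
        return "medium", "g5.2xlarge"    # 1x A10G, 24 GB GPU memory
    if eval_request.params < 24:
        return "large", "g4dn.12xlarge"  # 4x T4, 64 GB GPU memory total
    raise ValueError("Number of params too big, can't run this model")


if __name__ == "__main__":
    # Example: a 7B model falls into the medium g5.2xlarge tier.
    print(get_instance_for_model(EvalRequest(model="some-org/some-7b-model", params=7.0)))
```

Raising on missing metadata and on models above the largest tier keeps the scheduler from silently launching an instance that cannot fit the model.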