cognitivess committed on
Commit
2ec0205
1 Parent(s): d981a81

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +14 -3
README.md CHANGED
@@ -11,10 +11,21 @@ pip install git+https://huggingface.co/CognitivessAI/cognitivess
11
  Then, you can use the model like this:
12
 
13
  ```python
14
- # pip install bitsandbytes accelerate
 
 
 
 
15
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
16
  import torch
17
 
 
 
 
 
 
 
 
18
  # Set up quantization config
19
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
20
 
@@ -23,12 +34,12 @@ tokenizer = AutoTokenizer.from_pretrained("CognitivessAI/cognitivess")
23
  model = AutoModelForCausalLM.from_pretrained(
24
  "CognitivessAI/cognitivess",
25
  quantization_config=quantization_config,
26
- device_map="auto" # This will automatically distribute the model across available GPUs
27
  )
28
 
29
  # Prepare input
30
  input_text = "Write me a poem about Machine Learning."
31
- inputs = tokenizer(input_text, return_tensors="pt").to("cuda")
32
 
33
  # Generate output
34
  with torch.no_grad():
 
11
  Then, you can use the model like this:
12
 
13
  ```python
14
+ # Install required packages
15
+ # pip install bitsandbytes accelerate
16
+ # pip install git+https://huggingface.co/CognitivessAI/cognitivess
17
+
18
+ # Import necessary libraries
19
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
20
  import torch
21
 
22
+ # Import and register your custom classes
23
+ from cognitivess_model import CognitivessConfig, CognitivessForCausalLM
24
+ from transformers import AutoConfig, AutoModelForCausalLM
25
+
26
+ AutoConfig.register("cognitivess", CognitivessConfig)
27
+ AutoModelForCausalLM.register(CognitivessConfig, CognitivessForCausalLM)
28
+
29
  # Set up quantization config
30
  quantization_config = BitsAndBytesConfig(load_in_8bit=True)
31
 
 
34
  model = AutoModelForCausalLM.from_pretrained(
35
  "CognitivessAI/cognitivess",
36
  quantization_config=quantization_config,
37
+ device_map="auto"
38
  )
39
 
40
  # Prepare input
41
  input_text = "Write me a poem about Machine Learning."
42
+ inputs = tokenizer(input_text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
43
 
44
  # Generate output
45
  with torch.no_grad():