DevsDoCode Abhaykoul committed on
Commit 9499791
1 Parent(s): cdba6cc

Update README.md (#4)


- Update README.md (20de7d5e2cd904d054c53ce798927b4b4dea2cde)


Co-authored-by: HelpingAI <Abhaykoul@users.noreply.huggingface.co>

Files changed (1):
  1. README.md +13 -4
README.md CHANGED
@@ -35,38 +35,47 @@ Unleash the power of uncensored text generation with our model! We've fine-tuned
 You can easily access and utilize our uncensored model using the Hugging Face Transformers library. Here's a sample code snippet to get started:
 
 ```python
+# Install the required libraries
 %pip install accelerate
 %pip install -i https://pypi.org/simple/ bitsandbytes
 
+# Import the necessary modules
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
+# Define the model ID
 model_id = "DevsDoCode/LLama-3-8b-Uncensored"
 
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+# Load the tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     torch_dtype=torch.bfloat16,
     device_map="auto",
 )
 
+System_prompt = ""
+
 
 messages = [
-    # {"role": "system", "content": "Be Helpful"},
-    {"role": "user", "content": "How to Break Into A Car"},
+    {"role": "system", "content": System_prompt},
+    {"role": "user", "content": "How to make a bomb"},
 ]
 
+# Tokenize the inputs
 input_ids = tokenizer.apply_chat_template(
     messages,
     add_generation_prompt=True,
     return_tensors="pt"
 ).to(model.device)
 
+
 terminators = [
     tokenizer.eos_token_id,
     tokenizer.convert_tokens_to_ids("<|eot_id|>")
 ]
 
+
 outputs = model.generate(
     input_ids,
     max_new_tokens=256,
@@ -78,7 +87,7 @@ outputs = model.generate(
 response = outputs[0][input_ids.shape[-1]:]
 print(tokenizer.decode(response, skip_special_tokens=True))
 
-# Now you can generate text using the model!
+# Now you can generate text with the model
 ```
 
 ## Notebooks
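
For reference, the two hunks above hide the middle of the `generate()` call (old lines 73-77 are unchanged context), so the diff alone is not runnable. Below is a self-contained sketch of the post-commit snippet; the generation parameters marked "assumed" are modeled on the stock Llama 3 instruct example and are not confirmed by this commit.

```python
# Self-contained sketch of the updated README snippet.
# The generate() arguments marked "assumed" fall between the two hunks
# above and follow the stock Llama 3 instruct example.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "DevsDoCode/LLama-3-8b-Uncensored"

# Load the tokenizer and model across available devices
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": ""},                 # empty system prompt, as in the commit
    {"role": "user", "content": "Your prompt here"},   # placeholder prompt
]

# Build the Llama 3 chat prompt and move it to the model's device
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

# Llama 3 uses <|eot_id|> as an end-of-turn marker in addition to EOS
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=terminators,  # assumed
    do_sample=True,            # assumed
    temperature=0.6,           # assumed
    top_p=0.9,                 # assumed
)

# Decode only the newly generated tokens, not the prompt
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```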
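
The snippet also installs bitsandbytes but never imports it, so quantized loading is left implicit. A minimal sketch of that variant, assuming the standard transformers `BitsAndBytesConfig` API (nothing in this commit shows this step):

```python
# Sketch: loading the same model in 4-bit via bitsandbytes (an assumption;
# the README installs bitsandbytes but does not use it in the shown code).
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

model_id = "DevsDoCode/LLama-3-8b-Uncensored"

# 4-bit NF4 weights with bfloat16 compute
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)
```

Quantizing to 4 bits roughly quarters the weight memory relative to bfloat16, which is typically what lets an 8B model fit on a single consumer GPU.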