invalid-coder committed on
Commit
a65a275
·
verified ·
1 Parent(s): c163ef3

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +62 -0
README.md ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ ---
4
+ Uses
5
+ Important: Please use the exact chat template provided below for the model; otherwise performance will degrade. The model output can be verbose in rare cases; please consider setting temperature = 0 to reduce this.
6
+
7
+ Our model follows the exact chat template and usage as Openchat-3.5-0106. Please refer to their model card for more details. In addition, our model is hosted on LMSYS Chatbot Arena for free testing.
8
+
9
+ The conversation template is the same as Openchat-3.5-0106:
10
+
11
+ import transformers
12
+ tokenizer = transformers.AutoTokenizer.from_pretrained("openchat/openchat-3.5-0106")
13
+
14
+ # Single-turn
15
+ tokens = tokenizer("GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant:").input_ids
16
+ assert tokens == [1, 420, 6316, 28781, 3198, 3123, 1247, 28747, 22557, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747]
17
+
18
+ # Multi-turn
19
+ tokens = tokenizer("GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant: Hi<|end_of_turn|>GPT4 Correct User: How are you today?<|end_of_turn|>GPT4 Correct Assistant:").input_ids
20
+ assert tokens == [1, 420, 6316, 28781, 3198, 3123, 1247, 28747, 22557, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747, 15359, 32000, 420, 6316, 28781, 3198, 3123, 1247, 28747, 1602, 460, 368, 3154, 28804, 32000, 420, 6316, 28781, 3198, 3123, 21631, 28747]
21
+
22
+ # Coding Mode
23
+ tokens = tokenizer("Code User: Implement quicksort using C++<|end_of_turn|>Code Assistant:").input_ids
24
+ assert tokens == [1, 7596, 1247, 28747, 26256, 2936, 7653, 1413, 334, 1680, 32000, 7596, 21631, 28747]
25
+
26
+ Code Examples
27
+ import transformers
28
+
29
+ tokenizer = transformers.AutoTokenizer.from_pretrained("invalid-coder/Starling-LM-7B-beta-laser-dpo")
30
+ model = transformers.AutoModelForCausalLM.from_pretrained("invalid-coder/Starling-LM-7B-beta-laser-dpo")
31
+
32
+ def generate_response(prompt):
33
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids
34
+ outputs = model.generate(
35
+ input_ids,
36
+ max_length=256,
37
+ pad_token_id=tokenizer.pad_token_id,
38
+ eos_token_id=tokenizer.eos_token_id,
39
+ )
40
+ response_ids = outputs[0]
41
+ response_text = tokenizer.decode(response_ids, skip_special_tokens=True)
42
+ return response_text
43
+
44
+ # Single-turn conversation
45
+ prompt = "Hello, how are you?"
46
+ single_turn_prompt = f"GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:"
47
+ response_text = generate_response(single_turn_prompt)
48
+ print("Response:", response_text)
49
+
50
+ # Multi-turn conversation
51
+ prompt = "Hello"
52
+ follow_up_question = "How are you today?"
53
+ response = ""
54
+ multi_turn_prompt = f"GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant: {response}<|end_of_turn|>GPT4 Correct User: {follow_up_question}<|end_of_turn|>GPT4 Correct Assistant:"
55
+ response_text = generate_response(multi_turn_prompt)
56
+ print("Multi-turn conversation response:", response_text)
57
+
58
+ # Coding conversation
59
+ prompt = "Implement quicksort using C++"
60
+ coding_prompt = f"Code User: {prompt}<|end_of_turn|>Code Assistant:"
61
+ response = generate_response(coding_prompt)
62
+ print("Coding conversation response:", response)