machineuser committed
Commit 60fd1b8
Parent: a6b2d88

Sync widgets demo

packages/tasks/src/tasks/text-generation/inference.ts CHANGED
@@ -26,11 +26,24 @@ export interface TextGenerationInput {
  */
 export interface TextGenerationParameters {
 	/**
-	 * Whether to use logit sampling (true) or greedy search (false).
+	 * The number of sampling queries to run. Only the best one (in terms of total logprob) will
+	 * be returned.
+	 */
+	best_of?: number;
+	/**
+	 * Whether or not to output decoder input details
+	 */
+	decoder_input_details?: boolean;
+	/**
+	 * Whether or not to output details
+	 */
+	details?: boolean;
+	/**
+	 * Whether to use logits sampling instead of greedy decoding when generating new tokens.
 	 */
 	do_sample?: boolean;
 	/**
-	 * Maximum number of generated tokens.
+	 * The maximum number of tokens to generate.
 	 */
 	max_new_tokens?: number;
 	/**
@@ -42,6 +55,10 @@ export interface TextGenerationParameters {
 	 * Whether to prepend the prompt to the generated text.
 	 */
 	return_full_text?: boolean;
+	/**
+	 * The random sampling seed.
+	 */
+	seed?: number;
 	/**
 	 * Stop generating tokens if a member of `stop_sequences` is generated.
 	 */
@@ -79,10 +96,99 @@ export interface TextGenerationParameters {
  * Outputs for Text Generation inference
  */
 export interface TextGenerationOutput {
-	generatedText: unknown;
+	/**
+	 * When enabled, details about the generation
+	 */
+	details?: TextGenerationOutputDetails;
 	/**
 	 * The generated text
 	 */
-	generated_text?: string;
+	generated_text: string;
+	[property: string]: unknown;
+}
+
+/**
+ * When enabled, details about the generation
+ */
+export interface TextGenerationOutputDetails {
+	/**
+	 * Details about additional sequences when best_of is provided
+	 */
+	best_of_sequences?: TextGenerationSequenceDetails[];
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+export interface TextGenerationSequenceDetails {
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The generated text
+	 */
+	generated_text: number;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+/**
+ * The generated sequence reached the maximum allowed length
+ *
+ * The model generated an end-of-sentence (EOS) token
+ *
+ * One of the sequences in stop_sequences was generated
+ */
+export type FinishReason = "length" | "eos_token" | "stop_sequence";
+
+export interface PrefillToken {
+	id: number;
+	logprob: number;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
+	[property: string]: unknown;
+}
+
+export interface Token {
+	id: number;
+	logprob: number;
+	/**
+	 * Whether or not that token is a special one
+	 */
+	special: boolean;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
 	[property: string]: unknown;
 }
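
For orientation, a minimal sketch of how the updated types compose. The import path is hypothetical, and only the parameter and output types come from this diff; everything else (the object shapes, the helper name) is illustrative:

import type {
	TextGenerationParameters,
	TextGenerationOutput,
	FinishReason,
} from "@huggingface/tasks"; // hypothetical import path

// Parameters exercising the fields added in this commit
// (best_of, details, seed); all fields are optional.
const parameters: TextGenerationParameters = {
	do_sample: true,
	max_new_tokens: 64,
	best_of: 2, // keep the best of 2 sampled sequences
	seed: 42, // reproducible sampling
	details: true, // ask for per-token details in the output
};

// With `details` enabled, the output carries a FinishReason we can narrow on.
function describeStop(output: TextGenerationOutput): string {
	const reason: FinishReason | undefined = output.details?.finish_reason;
	switch (reason) {
		case "length":
			return "hit max_new_tokens";
		case "eos_token":
			return "model emitted an EOS token";
		case "stop_sequence":
			return "a stop sequence was generated";
		default:
			return "no details returned";
	}
}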
packages/tasks/src/tasks/text-generation/spec/input.json CHANGED
@@ -20,13 +20,25 @@
 	"description": "Additional inference parameters for Text Generation",
 	"type": "object",
 	"properties": {
+		"best_of": {
+			"type": "integer",
+			"description": "The number of sampling queries to run. Only the best one (in terms of total logprob) will be returned."
+		},
+		"decoder_input_details": {
+			"type": "boolean",
+			"description": "Whether or not to output decoder input details"
+		},
+		"details": {
+			"type": "boolean",
+			"description": "Whether or not to output details"
+		},
 		"do_sample": {
 			"type": "boolean",
-			"description": "Whether to use logit sampling (true) or greedy search (false)."
+			"description": "Whether to use logits sampling instead of greedy decoding when generating new tokens."
 		},
 		"max_new_tokens": {
 			"type": "integer",
-			"description": "Maximum number of generated tokens."
+			"description": "The maximum number of tokens to generate."
 		},
 		"repetition_penalty": {
 			"type": "number",
@@ -36,6 +48,10 @@
 			"type": "boolean",
 			"description": "Whether to prepend the prompt to the generated text."
 		},
+		"seed": {
+			"type": "integer",
+			"description": "The random sampling seed."
+		},
 		"stop_sequences": {
 			"type": "array",
 			"items": {
packages/tasks/src/tasks/text-generation/spec/output.json CHANGED
@@ -8,7 +8,113 @@
 		"generated_text": {
 			"type": "string",
 			"description": "The generated text"
+		},
+		"details": {
+			"description": "When enabled, details about the generation",
+			"title": "TextGenerationOutputDetails",
+			"allOf": [
+				{ "$ref": "#/$defs/SequenceDetails" },
+				{
+					"type": "object",
+					"properties": {
+						"best_of_sequences": {
+							"type": "array",
+							"description": "Details about additional sequences when best_of is provided",
+							"items": {
+								"allOf": [
+									{ "$ref": "#/$defs/SequenceDetails" },
+									{
+										"type": "object",
+										"properties": {
+											"generated_text": {
+												"type": "integer",
+												"description": "The generated text"
+											}
+										},
+										"required": ["generated_text"]
+									}
+								]
+							}
+						}
+					}
+				}
+			]
 		}
 	},
-	"required": ["generatedText"]
+	"required": ["generated_text"],
+
+	"$defs": {
+		"Token": {
+			"type": "object",
+			"title": "Token",
+			"properties": {
+				"id": {
+					"type": "integer"
+				},
+				"logprob": {
+					"type": "number"
+				},
+				"special": {
+					"type": "boolean",
+					"description": "Whether or not that token is a special one"
+				},
+				"text": {
+					"type": "string",
+					"description": "The text associated with that token"
+				}
+			},
+			"required": ["id", "logprob", "special", "text"]
+		},
+		"SequenceDetails": {
+			"type": "object",
+			"title": "TextGenerationSequenceDetails",
+			"properties": {
+				"finish_reason": {
+					"type": "string",
+					"description": "The reason why the generation was stopped.",
+					"oneOf": [
+						{ "const": "length", "description": "The generated sequence reached the maximum allowed length" },
+						{ "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
+						{ "const": "stop_sequence", "description": "One of the sequences in stop_sequences was generated" }
+					]
+				},
+				"generated_tokens": {
+					"type": "integer",
+					"description": "The number of generated tokens"
+				},
+				"prefill": {
+					"type": "array",
+					"items": {
+						"title": "PrefillToken",
+						"type": "object",
+						"properties": {
+							"id": {
+								"type": "integer"
+							},
+							"logprob": {
+								"type": "number"
+							},
+							"text": {
+								"type": "string",
+								"description": "The text associated with that token"
+							}
+						},
+						"required": ["id", "logprob", "text"]
+					}
+				},
+				"seed": {
+					"type": "integer",
+					"description": "The random seed used for generation"
+				},
+				"tokens": {
+					"type": "array",
+					"description": "The generated tokens and associated details",
+					"items": {
+						"$ref": "#/$defs/Token"
+					}
+				}
+			},
+			"required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+		}
+	}
 }
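
The `allOf` composition above means `details` inherits every `SequenceDetails` field, while each entry of `best_of_sequences` additionally carries its own `generated_text`. A response the composed schema describes might look like this sketch (values invented; `best_of_sequences` omitted):

// Illustrative output instance for the updated output.json spec
// (invented values; required SequenceDetails fields plus the optional `seed`).
const exampleOutput = {
	generated_text: " there was a princess.",
	details: {
		finish_reason: "eos_token", // "length" | "eos_token" | "stop_sequence"
		generated_tokens: 5,
		seed: 1337,
		// PrefillToken: id, logprob, text (no `special` flag)
		prefill: [{ id: 9038, logprob: -1.9, text: "Once" }],
		// Token: same fields plus a `special` flag
		tokens: [{ id: 627, logprob: -0.42, special: false, text: " there" }],
	},
};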