namespace AzureOpenAI;

/**
 * A labeled content filter result item that indicates whether the content was filtered and what the qualitative
 * severity level of the content was, as evaluated against content filter configuration for the category.
 */
model AzureContentFilterSeverityResult {
  /** Whether the content severity resulted in a content filtering action. */
  filtered: boolean;

  /** The labeled severity of the content. */
  severity: "safe" | "low" | "medium" | "high";
}
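
// Example (illustrative, not part of the spec): a "high"-severity item that triggered filtering would
// serialize as { "filtered": true, "severity": "high" }, while unflagged content would typically report
// { "filtered": false, "severity": "safe" }.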

/**
 * A labeled content filter result item that indicates whether the content was detected and whether the content was
 * filtered.
 */
model AzureContentFilterDetectionResult {
  /** Whether the content detection resulted in a content filtering action. */
  filtered: boolean;

  /** Whether the labeled content category was detected in the content. */
  detected: boolean;
}

/**
 * A content filter result item that associates an existing custom blocklist ID with a value indicating whether or not
 * the corresponding blocklist resulted in content being filtered.
 */
model AzureContentFilterBlocklistIdResult {
  /** The ID of the custom blocklist associated with the filtered status. */
  id: string;

  /** Whether the associated blocklist resulted in the content being filtered. */
  filtered: boolean;
}

/**
 * A content filter result item that associates an existing custom topic ID with a value indicating whether or not
 * the corresponding topic resulted in content being detected.
 */
model AzureContentFilterCustomTopicIdResult {
  /** The ID of the custom topic associated with the detected status. */
  id: string;

  /** Whether the associated custom topic resulted in the content being detected. */
  detected: boolean;
}

/**
 * A representation of a span of completion text as used by Azure OpenAI content filter results.
 */
model AzureContentFilterCompletionTextSpan {
  /**
   * Offset of the UTF32 code point which begins the span.
   */
  completion_start_offset: int32;

  /**
   * Offset of the first UTF32 code point which is excluded from the span. This field is always equal to
   * completion_start_offset for empty spans and always larger than completion_start_offset for non-empty spans.
   */
  completion_end_offset: int32;
}
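
// Worked example (illustrative, not part of the spec): for the completion text "Hello world", a span covering
// the word "world" would have completion_start_offset = 6 and completion_end_offset = 11, since the end offset
// names the first code point excluded from the span.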

/**
 * A labeled content filter detection result that includes detailed information about the completion text spans in
 * which the detected content occurs.
 */
model AzureContentFilterCompletionTextSpanDetectionResult {
  ...AzureContentFilterDetectionResult;

  /**
   * Detailed information about the detected completion text spans.
   */
  details: AzureContentFilterCompletionTextSpan[];
}

alias AzureContentFilterCategoriesBase = {
  ...SexualSeverityCategory;
  ...HateSeverityCategory;
  ...ViolenceSeverityCategory;
  ...SelfHarmSeverityCategory;
  ...ProfanityCategory;
  ...CustomBlocklistsResult;
  ...CustomTopicsResult;

  /**
   * If present, details about an error that prevented content filtering from completing its evaluation.
   */
  error?: {
    /**
     * A distinct, machine-readable code associated with the error.
     */
    code: int32;

    /**
     * A human-readable message associated with the error.
     */
    message: string;
  };
};

/**
 * A collection of true/false filtering results for configured custom blocklists.
 */
model AzureContentFilterBlocklistResult {
  /** A value indicating whether any of the detailed blocklists resulted in a filtering action. */
  filtered: boolean;

  /** The pairs of individual blocklist IDs and whether they resulted in a filtering action. */
  details?: {
    /** A value indicating whether the blocklist produced a filtering action. */
    filtered: boolean;

    /** The ID of the custom blocklist evaluated. */
    id: string;
  }[];
}
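
// Example (illustrative, not part of the spec): a result where one configured blocklist matched might serialize
// as { "filtered": true, "details": [ { "filtered": true, "id": "<blocklist-id>" } ] }.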

/**
 * A collection of true/false filtering results for configured custom topics.
 */
model AzureContentFilterCustomTopicResult {
  /** A value indicating whether any of the detailed topics resulted in a filtering action. */
  filtered: boolean;

  /** The pairs of individual topic IDs and whether they are detected. */
  details?: {
    /** A value indicating whether the topic is detected. */
    detected: boolean;

    /** The ID of the custom topic evaluated. */
    id: string;
  }[];
}

/**
 * A content filter result associated with a single input prompt item into a generative AI system.
 */
model AzureContentFilterResultForPrompt {
  /**
   * The index of the input prompt associated with the accompanying content filter result categories.
   */
  prompt_index?: int32;

  /**
   * The content filter category details for the result.
   */
  content_filter_results?: {
    ...AzureContentFilterCategoriesBase;
    ...JailbreakResult;

    /**
     * A detection result that describes attacks on systems powered by Generative AI models that can happen every time
     * an application processes information that wasn’t directly authored by either the developer of the application or
     * the user.
     */
    indirect_attack: AzureContentFilterDetectionResult;
  };
}
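
/**
 * A minimal sketch, for illustration only: a response envelope might surface one of these results per evaluated
 * input prompt, correlated by `prompt_index`. The model and property names below are hypothetical and not part of
 * the surface defined above.
 */
model HypotheticalPromptFilterAnnotatedResponse {
  /** One content filter result per evaluated input prompt. */
  prompt_filter_results?: AzureContentFilterResultForPrompt[];
}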

/**
 * A content filter result for a single response item produced by a generative AI system.
 */
model AzureContentFilterResultForChoice {
  ...AzureContentFilterCategoriesBase;

  /**
   * A detection result that describes a match against text protected under copyright or other status.
   */
  protected_material_text?: AzureContentFilterDetectionResult;

  /**
   * A detection result that describes a match against licensed code or other protected source material.
   */
  protected_material_code?: {
    ...AzureContentFilterDetectionResult;

    /**
     * If available, the citation details describing the associated license and its location.
     */
    citation?: {
      /**
       * The name or identifier of the license associated with the detection.
       */
      license?: string;

      /**
       * The URL associated with the license.
       */
      URL?: url;
    };
  };

  /**
   * A detection result that describes spans of completion text identified as ungrounded material.
   */
  ungrounded_material?: AzureContentFilterCompletionTextSpanDetectionResult;
}
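
/**
 * A minimal sketch, for illustration only: a generated choice might carry its own filter result alongside its
 * output content. The model and property names below are hypothetical and not part of the surface defined above.
 */
model HypotheticalFilterAnnotatedChoice {
  /** The content filter result evaluated against this choice's generated content. */
  content_filter_results?: AzureContentFilterResultForChoice;
}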

/**
 * A content filter result for an image generation operation's output response content.
 */
model AzureContentFilterImageResponseResults {
  ...SexualSeverityCategory;
  ...ViolenceSeverityCategory;
  ...HateSeverityCategory;
  ...SelfHarmSeverityCategory;
}

/**
 * A content filter result for an image generation operation's input request content.
 */
model AzureContentFilterImagePromptResults
  extends AzureContentFilterImageResponseResults {
  ...ProfanityCategory;
  ...CustomBlocklistsResult;
  ...CustomTopicsResult;
  ...JailbreakResult;
}

alias SexualSeverityCategory = {
  /**
   * A content filter category for language related to anatomical organs and genitals, romantic relationships, acts
   * portrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an
   * assault or a forced sexual violent act against one's will, prostitution, pornography, and abuse.
   */
  sexual?: AzureContentFilterSeverityResult;
};

alias ViolenceSeverityCategory = {
  /**
   * A content filter category for language related to physical actions intended to hurt, injure, damage, or kill
   * someone or something; describes weapons, guns, and related entities, such as manufacturers, associations,
   * legislation, and so on.
   */
  violence?: AzureContentFilterSeverityResult;
};

alias HateSeverityCategory = {
  /**
   * A content filter category that can refer to any content that attacks or uses pejorative or discriminatory
   * language with reference to a person or identity group based on certain differentiating attributes of these groups
   * including but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,
   * religion, immigration status, ability status, personal appearance, and body size.
   */
  hate?: AzureContentFilterSeverityResult;
};

alias SelfHarmSeverityCategory = {
  /**
   * A content filter category that describes language related to physical actions intended to purposely hurt,
   * injure, or damage one's body, or to kill oneself.
   */
  self_harm?: AzureContentFilterSeverityResult;
};

alias ProfanityCategory = {
  /**
   * A detection result that identifies whether crude, vulgar, or otherwise objectionable language is present in the
   * content.
   */
  profanity?: AzureContentFilterDetectionResult;
};

alias JailbreakResult = {
  /**
   * A detection result that describes user prompt injection attacks, where malicious users deliberately exploit
   * system vulnerabilities to elicit unauthorized behavior from the LLM. This could lead to inappropriate content
   * generation or violations of system-imposed restrictions.
   */
  jailbreak: AzureContentFilterDetectionResult;
};

alias CustomBlocklistsResult = {
  /**
   * A collection of binary filtering outcomes for configured custom blocklists.
   */
  custom_blocklists?: AzureContentFilterBlocklistResult;
};

alias CustomTopicsResult = {
  /**
   * A collection of binary filtering outcomes for configured custom topics.
   */
  custom_topics?: AzureContentFilterCustomTopicResult;
};