@@ -5,6 +5,16 @@ import * as Core from '../core';
55import * as Shared from './shared' ;
66
77export class Safety extends APIResource {
8+ /**
9+ * Classifies if text and/or image inputs are potentially harmful.
10+ */
11+ openaiModerations (
12+ body : SafetyOpenAIModerationsParams ,
13+ options ?: Core . RequestOptions ,
14+ ) : Core . APIPromise < OpenAIModerationsResponse > {
15+ return this . _client . post ( '/v1/openai/v1/moderations' , { body, ...options } ) ;
16+ }
17+
818 /**
919 * Run a shield.
1020 */
@@ -63,6 +73,19 @@ export interface RunShieldResponse {
6373 violation ?: Shared . SafetyViolation ;
6474}
6575
76+ export interface SafetyOpenAIModerationsParams {
77+ /**
78+ * Input (or inputs) to classify. Can be a single string, an array of strings, or
79+ * an array of multi-modal input objects similar to other models.
80+ */
81+ input : string | Array < string > ;
82+
83+ /**
84+ * The content moderation model you would like to use.
85+ */
86+ model ?: string ;
87+ }
88+
6689export interface SafetyRunShieldParams {
6790 /**
6891 * The messages to run the shield on.
@@ -84,6 +107,7 @@ export declare namespace Safety {
84107 export {
85108 type OpenAIModerationsResponse as OpenAIModerationsResponse ,
86109 type RunShieldResponse as RunShieldResponse ,
110+ type SafetyOpenAIModerationsParams as SafetyOpenAIModerationsParams ,
87111 type SafetyRunShieldParams as SafetyRunShieldParams ,
88112 } ;
89113}