diff --git a/api/v1alpha1/streamnativecloudconnection_types.go b/api/v1alpha1/streamnativecloudconnection_types.go index a039613a..2e95b4f8 100644 --- a/api/v1alpha1/streamnativecloudconnection_types.go +++ b/api/v1alpha1/streamnativecloudconnection_types.go @@ -28,14 +28,6 @@ type StreamNativeCloudConnectionSpec struct { // +required Server string `json:"server"` - // CertificateAuthorityData is the PEM-encoded certificate authority certificates - // +optional - CertificateAuthorityData []byte `json:"certificateAuthorityData,omitempty"` - - // InsecureSkipTLSVerify indicates whether to skip TLS verification - // +optional - InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty"` - // Auth defines the authentication configuration // +required Auth AuthConfig `json:"auth"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 7383d1a1..cd45c81f 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -2470,7 +2470,7 @@ func (in *StreamNativeCloudConnection) DeepCopyInto(out *StreamNativeCloudConnec *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) } @@ -2527,11 +2527,6 @@ func (in *StreamNativeCloudConnectionList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StreamNativeCloudConnectionSpec) DeepCopyInto(out *StreamNativeCloudConnectionSpec) { *out = *in - if in.CertificateAuthorityData != nil { - in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData - *out = make([]byte, len(*in)) - copy(*out, *in) - } out.Auth = in.Auth out.Logs = in.Logs } diff --git a/config/crd/bases/resource.streamnative.io_streamnativecloudconnections.yaml b/config/crd/bases/resource.streamnative.io_streamnativecloudconnections.yaml index e52b99fc..9db776aa 100644 --- a/config/crd/bases/resource.streamnative.io_streamnativecloudconnections.yaml +++ b/config/crd/bases/resource.streamnative.io_streamnativecloudconnections.yaml @@ -89,14 +89,6 @@ spec: required: - credentialsRef type: object - certificateAuthorityData: - description: CertificateAuthorityData is the PEM-encoded certificate - authority certificates - format: byte - type: string - insecureSkipTLSVerify: - description: InsecureSkipTLSVerify indicates whether to skip TLS verification - type: boolean logs: description: Logs defines the logging service configuration properties: diff --git a/config/samples/resource_v1alpha1_computeflinkdeployment.yaml b/config/samples/resource_v1alpha1_computeflinkdeployment.yaml new file mode 100644 index 00000000..e568049d --- /dev/null +++ b/config/samples/resource_v1alpha1_computeflinkdeployment.yaml @@ -0,0 +1,68 @@ +# Copyright 2024 StreamNative +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: resource.streamnative.io/v1alpha1 +kind: ComputeFlinkDeployment +metadata: + name: operator-test-v1 + namespace: default +spec: + apiServerRef: + name: test-connection + workspaceName: test-operator-workspace + template: + syncingMode: PATCH # below are crd from ververica platform + deployment: + userMetadata: + name: operator-test-v1 + namespace: default # the flink namespace + displayName: operator-test-v1 + spec: + state: RUNNING + deploymentTargetName: default # the flink deployment target, will need create first on ui + maxJobCreationAttempts: 99 + template: + metadata: + annotations: + flink.queryable-state.enabled: 'false' + flink.security.ssl.enabled: 'false' + spec: + artifact: + jarUri: function://public/default/flink-operator-test-beam-pulsar-io@1.19-snapshot + mainArgs: --runner=FlinkRunner --attachedMode=false --checkpointingInterval=60000 --checkpointTimeoutMillis=100000 --minPauseBetweenCheckpoints=1000 + entryClass: org.apache.beam.examples.WordCount + kind: JAR + flinkVersion: "1.18.1" + flinkImageTag: "1.18.1-stream3-scala_2.12-java17" + flinkConfiguration: + execution.checkpointing.externalized-checkpoint-retention: RETAIN_ON_CANCELLATION + execution.checkpointing.interval: 1min + execution.checkpointing.timeout: 10min + high-availability.type: kubernetes + state.backend: filesystem + taskmanager.memory.managed.fraction: '0.2' + parallelism: 1 + numberOfTaskManagers: 1 + resources: + jobmanager: + cpu: "1" + memory: 2G + taskmanager: + cpu: "1" + memory: 2G + logging: + loggingProfile: default + log4jLoggers: + "": DEBUG + com.company: DEBUG diff --git a/config/samples/resource_v1alpha1_computeworkspace.yaml b/config/samples/resource_v1alpha1_computeworkspace.yaml new file mode 100644 index 00000000..94352c8e --- /dev/null +++ b/config/samples/resource_v1alpha1_computeworkspace.yaml @@ -0,0 +1,27 @@ +# Copyright 2024 StreamNative +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: resource.streamnative.io/v1alpha1 +kind: ComputeWorkspace +metadata: + name: test-operator-workspace + namespace: default +spec: + apiServerRef: + name: test-connection + pulsarClusterNames: + - "test-pulsar" + poolRef: + name: shared + namespace: streamnative \ No newline at end of file diff --git a/config/samples/resource_v1alpha1_streamnativecloudconnection.yaml b/config/samples/resource_v1alpha1_streamnativecloudconnection.yaml new file mode 100644 index 00000000..a782a820 --- /dev/null +++ b/config/samples/resource_v1alpha1_streamnativecloudconnection.yaml @@ -0,0 +1,41 @@ +# Copyright 2024 StreamNative +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: resource.streamnative.io/v1alpha1 +kind: StreamNativeCloudConnection +metadata: + name: test-connection + namespace: default +spec: + server: https://api.streamnative.dev + auth: + credentialsRef: + name: test-credentials + organization: org +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-credentials + namespace: default +type: Opaque +stringData: + credentials.json: | + { + "type": "sn_service_account", + "client_secret": "client_secret", + "client_email": "client-email", + "issuer_url": "issuer_url", + "client_id": "client-id" + } diff --git a/docs/compute_flink_deployment.md b/docs/compute_flink_deployment.md new file mode 100644 index 00000000..4de555b4 --- /dev/null +++ b/docs/compute_flink_deployment.md @@ -0,0 +1,197 @@ +# ComputeFlinkDeployment + +## Overview + +The `ComputeFlinkDeployment` resource defines a Flink deployment in StreamNative Cloud. It supports both Ververica Platform (VVP) and Community deployment templates, allowing you to deploy and manage Flink applications. + +## Specifications + +| Field | Description | Required | +|----------------------|--------------------------------------------------------------------------------------------|----------| +| `apiServerRef` | Reference to the StreamNativeCloudConnection resource for API server access | Yes | +| `workspaceName` | Name of the ComputeWorkspace where the Flink deployment will run | Yes | +| `labels` | Labels to add to the deployment | No | +| `annotations` | Annotations to add to the deployment | No | +| `template` | VVP deployment template configuration | No* | +| `communityTemplate` | Community deployment template configuration | No* | +| `defaultPulsarCluster`| Default Pulsar cluster to use for the deployment | No | + +*Note: Either `template` or `communityTemplate` must be specified, but not both. 
+ +### VVP Deployment Template + +| Field | Description | Required | +|-----------------|--------------------------------------------------------------------------------------------|----------| +| `syncingMode` | How the deployment should be synced (e.g., PATCH) | No | +| `deployment` | VVP deployment configuration | Yes | + +#### VVP Deployment Configuration + +| Field | Description | Required | +|----------------------------|--------------------------------------------------------------------------------------------|----------| +| `userMetadata` | Metadata for the deployment (name, namespace, displayName, etc.) | Yes | +| `spec` | Deployment specification including state, target, resources, etc. | Yes | + +##### Deployment Spec Fields + +| Field | Description | Required | +|--------------------------------|----------------------------------------------------------------------------------------|----------| +| `deploymentTargetName` | Target name for the deployment | No | +| `state` | State of the deployment (RUNNING, SUSPENDED, CANCELLED) | No | +| `maxJobCreationAttempts` | Maximum number of job creation attempts (minimum: 1) | No | +| `maxSavepointCreationAttempts` | Maximum number of savepoint creation attempts (minimum: 1) | No | +| `template` | Deployment template configuration | Yes | + +##### Template Spec Fields + +| Field | Description | Required | +|----------------------|----------------------------------------------------------------------------------------|----------| +| `artifact` | Deployment artifact configuration | Yes | +| `flinkConfiguration` | Flink configuration key-value pairs | No | +| `parallelism` | Parallelism of the Flink job | No | +| `numberOfTaskManagers`| Number of task managers | No | +| `resources` | Resource requirements for jobmanager and taskmanager | No | +| `logging` | Logging configuration | No | + +##### Artifact Configuration + +| Field | Description | Required | 
+|--------------------------|----------------------------------------------------------------------------------------|----------| +| `kind` | Type of artifact (JAR, PYTHON, sqlscript) | Yes | +| `jarUri` | URI of the JAR file | No* | +| `pythonArtifactUri` | URI of the Python artifact | No* | +| `sqlScript` | SQL script content | No* | +| `flinkVersion` | Flink version to use | No | +| `flinkImageTag` | Flink image tag to use | No | +| `mainArgs` | Arguments for the main class/method | No | +| `entryClass` | Entry class for JAR artifacts | No | + +*Note: One of `jarUri`, `pythonArtifactUri`, or `sqlScript` must be specified based on the `kind`. + +### Community Deployment Template + +| Field | Description | Required | +|--------------------------|----------------------------------------------------------------------------------------|----------| +| `metadata` | Metadata for the deployment (annotations, labels) | No | +| `spec` | Community deployment specification | Yes | + +#### Community Deployment Spec + +| Field | Description | Required | +|--------------------------|----------------------------------------------------------------------------------------|----------| +| `image` | Flink image to use | Yes | +| `jarUri` | URI of the JAR file | Yes | +| `entryClass` | Entry class of the JAR | No | +| `mainArgs` | Main arguments for the application | No | +| `flinkConfiguration` | Flink configuration key-value pairs | No | +| `jobManagerPodTemplate` | Pod template for the job manager | No | +| `taskManagerPodTemplate` | Pod template for the task manager | No | + +## Status + +| Field | Description | +|----------------------|-------------------------------------------------------------------------------------------------| +| `conditions` | List of status conditions for the deployment | +| `observedGeneration` | The last observed generation of the resource | +| `deploymentStatus` | Raw deployment status from the API server | + +## Example + +1. 
Create a ComputeFlinkDeployment with VVP template: + +```yaml +apiVersion: resource.streamnative.io/v1alpha1 +kind: ComputeFlinkDeployment +metadata: + name: operator-test-v1 + namespace: default +spec: + apiServerRef: + name: test-connection + workspaceName: test-operator-workspace + template: + syncingMode: PATCH + deployment: + userMetadata: + name: operator-test-v1 + namespace: default + displayName: operator-test-v1 + spec: + state: RUNNING + deploymentTargetName: default + maxJobCreationAttempts: 99 + template: + metadata: + annotations: + flink.queryable-state.enabled: 'false' + flink.security.ssl.enabled: 'false' + spec: + artifact: + jarUri: function://public/default/flink-operator-test-beam-pulsar-io@1.19-snapshot + mainArgs: --runner=FlinkRunner --attachedMode=false --checkpointingInterval=60000 + entryClass: org.apache.beam.examples.WordCount + kind: JAR + flinkVersion: "1.18.1" + flinkImageTag: "1.18.1-stream3-scala_2.12-java17" + flinkConfiguration: + execution.checkpointing.externalized-checkpoint-retention: RETAIN_ON_CANCELLATION + execution.checkpointing.interval: 1min + execution.checkpointing.timeout: 10min + high-availability.type: kubernetes + state.backend: filesystem + taskmanager.memory.managed.fraction: '0.2' + parallelism: 1 + numberOfTaskManagers: 1 + resources: + jobmanager: + cpu: "1" + memory: 2G + taskmanager: + cpu: "1" + memory: 2G + logging: + loggingProfile: default + log4jLoggers: + "": DEBUG + com.company: DEBUG +``` + +2. Apply the YAML file: + +```shell +kubectl apply -f deployment.yaml +``` + +3. Check the deployment status: + +```shell +kubectl get computeflinkdeployment operator-test-v1 +``` + +The deployment is ready when the Ready condition is True: + +```shell +NAME READY AGE +operator-test-v1 True 1m +``` + +## Update Deployment + +You can update the deployment by modifying the YAML file and reapplying it. 
Most fields can be updated, including: +- Flink configuration +- Resources +- Parallelism +- Logging settings +- Artifact configuration + +After applying changes, verify the status to ensure the deployment is updated properly. + +## Delete Deployment + +To delete a ComputeFlinkDeployment resource: + +```shell +kubectl delete computeflinkdeployment operator-test-v1 +``` + +This will stop the Flink job and clean up all associated resources in StreamNative Cloud. \ No newline at end of file diff --git a/docs/compute_workspace.md b/docs/compute_workspace.md new file mode 100644 index 00000000..5ee078f9 --- /dev/null +++ b/docs/compute_workspace.md @@ -0,0 +1,96 @@ +# ComputeWorkspace + +## Overview + +The `ComputeWorkspace` resource defines a workspace in StreamNative Cloud for compute resources. It allows you to configure access to Pulsar clusters and compute pools for running Flink jobs. + +## Specifications + +| Field | Description | Required | +|--------------------------|--------------------------------------------------------------------------------------------|----------| +| `apiServerRef` | Reference to the StreamNativeCloudConnection resource for API server access | Yes | +| `pulsarClusterNames` | List of Pulsar cluster names that the workspace will have access to | No | +| `poolRef` | Reference to the compute pool that the workspace will use | No | +| `useExternalAccess` | Whether to use external access for the workspace | No | +| `flinkBlobStorage` | Configuration for Flink blob storage | No | + +### PoolRef Structure + +| Field | Description | Required | +|-------------|------------------------------------------------------------|----------| +| `namespace` | Namespace of the compute pool | No | +| `name` | Name of the compute pool | Yes | + +### FlinkBlobStorage Structure + +| Field | Description | Required | +|----------|------------------------------------------------------------|----------| +| `bucket` | Cloud storage bucket for Flink blob storage | Yes 
| +| `path` | Sub-path in the bucket (leave empty to use the whole bucket)| No | + +## Status + +| Field | Description | +|----------------------|-------------------------------------------------------------------------------------------------| +| `conditions` | List of status conditions for the workspace | +| `observedGeneration` | The last observed generation of the resource | +| `workspaceId` | The ID of the workspace in the StreamNative Cloud API server | + +## Example + +1. Create a ComputeWorkspace resource: + +```yaml +apiVersion: resource.streamnative.io/v1alpha1 +kind: ComputeWorkspace +metadata: + name: test-operator-workspace + namespace: default +spec: + apiServerRef: + name: test-connection + pulsarClusterNames: + - "test-pulsar" + poolRef: + name: shared + namespace: streamnative +``` + +2. Apply the YAML file: + +```shell +kubectl apply -f workspace.yaml +``` + +3. Check the workspace status: + +```shell +kubectl get computeworkspace test-operator-workspace +``` + +The workspace is ready when the Ready condition is True: + +```shell +NAME READY AGE +test-operator-workspace True 1m +``` + +## Update Workspace + +You can update the workspace by modifying the YAML file and reapplying it. Most fields can be updated, including: +- Pulsar cluster names +- Pool reference +- External access settings +- Flink blob storage configuration + +After applying changes, verify the status to ensure the workspace is configured properly. + +## Delete Workspace + +To delete a ComputeWorkspace resource: + +```shell +kubectl delete computeworkspace test-operator-workspace +``` + +Note that deleting the workspace will affect any resources that depend on it, such as ComputeFlinkDeployments. Make sure to handle any dependent resources appropriately before deletion. 
\ No newline at end of file diff --git a/docs/streamnative_cloud_connection.md b/docs/streamnative_cloud_connection.md new file mode 100644 index 00000000..56a0ddea --- /dev/null +++ b/docs/streamnative_cloud_connection.md @@ -0,0 +1,115 @@ +# StreamNativeCloudConnection + +## Overview + +The `StreamNativeCloudConnection` resource defines a connection to the StreamNative Cloud API server. It allows you to configure authentication and connection details for interacting with StreamNative Cloud services. + +## Specifications + +| Field | Description | Required | +|--------------------------------|-----------------------------------------------------------------------------------------------------------------|----------| +| `server` | The URL of the API server | Yes | +| `auth.credentialsRef` | Reference to the service account credentials secret | Yes | +| `logs.serviceUrl` | URL of the logging service. Required if logs configuration is specified. | No* | +| `organization` | The organization to use in the API server. If not specified, the connection name will be used | No | + +*Note: If `logs` configuration is specified, `serviceUrl` becomes required. + +## Status + +| Field | Description | +|----------------------|-------------------------------------------------------------------------------------------------| +| `conditions` | List of status conditions for the connection | +| `observedGeneration` | The last observed generation of the resource | +| `lastConnectedTime` | Timestamp of the last successful connection to the API server | + +## Service Account Credentials Structure + +The service account credentials secret should contain a `credentials.json` file with the following structure: + +```json +{ + "type": "sn_service_account", + "client_id": "", + "client_secret": "", + "client_email": "", + "issuer_url": "" +} +``` + +## Example + +1. 
Create a service account credentials secret: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: test-credentials + namespace: default +type: Opaque +stringData: + credentials.json: | + { + "type": "sn_service_account", + "client_secret": "client_secret", + "client_email": "client-email", + "issuer_url": "issuer_url", + "client_id": "client-id" + } +``` + +2. Create a StreamNativeCloudConnection resource: + +```yaml +apiVersion: resource.streamnative.io/v1alpha1 +kind: StreamNativeCloudConnection +metadata: + name: test-connection + namespace: default +spec: + server: https://api.streamnative.dev + auth: + credentialsRef: + name: test-credentials + organization: org +``` + +3. Apply the YAML files: + +```shell +kubectl apply -f credentials.yaml +kubectl apply -f connection.yaml +``` + +4. Check the connection status: + +```shell +kubectl get streamnativecloudconnection test-connection +``` + +The connection is ready when the Ready condition is True: + +```shell +NAME              READY   AGE +test-connection   True    1m +``` + +## Update Connection + +You can update the connection by modifying the YAML file and reapplying it. Most fields can be updated, including: +- Server URL +- Organization +- Credentials reference + +After applying changes, verify the status to ensure the connection is working properly. + +## Delete Connection + +To delete a StreamNativeCloudConnection resource: + +```shell +kubectl delete streamnativecloudconnection test-connection +``` + +Note that deleting the connection will affect any resources that depend on it, such as ComputeWorkspaces or ComputeFlinkDeployments. \ No newline at end of file