Skip to content

Commit

Permalink
Merge pull request #12 from tensorchord/v0.0.7
Browse files Browse the repository at this point in the history
add restore from backup and pitr
  • Loading branch information
xieydd authored Sep 11, 2024
2 parents 9435f70 + 32ba35c commit e02e8d2
Show file tree
Hide file tree
Showing 7 changed files with 444 additions and 89 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -80,4 +80,9 @@ In order to run the full suite of Acceptance tests, run `make testacc`.

```shell
PGVECTORS_CLOUD_API_KEY=pgrs-xxxxxxxxxxxx PGVECTORS_CLOUD_API_URL=https://cloud.pgvecto.rs/api/v1 make testacc

# test restore from backup
PGVECTORS_CLOUD_API_KEY=pgrs-xxxxxxxxxxxx PGVECTORS_CLOUD_API_URL=https://cloud.pgvecto.rs/api/v1 BACKUP_ID=b29f3124-0796-43a5-b565-68e684dcb07b make testacc
# test pitr
PGVECTORS_CLOUD_API_KEY=pgrs-xxxxxxxxxxxx PGVECTORS_CLOUD_API_URL=https://cloud.pgvecto.rs/api/v1 CLUSTER_ID=7d06b73d-807f-4b4f-a397-1c1eac768333 TARGET_TIME=2024-09-11T00:00:00+08:00 make testacc
```
30 changes: 20 additions & 10 deletions client/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,11 +74,13 @@ type CNPGCluster struct {

type CNPGClusterStatus struct {
// Status is the status of the cluster.
Status ClusterStatus `json:"status,omitempty"`
Endpoint Endpoint `json:"endpoint,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
Status ClusterStatus `json:"status,omitempty"`
Endpoint Endpoint `json:"endpoint,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UpdatedAt time.Time `json:"updated_at,omitempty"`
FirstRecoverabilityPoint time.Time `json:"first_recoverability_point,omitempty"`
LastArchivedWALTime time.Time `json:"last_archived_wal_time,omitempty"`
}

type Endpoint struct {
Expand Down Expand Up @@ -107,11 +109,19 @@ type CNPGClusterList struct {
}

type PostgreSQLConfig struct {
Instances int `json:"instances"`
Image string `json:"image"`
PGDataDiskSize string `json:"pg_data_disk_size"`
VectorConfig VectorConfig `json:"vector_config,omitempty"`
EnablePooler bool `json:"enable_pooler"`
Instances int `json:"instances"`
Image string `json:"image"`
PGDataDiskSize string `json:"pg_data_disk_size"`
VectorConfig VectorConfig `json:"vector_config,omitempty"`
EnablePooler bool `json:"enable_pooler"`
RestoreConfig RestoreConfig `json:"restore_config,omitempty"`
}

// RestoreConfig configures restoring a new cluster either from an existing
// backup or via point-in-time recovery (PITR) from a source cluster.
// Exactly one restore mode is expected: BackupID for backup-based restore,
// or ClusterID + TargetTime for PITR — TODO confirm the API rejects both.
type RestoreConfig struct {
// Enabled turns restore on for this cluster.
Enabled bool `json:"enabled,omitempty"`
// BackupID is the backup to restore from (backup-based restore).
BackupID string `json:"backup_id,omitempty"`
// ClusterID is the source cluster to recover from (PITR).
ClusterID string `json:"cluster_id,omitempty"`
// TargetTime is the point in time to recover to (PITR).
TargetTime time.Time `json:"target_time,omitempty"`
}

type VectorConfig struct {
Expand Down
7 changes: 7 additions & 0 deletions docs/data-sources/cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,21 @@ Cluster Data Source

### Read-Only

- `backup_id` (String) The backup ID for restore.
- `cluster_name` (String) The name of the cluster to be created. It is a string of no more than 32 characters.
- `cluster_provider` (String) The cloud provider of the cluster instance. At present, only aws is supported.
- `connect_endpoint` (String) The psql connection endpoint of the cluster.
- `database_name` (String) The name of the database.
- `enable_pooler` (Boolean) Enable connection pooler.
- `enable_restore` (Boolean) Enable restore.
- `first_recoverability_point` (String) The first recoverability point.
- `image` (String) The image of the cluster instance.
- `last_archived_wal_time` (String) The last archived WAL time.
- `last_updated` (String)
- `pg_data_disk_size` (String) The size of the PGData disk in GB, please insert between 1 and 16384.
- `plan` (String) The plan tier of the PGVecto.rs Cloud service. Available options are Starter and Enterprise.
- `region` (String) The region of the cluster instance. Available options are us-east-1, eu-west-1.
- `server_resource` (String) The server resource of the cluster instance. Available options are aws-t3-xlarge-4c-16g, aws-m7i-large-2c-8g, aws-r7i-large-2c-16g, aws-r7i-xlarge-4c-32g.
- `status` (String) The current status of the cluster. Possible values are Initializing, Ready, NotReady, Deleted, Upgrading, Suspended, Resuming.
- `target_cluster_id` (String) The target cluster ID for restore.
- `target_time` (String) The target time for restore.
7 changes: 7 additions & 0 deletions docs/resources/cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,20 +21,27 @@ Cluster resource. This resource allows you to create a new PGVecto.rs cluster.
- `cluster_name` (String) The name of the cluster to be created. It is a string of no more than 32 characters.
- `cluster_provider` (String) The cloud provider of the cluster instance. At present, only aws is supported.
- `database_name` (String) The name of the database.
- `image` (String) The image of the cluster instance. You can specify the tag of the image, please select limited tags in https://hub.docker.com/repository/docker/modelzai/pgvecto-rs/tags
- `plan` (String) The plan tier of the PGVecto.rs Cloud service. Available options are Starter and Enterprise.
- `region` (String) The region of the cluster instance. Available options are us-east-1, eu-west-1.
- `server_resource` (String) The server resource of the cluster instance. Available options are aws-t3-xlarge-4c-16g, aws-m7i-large-2c-8g, aws-r7i-large-2c-16g, aws-r7i-xlarge-4c-32g.

### Optional

- `backup_id` (String) The backup id to restore from
- `enable_pooler` (Boolean) Enable pgpooler
- `enable_restore` (Boolean) Enable restore from backup or target cluster(PITR)
- `pg_data_disk_size` (String) The size of the PGData disk in GB, please insert between 1 and 16384.
- `target_cluster_id` (String) The target cluster id to restore from
- `target_time` (String) The target time to restore from cluster
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))

### Read-Only

- `connect_endpoint` (String) The psql connection endpoint of the cluster.
- `first_recoverability_point` (String) The first recoverability point of the cluster
- `id` (String) Cluster identifier
- `last_archived_wal_time` (String) The last archived WAL time of the cluster
- `last_updated` (String)
- `status` (String) The current status of the cluster. Possible values are Initializing, Ready, NotReady, Deleted, Upgrading, Suspended, Resuming.

Expand Down
88 changes: 72 additions & 16 deletions pgvecto.rs/provider/cluster_data_source.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package provider
import (
"context"
"fmt"
"strings"
"time"

"github.com/hashicorp/terraform-plugin-framework/datasource"
Expand All @@ -26,19 +27,26 @@ type ClusterDataSource struct {

// ClusterDataSourceModel describes the cluster data model.
type ClusterDataSourceModel struct {
ClusterId types.String `tfsdk:"id"`
AccountId types.String `tfsdk:"account_id"`
ClusterName types.String `tfsdk:"cluster_name"`
Plan types.String `tfsdk:"plan"`
Region types.String `tfsdk:"region"`
ServerResource types.String `tfsdk:"server_resource"`
ClusterProvider types.String `tfsdk:"cluster_provider"`
Status types.String `tfsdk:"status"`
ConnectEndpoint types.String `tfsdk:"connect_endpoint"`
PGDataDiskSize types.String `tfsdk:"pg_data_disk_size"`
LastUpdated types.String `tfsdk:"last_updated"`
DatabaseName types.String `tfsdk:"database_name"`
EnablePooler types.Bool `tfsdk:"enable_pooler"`
ClusterId types.String `tfsdk:"id"`
AccountId types.String `tfsdk:"account_id"`
ClusterName types.String `tfsdk:"cluster_name"`
Plan types.String `tfsdk:"plan"`
Image types.String `tfsdk:"image"`
Region types.String `tfsdk:"region"`
ServerResource types.String `tfsdk:"server_resource"`
ClusterProvider types.String `tfsdk:"cluster_provider"`
Status types.String `tfsdk:"status"`
ConnectEndpoint types.String `tfsdk:"connect_endpoint"`
PGDataDiskSize types.String `tfsdk:"pg_data_disk_size"`
LastUpdated types.String `tfsdk:"last_updated"`
DatabaseName types.String `tfsdk:"database_name"`
EnablePooler types.Bool `tfsdk:"enable_pooler"`
EnableRestore types.Bool `tfsdk:"enable_restore"`
TargetClusterID types.String `tfsdk:"target_cluster_id"`
BackupID types.String `tfsdk:"backup_id"`
TargetTime types.String `tfsdk:"target_time"`
FirstRecoverabilityPoint types.String `tfsdk:"first_recoverability_point"`
LastArchivedWALTime types.String `tfsdk:"last_archived_wal_time"`
}

func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
Expand All @@ -65,6 +73,10 @@ func (r *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaReq
MarkdownDescription: "The plan tier of the PGVecto.rs Cloud service. Available options are Starter and Enterprise.",
Computed: true,
},
"image": schema.StringAttribute{
MarkdownDescription: "The image of the cluster instance.",
Computed: true,
},
"server_resource": schema.StringAttribute{
MarkdownDescription: "The server resource of the cluster instance. Available aws-t3-xlarge-4c-16g, aws-m7i-large-2c-8g, aws-r7i-large-2c-16g,aws-r7i-xlarge-4c-32g",
Computed: true,
Expand Down Expand Up @@ -100,6 +112,30 @@ func (r *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaReq
MarkdownDescription: "Enable connection pooler.",
Computed: true,
},
"enable_restore": schema.BoolAttribute{
MarkdownDescription: "Enable restore.",
Computed: true,
},
"target_cluster_id": schema.StringAttribute{
MarkdownDescription: "The target cluster ID for restore.",
Computed: true,
},
"backup_id": schema.StringAttribute{
MarkdownDescription: "The backup ID for restore.",
Computed: true,
},
"target_time": schema.StringAttribute{
MarkdownDescription: "The target time for restore.",
Computed: true,
},
"first_recoverability_point": schema.StringAttribute{
MarkdownDescription: "The first recoverability point.",
Computed: true,
},
"last_archived_wal_time": schema.StringAttribute{
MarkdownDescription: "The last archived WAL time.",
Computed: true,
},
},
}
}
Expand Down Expand Up @@ -154,6 +190,7 @@ func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
state.ClusterId = types.StringValue(c.Spec.ID)
state.ClusterName = types.StringValue(c.Spec.Name)
state.Plan = types.StringValue(string(c.Spec.Plan))
state.Image = types.StringValue(strings.Split(c.Spec.PostgreSQLConfig.Image, ":")[1])
state.ServerResource = types.StringValue(string(c.Spec.ServerResource))
state.Region = types.StringValue(c.Spec.ClusterProvider.Region)
state.ClusterProvider = types.StringValue(string(c.Spec.ClusterProvider.Type))
Expand All @@ -164,13 +201,32 @@ func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest
}
state.PGDataDiskSize = types.StringValue(c.Spec.PostgreSQLConfig.PGDataDiskSize)
state.DatabaseName = types.StringValue(c.Spec.PostgreSQLConfig.VectorConfig.DatabaseName)
state.LastUpdated = types.StringValue(c.Status.UpdatedAt.Format(time.RFC850))
state.EnablePooler = types.BoolValue(c.Spec.PostgreSQLConfig.EnablePooler)
state.LastUpdated = types.StringValue(c.Status.UpdatedAt.Format(time.RFC3339))
if c.Spec.PostgreSQLConfig.EnablePooler {
state.EnablePooler = types.BoolValue(c.Spec.PostgreSQLConfig.EnablePooler)
}

if c.Spec.PostgreSQLConfig.RestoreConfig.Enabled {
state.EnableRestore = types.BoolValue(c.Spec.PostgreSQLConfig.RestoreConfig.Enabled)
}

if c.Spec.PostgreSQLConfig.RestoreConfig.ClusterID != "" {
state.TargetClusterID = types.StringValue(c.Spec.PostgreSQLConfig.RestoreConfig.ClusterID)
}

if c.Spec.PostgreSQLConfig.RestoreConfig.BackupID != "" {
state.BackupID = types.StringValue(c.Spec.PostgreSQLConfig.RestoreConfig.BackupID)
}

if !c.Spec.PostgreSQLConfig.RestoreConfig.TargetTime.IsZero() {
state.TargetTime = types.StringValue(c.Spec.PostgreSQLConfig.RestoreConfig.TargetTime.Format(time.RFC3339))
}
state.FirstRecoverabilityPoint = types.StringValue(c.Status.FirstRecoverabilityPoint.Format(time.RFC3339))
state.LastArchivedWALTime = types.StringValue(c.Status.LastArchivedWALTime.Format(time.RFC3339))

diags := resp.State.Set(ctx, &state)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}

}
Loading

0 comments on commit e02e8d2

Please sign in to comment.