diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..922ee27 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @hashicorp/terraform-devex diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..0c8b092 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +HashiCorp Community Guidelines apply to you when interacting with the community here on GitHub and contributing code. + +Please read the full text at https://www.hashicorp.com/community-guidelines diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..87fdf72 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +# See GitHub's documentation for more information on this file: +# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/workflows/licensing.yaml b/.github/workflows/licensing.yaml new file mode 100644 index 0000000..74fe9d2 --- /dev/null +++ b/.github/workflows/licensing.yaml @@ -0,0 +1,26 @@ +name: Verify dependency licenses + +on: + push: + branches: + - main + pull_request: + types: + - opened + - reopened + - synchronize + +jobs: + licensing: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.1' + bundler-cache: false + - run: gem install license_finder + - run: license_finder diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..1115591 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,41 @@ +# Terraform Provider release workflow. 
+name: Release + +# This GitHub action creates a release when a tag that matches the pattern +# "v*" (e.g. v0.1.0) is created. +on: + push: + tags: + - 'v*' + +# Releases need permissions to read and write the repository contents. +# GitHub considers creating releases and uploading assets as writing contents. +permissions: + contents: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + # Allow goreleaser to access older tag information. + fetch-depth: 0 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - name: Import GPG key + uses: crazy-max/ghaction-import-gpg@72b6676b71ab476b77e676928516f6982eef7a41 # v5.3.0 + id: import_gpg + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.PASSPHRASE }} + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v4.3.0 + with: + args: release --clean + env: + # GitHub sets the GITHUB_TOKEN secret automatically. + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }} diff --git a/.github/workflows/test-expensive.yml b/.github/workflows/test-expensive.yml new file mode 100644 index 0000000..4d7b1c0 --- /dev/null +++ b/.github/workflows/test-expensive.yml @@ -0,0 +1,59 @@ +name: extensive + +on: + merge_group: + types: [checks_requested] + +permissions: + contents: read + +jobs: + build: + name: build + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - run: go mod download + - run: go build -v . 
+ - name: Run linters + uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + with: + version: latest + + test: + name: integration + needs: build + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + fail-fast: false + matrix: + terraform: + - '1.0.*' + - '1.1.*' + - '1.2.*' + - '1.3.*' + - '1.4.*' + - '1.5.*' + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 + with: + terraform_version: ${{ matrix.terraform }} + terraform_wrapper: false + - run: go mod download + - run: go test -v -cover ./internal/provider/ + env: + PLANETSCALE_SERVICE_TOKEN_NAME: ${{ secrets.PLANETSCALE_SERVICE_TOKEN_NAME }} + PLANETSCALE_SERVICE_TOKEN: ${{ secrets.PLANETSCALE_SERVICE_TOKEN }} + TF_ACC: "1" + timeout-minutes: 10 \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..457a191 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,80 @@ +name: basic + +on: + pull_request: + paths-ignore: + - 'README.md' + push: + paths-ignore: + - 'README.md' + merge_group: + types: [checks_requested] + +permissions: + contents: read + +jobs: + build: + name: build + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - run: go mod download + - run: go build -v . 
+ - name: Run linters + uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + with: + version: latest + + generate: + name: generate + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 + with: + terraform_version: 1.5.3 + terraform_wrapper: false + - run: go generate -x ./... + - name: git diff + run: | + git diff --compact-summary --exit-code || \ + (echo; echo "Unexpected difference in directories after code generation. Run 'go generate ./...' command and commit."; exit 1) + + test: + name: unit + needs: build + runs-on: ubuntu-latest + timeout-minutes: 15 + strategy: + fail-fast: false + matrix: + terraform: + - '1.0.*' + - '1.1.*' + - '1.2.*' + - '1.3.*' + - '1.4.*' + - '1.5.*' + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + go-version-file: 'go.mod' + cache: true + - uses: hashicorp/setup-terraform@633666f66e0061ca3b725c73b2ec20cd13a8fdd1 # v2.0.3 + with: + terraform_version: ${{ matrix.terraform }} + terraform_wrapper: false + - run: go mod download + - run: go test -cover ./... 
+ timeout-minutes: 10 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6cfac92 --- /dev/null +++ b/.gitignore @@ -0,0 +1,37 @@ +*.dll +*.exe +.DS_Store +example.tf +terraform.tfplan +terraform.tfstate +bin/ +dist/ +modules-dev/ +/pkg/ +website/.vagrant +website/.bundle +website/build +website/node_modules +.vagrant/ +*.backup +./*.tfstate +.terraform/ +*.log +*.bak +*~ +.*.swp +.idea +*.iml +*.test +*.iml + +website/vendor + +# Test exclusions +!command/test-fixtures/**/*.tfstate +!command/test-fixtures/**/.terraform/ + +# Keep windows files with windows line endings +*.winfile eol=crlf + +.env \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..223cf95 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,27 @@ +# Visit https://golangci-lint.run/ for usage documentation +# and information on other useful linters +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + disable-all: true + enable: + - durationcheck + - errcheck + - exportloopref + - forcetypeassert + - godot + - gofmt + - gosimple + - ineffassign + - makezero + - misspell + - nilerr + - predeclared + - staticcheck + - tenv + - unconvert + - unparam + - unused + - govet \ No newline at end of file diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..9bb0aa7 --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,60 @@ +# Visit https://goreleaser.com for documentation on how to customize this +# behavior. +before: + hooks: + # this is just an example and not a requirement for provider building/publishing + - go mod tidy +builds: +- env: + # goreleaser does not work with CGO, it could also complicate + # usage by users in CI/CD systems like Terraform Cloud where + # they are unable to install libraries. 
+ - CGO_ENABLED=0 + mod_timestamp: '{{ .CommitTimestamp }}' + flags: + - -trimpath + ldflags: + - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' + goos: + - freebsd + - windows + - linux + - darwin + goarch: + - amd64 + - '386' + - arm + - arm64 + ignore: + - goos: darwin + goarch: '386' + binary: '{{ .ProjectName }}_v{{ .Version }}' +archives: +- format: zip + name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' +checksum: + extra_files: + - glob: 'terraform-registry-manifest.json' + name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' + name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' + algorithm: sha256 +signs: + - artifacts: checksum + args: + # if you are using this in a GitHub action or some other automated pipeline, you + # need to pass the batch flag to indicate its not interactive. + - "--batch" + - "--local-user" + - "{{ .Env.GPG_FINGERPRINT }}" # set this environment variable for your signing key + - "--output" + - "${signature}" + - "--detach-sign" + - "${artifact}" +release: + extra_files: + - glob: 'terraform-registry-manifest.json' + name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' + # If you want to manually examine the release before its live, uncomment this line: + # draft: true +changelog: + skip: true diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..f3a896f --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,18 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Launch Package", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${fileDirname}", + "env": { + "TF_ACC":"true", + } + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..fe353bf --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "go.testEnvVars": { + "TF_ACC": "true" + }, + "go.testEnvFile": "${workspaceFolder}/.env" +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..b76e247 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.1.0 (Unreleased) + +FEATURES: diff --git a/GNUmakefile b/GNUmakefile new file mode 100644 index 0000000..7771cd6 --- /dev/null +++ b/GNUmakefile @@ -0,0 +1,6 @@ +default: testacc + +# Run acceptance tests +.PHONY: testacc +testacc: + TF_ACC=1 go test ./... -v $(TESTARGS) -timeout 120m diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/README.md b/README.md new file mode 100644 index 0000000..11bf1bf --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +# PlanetScale Terraform Provider + +Work in progress Terraform provider for PlanetScale. + +Not ready for general consumption. \ No newline at end of file diff --git a/doc/dependency_decisions.yml b/doc/dependency_decisions.yml new file mode 100644 index 0000000..0b4ebd0 --- /dev/null +++ b/doc/dependency_decisions.yml @@ -0,0 +1,73 @@ +--- +- - :permit + - MIT + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:54:03.091597000 Z +- - :permit + - New BSD + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:54:05.834639000 Z +- - :permit + - Apache 2.0 + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:54:18.270178000 Z +- - :permit + - Simplified BSD + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:54:35.110124000 Z +- - :permit + - Mozilla Public License 2.0 + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:54:43.394929000 Z +- - :approve + - github.com/hashicorp/go-plugin + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:55:23.893162000 Z +- - :approve + - github.com/hashicorp/go-uuid + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:55:32.748891000 Z +- - :approve + - github.com/hashicorp/terraform-plugin-framework + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:55:56.398305000 Z +- - :approve + - github.com/hashicorp/terraform-plugin-go + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:56:02.831960000 Z +- - :approve + - github.com/hashicorp/terraform-plugin-log + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:56:11.501510000 Z +- - :approve + - 
github.com/hashicorp/terraform-registry-address + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:56:21.325793000 Z +- - :approve + - github.com/hashicorp/terraform-svchost + - :who: + :why: + :versions: [] + :when: 2023-09-25 21:56:32.197702000 Z diff --git a/docs/data-sources/backup.md b/docs/data-sources/backup.md new file mode 100644 index 0000000..c892c01 --- /dev/null +++ b/docs/data-sources/backup.md @@ -0,0 +1,78 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_backup Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale backup. +--- + +# planetscale_backup (Data Source) + +A PlanetScale backup. + +## Example Usage + +```terraform +data "planetscale_backup" "example" { + organization = "example.com" + database = "example_db" + branch = "main" + id = "k20nb1b7ut18" +} + +output "backup" { + value = data.planetscale_backup.example +} +``` + + +## Schema + +### Required + +- `branch` (String) The branch this backup belongs to. +- `database` (String) The database this backup belongs to. +- `id` (String) The ID of the backup. +- `organization` (String) The organization this backup belongs to. + +### Read-Only + +- `actor` (Attributes) The actor that created the backup. (see [below for nested schema](#nestedatt--actor)) +- `backup_policy` (Attributes) The backup policy being followed. (see [below for nested schema](#nestedatt--backup_policy)) +- `created_at` (String) When the backup was created. +- `estimated_storage_cost` (String) The estimated storage cost of the backup. +- `name` (String) The name of the backup. +- `required` (Boolean) Whether or not the backup policy is required. +- `restored_branches` (List of String) Branches that have been restored with this backup. +- `size` (Number) The size of the backup. +- `state` (String) The current state of the backup. +- `updated_at` (String) When the backup was last updated. 
+ + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `backup_policy` + +Read-Only: + +- `created_at` (String) When the backup policy was created. +- `frequency_unit` (String) The unit for the frequency of the backup policy. +- `frequency_value` (Number) A number value for the frequency of the backup policy. +- `id` (String) The ID of the backup policy. +- `last_ran_at` (String) When the backup was last run. +- `name` (String) The name of the backup policy. +- `next_run_at` (String) When the backup will next run. +- `retention_unit` (String) The unit for the retention period of the backup policy. +- `retention_value` (Number) A number value for the retention period of the backup policy. +- `schedule_day` (String) Day of the week that the backup is scheduled. +- `schedule_week` (String) Week of the month that the backup is scheduled. +- `target` (String) Whether the backup policy is for a production or development database, or for a database branch. +- `updated_at` (String) When the backup policy was last updated. diff --git a/docs/data-sources/backups.md b/docs/data-sources/backups.md new file mode 100644 index 0000000..87227ae --- /dev/null +++ b/docs/data-sources/backups.md @@ -0,0 +1,87 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_backups Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale backups. +--- + +# planetscale_backups (Data Source) + +A list of PlanetScale backups. 
+ +## Example Usage + +```terraform +data "planetscale_backups" "example" { + organization = "example.com" + database = "example_db" + branch = "main" +} + +output "backups" { + value = data.planetscale_backups.example +} +``` + + +## Schema + +### Required + +- `branch` (String) +- `database` (String) +- `organization` (String) + +### Read-Only + +- `backups` (Attributes List) (see [below for nested schema](#nestedatt--backups)) + + +### Nested Schema for `backups` + +Read-Only: + +- `actor` (Attributes) The actor that created the backup. (see [below for nested schema](#nestedatt--backups--actor)) +- `backup_policy` (Attributes) The backup policy being followed. (see [below for nested schema](#nestedatt--backups--backup_policy)) +- `branch` (String) The branch this backup belongs to. +- `created_at` (String) When the backup was created. +- `database` (String) The database this backup belongs to. +- `estimated_storage_cost` (String) The estimated storage cost of the backup. +- `id` (String) The ID of the backup. +- `name` (String) The name of the backup. +- `organization` (String) The organization this backup belongs to. +- `required` (Boolean) Whether or not the backup policy is required. +- `restored_branches` (List of String) Branches that have been restored with this backup. +- `size` (Number) The size of the backup. +- `state` (String) The current state of the backup. +- `updated_at` (String) When the backup was last updated. + + +### Nested Schema for `backups.actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `backups.backup_policy` + +Read-Only: + +- `created_at` (String) When the backup policy was created. +- `frequency_unit` (String) The unit for the frequency of the backup policy. +- `frequency_value` (Number) A number value for the frequency of the backup policy. +- `id` (String) The ID of the backup policy. 
+- `last_ran_at` (String) When the backup was last run. +- `name` (String) The name of the backup policy. +- `next_run_at` (String) When the backup will next run. +- `retention_unit` (String) The unit for the retention period of the backup policy. +- `retention_value` (Number) A number value for the retention period of the backup policy. +- `schedule_day` (String) Day of the week that the backup is scheduled. +- `schedule_week` (String) Week of the month that the backup is scheduled. +- `target` (String) Whether the backup policy is for a production or development database, or for a database branch. +- `updated_at` (String) When the backup policy was last updated. diff --git a/docs/data-sources/branch.md b/docs/data-sources/branch.md new file mode 100644 index 0000000..d2d697b --- /dev/null +++ b/docs/data-sources/branch.md @@ -0,0 +1,91 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_branch Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale branch. +--- + +# planetscale_branch (Data Source) + +A PlanetScale branch. + +## Example Usage + +```terraform +data "planetscale_branch" "example" { + organization = "example.com" + database = "example_db" + name = "main" +} + +output "branch" { + value = data.planetscale_branch.example +} +``` + + +## Schema + +### Required + +- `database` (String) The database this branch belongs to. +- `name` (String) The name of the branch. +- `organization` (String) The organization this branch belongs to. + +### Read-Only + +- `access_host_url` (String) The access host URL for the branch. This is a legacy field, use `mysql_edge_address`. +- `actor` (Attributes) The actor who created this branch. (see [below for nested schema](#nestedatt--actor)) +- `cluster_rate_name` (String) The SKU representing the branch's cluster size. +- `created_at` (String) When the branch was created. +- `html_url` (String) Planetscale app URL for the branch. 
+- `id` (String) The ID of the branch. +- `initial_restore_id` (String) The ID of the backup from which the branch was restored. +- `mysql_address` (String) The MySQL address for the branch. +- `mysql_edge_address` (String) The address of the MySQL provider for the branch. +- `parent_branch` (String) The name of the parent branch from which the branch was created. +- `production` (Boolean) Whether or not the branch is a production branch. +- `ready` (Boolean) Whether or not the branch is ready to serve queries. +- `region` (Attributes) The region in which this branch lives. (see [below for nested schema](#nestedatt--region)) +- `restore_checklist_completed_at` (String) When a user last marked a backup restore checklist as completed. +- `restored_from_branch` (Attributes) (see [below for nested schema](#nestedatt--restored_from_branch)) +- `schema_last_updated_at` (String) When the schema for the branch was last updated. +- `shard_count` (Number) The number of shards in the branch. +- `sharded` (Boolean) Whether or not the branch is sharded. +- `updated_at` (String) When the branch was last updated. + + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. + + + +### Nested Schema for `restored_from_branch` + +Read-Only: + +- `created_at` (String) When the resource was created. +- `deleted_at` (String) When the resource was deleted, if deleted. +- `id` (String) The ID for the resource. 
+- `name` (String) The name for the resource. +- `updated_at` (String) When the resource was last updated. diff --git a/docs/data-sources/branch_schema.md b/docs/data-sources/branch_schema.md new file mode 100644 index 0000000..7b9c3a0 --- /dev/null +++ b/docs/data-sources/branch_schema.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_branch_schema Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + The schema of a PlanetScale branch. +--- + +# planetscale_branch_schema (Data Source) + +The schema of a PlanetScale branch. + +## Example Usage + +```terraform +data "planetscale_branch_schema" "example" { + organization = "example.com" + database = "example_db" + branch = "main" +} + +output "branch_schema" { + value = data.planetscale_branch_schema.example +} +``` + + +## Schema + +### Required + +- `branch` (String) +- `database` (String) +- `organization` (String) + +### Optional + +- `keyspace` (String) + +### Read-Only + +- `tables` (Attributes List) (see [below for nested schema](#nestedatt--tables)) + + +### Nested Schema for `tables` + +Read-Only: + +- `html` (String) Syntax highlighted HTML for the table's schema. +- `name` (String) Name of the table. +- `raw` (String) The table's schema. diff --git a/docs/data-sources/branch_schema_lint.md b/docs/data-sources/branch_schema_lint.md new file mode 100644 index 0000000..28f686d --- /dev/null +++ b/docs/data-sources/branch_schema_lint.md @@ -0,0 +1,61 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_branch_schema_lint Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + Linting errors for the schema of a PlanetScale branch. +--- + +# planetscale_branch_schema_lint (Data Source) + +Linting errors for the schema of a PlanetScale branch. 
+
+## Example Usage
+
+```terraform
+data "planetscale_branch_schema_lint" "example" {
+  organization = "example.com"
+  database     = "example_db"
+  branch       = "main"
+}
+
+output "schema_lint" {
+  value = data.planetscale_branch_schema_lint.example
+}
+```
+
+
+## Schema
+
+### Required
+
+- `branch` (String)
+- `database` (String)
+- `organization` (String)
+
+### Read-Only
+
+- `errors` (Attributes List) (see [below for nested schema](#nestedatt--errors))
+
+
+### Nested Schema for `errors`
+
+Read-Only:
+
+- `auto_increment_column_names` (List of String) A list of invalid auto-incremented columns.
+- `charset_name` (String) The charset of the schema.
+- `check_constraint_name` (String) The name of the invalid check constraint.
+- `column_name` (String) The column in a table relevant to the error.
+- `docs_url` (String) A link to the documentation related to the error.
+- `engine_name` (String) The engine of the schema.
+- `enum_value` (String) The name of the invalid enum value.
+- `error_description` (String) A description for the error that occurred.
+- `foreign_key_column_names` (List of String) A list of invalid foreign key columns in a table.
+- `json_path` (String) The path for an invalid JSON column.
+- `keyspace_name` (String) The keyspace of the schema with the error.
+- `lint_error` (String) Code representing the type of lint error.
+- `partition_name` (String) The name of the invalid partition in the schema.
+- `partitioning_type` (String) The name of the invalid partitioning type.
+- `subject_type` (String) The subject for the errors.
+- `table_name` (String) The table with the error.
+- `vindex_name` (String) The name of the vindex for the schema.
diff --git a/docs/data-sources/branches.md b/docs/data-sources/branches.md new file mode 100644 index 0000000..a946e18 --- /dev/null +++ b/docs/data-sources/branches.md @@ -0,0 +1,99 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_branches Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale branches. +--- + +# planetscale_branches (Data Source) + +A list of PlanetScale branches. + +## Example Usage + +```terraform +data "planetscale_branches" "example" { + organization = "example.com" + database = "example_db" +} + +output "branches" { + value = data.planetscale_branches.example +} +``` + + +## Schema + +### Required + +- `database` (String) +- `organization` (String) + +### Read-Only + +- `branches` (Attributes List) (see [below for nested schema](#nestedatt--branches)) + + +### Nested Schema for `branches` + +Read-Only: + +- `access_host_url` (String) The access host URL for the branch. This is a legacy field, use `mysql_edge_address`. +- `actor` (Attributes) The actor who created this branch. (see [below for nested schema](#nestedatt--branches--actor)) +- `cluster_rate_name` (String) The SKU representing the branch's cluster size. +- `created_at` (String) When the branch was created. +- `database` (String) The database this branch belongs to. +- `html_url` (String) Planetscale app URL for the branch. +- `id` (String) The ID of the branch. +- `initial_restore_id` (String) The ID of the backup from which the branch was restored. +- `mysql_address` (String) The MySQL address for the branch. +- `mysql_edge_address` (String) The address of the MySQL provider for the branch. +- `name` (String) The name of the branch. +- `organization` (String) The organization this branch belongs to. +- `parent_branch` (String) The name of the parent branch from which the branch was created. +- `production` (Boolean) Whether or not the branch is a production branch. 
+- `ready` (Boolean) Whether or not the branch is ready to serve queries. +- `region` (Attributes) The region in which this branch lives. (see [below for nested schema](#nestedatt--branches--region)) +- `restore_checklist_completed_at` (String) When a user last marked a backup restore checklist as completed. +- `restored_from_branch` (Attributes) (see [below for nested schema](#nestedatt--branches--restored_from_branch)) +- `schema_last_updated_at` (String) When the schema for the branch was last updated. +- `shard_count` (Number) The number of shards in the branch. +- `sharded` (Boolean) Whether or not the branch is sharded. +- `updated_at` (String) When the branch was last updated. + + +### Nested Schema for `branches.actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `branches.region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. + + + +### Nested Schema for `branches.restored_from_branch` + +Read-Only: + +- `created_at` (String) When the resource was created. +- `deleted_at` (String) When the resource was deleted, if deleted. +- `id` (String) The ID for the resource. +- `name` (String) The name for the resource. +- `updated_at` (String) When the resource was last updated. 
diff --git a/docs/data-sources/database.md b/docs/data-sources/database.md new file mode 100644 index 0000000..a7ed44d --- /dev/null +++ b/docs/data-sources/database.md @@ -0,0 +1,105 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_database Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale database. +--- + +# planetscale_database (Data Source) + +A PlanetScale database. + +## Example Usage + +```terraform +data "planetscale_database" "example" { + organization = "example" + name = "again" +} + +output "db" { + value = data.planetscale_database.example +} +``` + + +## Schema + +### Required + +- `name` (String) The name of this database. +- `organization` (String) The organization this database belongs to. + +### Optional + +- `allow_data_branching` (Boolean) Whether seeding branches with data is enabled for all branches. +- `automatic_migrations` (Boolean) Whether to automatically manage Rails migrations during deploy requests. +- `data_import` (Attributes) If the database was created from an import, describes the import process. (see [below for nested schema](#nestedatt--data_import)) +- `default_branch` (String) The default branch for the database. +- `insights_raw_queries` (Boolean) The URL to see this database's branches in the web UI. +- `issues_count` (Number) The total number of ongoing issues within a database. +- `migration_framework` (String) Framework used for applying migrations. +- `migration_table_name` (String) Table name to use for copying schema migration data. +- `multiple_admins_required_for_deletion` (Boolean) If the database requires multiple admins for deletion. +- `plan` (String) The database plan. +- `production_branch_web_console` (Boolean) Whether web console is enabled for production branches. +- `region` (Attributes) The region the database lives in. 
(see [below for nested schema](#nestedatt--region)) +- `require_approval_for_deploy` (Boolean) Whether an approval is required to deploy schema changes to this database. +- `restrict_branch_region` (Boolean) Whether to restrict branch creation to one region. + +### Read-Only + +- `at_backup_restore_branches_limit` (Boolean) If the database has reached its backup restored branch limit. +- `at_development_branch_limit` (Boolean) If the database has reached its development branch limit. +- `branches_count` (Number) The total number of database branches. +- `branches_url` (String) The URL to retrieve this database's branches via the API. +- `created_at` (String) When the database was created. +- `default_branch_read_only_regions_count` (Number) Number of read only regions in the default branch. +- `default_branch_shard_count` (Number) Number of shards in the default branch. +- `default_branch_table_count` (Number) Number of tables in the default branch schema. +- `development_branches_count` (Number) The total number of database development branches. +- `html_url` (String) The total number of database development branches. +- `id` (String) The ID of the database. +- `production_branches_count` (Number) The total number of database production branches. +- `ready` (Boolean) If the database is ready to be used. +- `schema_last_updated_at` (String) When the default branch schema was last changed. +- `sharded` (Boolean) If the database is sharded. +- `state` (String) State of the database. +- `updated_at` (String) When the database was last updated. +- `url` (String) The URL to the database API endpoint. + + +### Nested Schema for `data_import` + +Read-Only: + +- `data_source` (Attributes) Connection information for the source of the data for the import. (see [below for nested schema](#nestedatt--data_import--data_source)) +- `finished_at` (String) When the import finished. +- `import_check_errors` (String) Errors encountered while preparing the import. 
+- `started_at` (String) When the import started.
+- `state` (String) The state of the import, one of: pending, queued, in_progress, complete, cancelled, error.
+
+
+### Nested Schema for `data_import.data_source`
+
+Required:
+
+- `database` (String) The name of the database imported from.
+- `hostname` (String) The hostname where the database lives.
+- `port` (String) The port on which the database listens on the host.
+
+
+
+
+### Nested Schema for `region`
+
+Read-Only:
+
+- `display_name` (String) Name of the region.
+- `enabled` (Boolean) Whether or not the region is currently active.
+- `id` (String) The ID of the region.
+- `location` (String) Location of the region.
+- `provider` (String) Provider for the region (ex. AWS).
+- `public_ip_addresses` (List of String) Public IP addresses for the region.
+- `slug` (String) The slug of the region.
diff --git a/docs/data-sources/database_read_only_regions.md b/docs/data-sources/database_read_only_regions.md
new file mode 100644
index 0000000..56d0f05
--- /dev/null
+++ b/docs/data-sources/database_read_only_regions.md
@@ -0,0 +1,73 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "planetscale_database_read_only_regions Data Source - terraform-provider-planetscale"
+subcategory: ""
+description: |-
+  A list of PlanetScale read-only regions.
+---
+
+# planetscale_database_read_only_regions (Data Source)
+
+A list of PlanetScale read-only regions.
+
+## Example Usage
+
+```terraform
+data "planetscale_database_read_only_regions" "example" {
+  organization = "example.com"
+  name         = "example_db"
+}
+
+output "database_ro_regions" {
+  value = data.planetscale_database_read_only_regions.example
+}
+```
+
+
+## Schema
+
+### Required
+
+- `name` (String) The name of the database for which the read-only regions are available.
+- `organization` (String) The organization for which the read-only regions are available.
+ +### Read-Only + +- `regions` (Attributes List) The list of read-only regions available for the database. (see [below for nested schema](#nestedatt--regions)) + + +### Nested Schema for `regions` + +Read-Only: + +- `actor` (Attributes) The actor that created the read-only region. (see [below for nested schema](#nestedatt--regions--actor)) +- `created_at` (String) When the read-only region was created. +- `display_name` (String) The name of the read-only region. +- `id` (String) The ID of the read-only region. +- `ready` (Boolean) Whether or not the read-only region is ready to serve queries. +- `ready_at` (String) When the read-only region was ready to serve queries. +- `region` (Attributes) The details of the read-only region. (see [below for nested schema](#nestedatt--regions--region)) +- `updated_at` (String) When the read-only region was last updated. + + +### Nested Schema for `regions.actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `regions.region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/database_regions.md b/docs/data-sources/database_regions.md new file mode 100644 index 0000000..dfb5f2f --- /dev/null +++ b/docs/data-sources/database_regions.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_database_regions Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale regions. 
+--- + +# planetscale_database_regions (Data Source) + +A list of PlanetScale regions. + +## Example Usage + +```terraform +data "planetscale_database_regions" "example" { + organization = "example.com" + name = "example_db" +} + +output "database_regions" { + value = data.planetscale_database_regions.example +} +``` + + +## Schema + +### Required + +- `name` (String) The database for which the regions are available. +- `organization` (String) The organization for which the regions are available. + +### Read-Only + +- `regions` (Attributes List) The list of regions available for the database. (see [below for nested schema](#nestedatt--regions)) + + +### Nested Schema for `regions` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/databases.md b/docs/data-sources/databases.md new file mode 100644 index 0000000..9ab9ab3 --- /dev/null +++ b/docs/data-sources/databases.md @@ -0,0 +1,112 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_databases Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale databases. +--- + +# planetscale_databases (Data Source) + +A list of PlanetScale databases. 
+ +## Example Usage + +```terraform +data "planetscale_databases" "example" { + organization = "example" +} + +output "dbs" { + value = data.planetscale_databases.example +} +``` + + +## Schema + +### Required + +- `organization` (String) + +### Read-Only + +- `databases` (Attributes List) (see [below for nested schema](#nestedatt--databases)) + + +### Nested Schema for `databases` + +Optional: + +- `allow_data_branching` (Boolean) Whether seeding branches with data is enabled for all branches. +- `automatic_migrations` (Boolean) Whether to automatically manage Rails migrations during deploy requests. +- `data_import` (Attributes) If the database was created from an import, describes the import process. (see [below for nested schema](#nestedatt--databases--data_import)) +- `default_branch` (String) The default branch for the database. +- `insights_raw_queries` (Boolean) The URL to see this database's branches in the web UI. +- `issues_count` (Number) The total number of ongoing issues within a database. +- `migration_framework` (String) Framework used for applying migrations. +- `migration_table_name` (String) Table name to use for copying schema migration data. +- `multiple_admins_required_for_deletion` (Boolean) If the database requires multiple admins for deletion. +- `plan` (String) The database plan. +- `production_branch_web_console` (Boolean) Whether web console is enabled for production branches. +- `region` (Attributes) The region the database lives in. (see [below for nested schema](#nestedatt--databases--region)) +- `require_approval_for_deploy` (Boolean) Whether an approval is required to deploy schema changes to this database. +- `restrict_branch_region` (Boolean) Whether to restrict branch creation to one region. + +Read-Only: + +- `at_backup_restore_branches_limit` (Boolean) If the database has reached its backup restored branch limit. +- `at_development_branch_limit` (Boolean) If the database has reached its development branch limit. 
+- `branches_count` (Number) The total number of database branches. +- `branches_url` (String) The URL to retrieve this database's branches via the API. +- `created_at` (String) When the database was created. +- `default_branch_read_only_regions_count` (Number) Number of read only regions in the default branch. +- `default_branch_shard_count` (Number) Number of shards in the default branch. +- `default_branch_table_count` (Number) Number of tables in the default branch schema. +- `development_branches_count` (Number) The total number of database development branches. +- `html_url` (String) The total number of database development branches. +- `id` (String) The ID of the database. +- `name` (String) The name of this database. +- `organization` (String) The organization this database belongs to. +- `production_branches_count` (Number) The total number of database production branches. +- `ready` (Boolean) If the database is ready to be used. +- `schema_last_updated_at` (String) When the default branch schema was last changed. +- `sharded` (Boolean) If the database is sharded. +- `state` (String) State of the database. +- `updated_at` (String) When the database was last updated. +- `url` (String) The URL to the database API endpoint. + + +### Nested Schema for `databases.data_import` + +Read-Only: + +- `data_source` (Attributes) Connection information for the source of the data for the import. (see [below for nested schema](#nestedatt--databases--data_import--data_source)) +- `finished_at` (String) When the import finished. +- `import_check_errors` (String) Errors encountered while preparing the import. +- `started_at` (String) When the import started. +- `state` (String) The state of the import, one of: pending, queued, in_progress, complete, cancelled, error. + + +### Nested Schema for `databases.data_import.data_source` + +Required: + +- `database` (String) The name of the database imported from. +- `hostname` (String) The hostname where the database lives. 
+- `port` (String) The port on which the database listens on the host. + + + + +### Nested Schema for `databases.region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/oauth_applications.md b/docs/data-sources/oauth_applications.md new file mode 100644 index 0000000..ad209b5 --- /dev/null +++ b/docs/data-sources/oauth_applications.md @@ -0,0 +1,52 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_oauth_applications Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale OAuth applications. (requires feature flag) +--- + +# planetscale_oauth_applications (Data Source) + +A list of PlanetScale OAuth applications. (requires feature flag) + +## Example Usage + +```terraform +# requires a feature flag, contact support to enable it + +data "planetscale_oauth_applications" "example" { + organization = data.planetscale_organization.example.name +} + +output "oauth_apps" { + value = data.planetscale_oauth_applications.example +} +``` + + +## Schema + +### Required + +- `organization` (String) + +### Read-Only + +- `applications` (Attributes List) (see [below for nested schema](#nestedatt--applications)) + + +### Nested Schema for `applications` + +Read-Only: + +- `avatar` (String) The image source for the OAuth application's avatar. +- `client_id` (String) The OAuth application's unique client id. +- `created_at` (String) When the OAuth application was created. +- `domain` (String) The domain of the OAuth application. Used for verification of a valid redirect uri. 
+- `id` (String) The ID of the OAuth application.
+- `name` (String) The name of the OAuth application.
+- `redirect_uri` (String) The redirect URI of the OAuth application.
+- `scopes` (List of String) The scopes that the OAuth application requires on a user's account.
+- `tokens` (Number) The number of tokens issued by the OAuth application.
+- `updated_at` (String) When the OAuth application was last updated.
diff --git a/docs/data-sources/organization.md b/docs/data-sources/organization.md
new file mode 100644
index 0000000..dd727cf
--- /dev/null
+++ b/docs/data-sources/organization.md
@@ -0,0 +1,69 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "planetscale_organization Data Source - terraform-provider-planetscale"
+subcategory: ""
+description: |-
+  A PlanetScale organization.
+---
+
+# planetscale_organization (Data Source)
+
+A PlanetScale organization.
+
+## Example Usage
+
+```terraform
+data "planetscale_organization" "example" {
+  name = "example"
+}
+
+output "org" {
+  value = data.planetscale_organization.example
+}
+```
+
+
+## Schema
+
+### Required
+
+- `name` (String) The name of the organization.
+
+### Read-Only
+
+- `admin_only_production_access` (Boolean) Whether or not only administrators can access production branches in the organization.
+- `billing_email` (String) The billing email of the organization.
+- `can_create_databases` (Boolean) Whether or not more databases can be created in the organization.
+- `created_at` (String) When the organization was created.
+- `database_count` (Number) The number of databases in the organization.
+- `features` (Attributes) Features that are enabled on the organization. (see [below for nested schema](#nestedatt--features))
+- `flags` (Attributes) Flags enabled on the organization. (see [below for nested schema](#nestedatt--flags))
+- `free_databases_remaining` (Number) The number of remaining free databases that can be created in the organization.
+- `has_past_due_invoices` (Boolean) Whether or not the organization has past due billing invoices. +- `id` (String) The ID for the organization. +- `idp_managed_roles` (Boolean) Whether or not the IdP provider is be responsible for managing roles in PlanetScale. +- `plan` (String) The billing plan of the organization. +- `single_tenancy` (Boolean) Whether or not the organization has single tenancy enabled. +- `sleeping_database_count` (Number) The number of sleeping databases in the organization. +- `sso` (Boolean) Whether or not SSO is enabled on the organization. +- `sso_directory` (Boolean) Whether or not the organization uses a WorkOS directory. +- `sso_portal_url` (String) The URL of the organization's SSO portal. +- `updated_at` (String) When the organization was last updated. +- `valid_billing_info` (Boolean) Whether or not the organization's billing information is valid. + + +### Nested Schema for `features` + +Read-Only: + +- `insights` (Boolean) +- `single_tenancy` (Boolean) +- `sso` (Boolean) + + + +### Nested Schema for `flags` + +Read-Only: + +- `example_flag` (String) diff --git a/docs/data-sources/organization_regions.md b/docs/data-sources/organization_regions.md new file mode 100644 index 0000000..861868e --- /dev/null +++ b/docs/data-sources/organization_regions.md @@ -0,0 +1,47 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_organization_regions Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale regions for the organization. +--- + +# planetscale_organization_regions (Data Source) + +A list of PlanetScale regions for the organization. 
+ +## Example Usage + +```terraform +data "planetscale_organization_regions" "example" { + organization = "example" +} + +output "org_regions" { + value = data.planetscale_organization_regions.example +} +``` + + +## Schema + +### Required + +- `organization` (String) + +### Read-Only + +- `regions` (Attributes List) (see [below for nested schema](#nestedatt--regions)) + + +### Nested Schema for `regions` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/organizations.md b/docs/data-sources/organizations.md new file mode 100644 index 0000000..2510888 --- /dev/null +++ b/docs/data-sources/organizations.md @@ -0,0 +1,71 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_organizations Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale organizations. +--- + +# planetscale_organizations (Data Source) + +A list of PlanetScale organizations. + +## Example Usage + +```terraform +data "planetscale_organizations" "example" {} + +output "orgs" { + value = data.planetscale_organizations.example +} +``` + + +## Schema + +### Read-Only + +- `organizations` (Attributes List) (see [below for nested schema](#nestedatt--organizations)) + + +### Nested Schema for `organizations` + +Read-Only: + +- `admin_only_production_access` (Boolean) Whether or not only administrators can access production branches in the organization. +- `billing_email` (String) The billing email of the organization. +- `can_create_databases` (Boolean) Whether or not more databases can be created in the organization. 
+- `created_at` (String) When the organization was created. +- `database_count` (Number) The number of databases in the organization. +- `features` (Attributes) Features that are enabled on the organization. (see [below for nested schema](#nestedatt--organizations--features)) +- `flags` (Attributes) . (see [below for nested schema](#nestedatt--organizations--flags)) +- `free_databases_remaining` (Number) The number of remaining free databases that can be created in the organization. +- `has_past_due_invoices` (Boolean) Whether or not the organization has past due billing invoices. +- `id` (String) The ID for the organization. +- `idp_managed_roles` (Boolean) Whether or not the IdP provider is responsible for managing roles in PlanetScale. +- `name` (String) The name of the organization. +- `plan` (String) The billing plan of the organization. +- `single_tenancy` (Boolean) Whether or not the organization has single tenancy enabled. +- `sleeping_database_count` (Number) The number of sleeping databases in the organization. +- `sso` (Boolean) Whether or not SSO is enabled on the organization. +- `sso_directory` (Boolean) Whether or not the organization uses a WorkOS directory. +- `sso_portal_url` (String) The URL of the organization's SSO portal. +- `updated_at` (String) When the organization was last updated. +- `valid_billing_info` (Boolean) Whether or not the organization's billing information is valid. 
+ + +### Nested Schema for `organizations.features` + +Read-Only: + +- `insights` (Boolean) +- `single_tenancy` (Boolean) +- `sso` (Boolean) + + + +### Nested Schema for `organizations.flags` + +Read-Only: + +- `example_flag` (String) diff --git a/docs/data-sources/password.md b/docs/data-sources/password.md new file mode 100644 index 0000000..449dff8 --- /dev/null +++ b/docs/data-sources/password.md @@ -0,0 +1,90 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_password Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale database password. +--- + +# planetscale_password (Data Source) + +A PlanetScale database password. + +## Example Usage + +```terraform +data "planetscale_password" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "antoine-was-here" +} + +output "password" { + value = data.planetscale_password.example +} +``` + + +## Schema + +### Required + +- `branch` (String) The branch this password belongs to.. +- `database` (String) The datanase this branch password belongs to. +- `id` (String) The ID for the password. +- `organization` (String) The organization this database branch password belongs to. + +### Optional + +- `read_only_region_id` (String) If the password is for a read-only region, the ID of the region. + +### Read-Only + +- `access_host_url` (String) The host URL for the password. +- `actor` (Attributes) The actor that created this branch. (see [below for nested schema](#nestedatt--actor)) +- `created_at` (String) When the password was created. +- `database_branch` (Attributes) The branch this password is allowed to access. (see [below for nested schema](#nestedatt--database_branch)) +- `deleted_at` (String) When the password was deleted. +- `expires_at` (String) When the password will expire. +- `name` (String) The display name for the password. 
+- `region` (Attributes) The region in which this password can be used. (see [below for nested schema](#nestedatt--region)) +- `renewable` (Boolean) Whether or not the password can be renewed. +- `role` (String) The role for the password. +- `ttl_seconds` (Number) Time to live (in seconds) for the password. The password will be invalid and unrenewable when TTL has passed. +- `username` (String) The username for the password. + + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `database_branch` + +Read-Only: + +- `access_host_url` (String) +- `id` (String) +- `mysql_edge_address` (String) +- `name` (String) +- `production` (Boolean) + + + +### Nested Schema for `region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/passwords.md b/docs/data-sources/passwords.md new file mode 100644 index 0000000..dd4f0a1 --- /dev/null +++ b/docs/data-sources/passwords.md @@ -0,0 +1,100 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_passwords Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A list of PlanetScale database passwords. +--- + +# planetscale_passwords (Data Source) + +A list of PlanetScale database passwords. 
+ +## Example Usage + +```terraform +data "planetscale_passwords" "example" { + organization = "example" + database = "example_db" + branch = "main" +} + +output "passwords" { + value = data.planetscale_passwords.example +} +``` + + +## Schema + +### Required + +- `branch` (String) +- `database` (String) +- `organization` (String) + +### Optional + +- `read_only_region_id` (String) + +### Read-Only + +- `passwords` (Attributes List) (see [below for nested schema](#nestedatt--passwords)) + + +### Nested Schema for `passwords` + +Read-Only: + +- `access_host_url` (String) The host URL for the password. +- `actor` (Attributes) The actor that created this branch. (see [below for nested schema](#nestedatt--passwords--actor)) +- `branch` (String) The branch this password belongs to.. +- `created_at` (String) When the password was created. +- `database` (String) The datanase this branch password belongs to. +- `database_branch` (Attributes) The branch this password is allowed to access. (see [below for nested schema](#nestedatt--passwords--database_branch)) +- `deleted_at` (String) When the password was deleted. +- `expires_at` (String) When the password will expire. +- `id` (String) The ID for the password. +- `name` (String) The display name for the password. +- `organization` (String) The organization this database branch password belongs to. +- `read_only_region_id` (String) If the password is for a read-only region, the ID of the region. +- `region` (Attributes) The region in which this password can be used. (see [below for nested schema](#nestedatt--passwords--region)) +- `renewable` (Boolean) Whether or not the password can be renewed. +- `role` (String) The role for the password. +- `ttl_seconds` (Number) Time to live (in seconds) for the password. The password will be invalid and unrenewable when TTL has passed. +- `username` (String) The username for the password. 
+ + +### Nested Schema for `passwords.actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `passwords.database_branch` + +Read-Only: + +- `access_host_url` (String) +- `id` (String) +- `mysql_edge_address` (String) +- `name` (String) +- `production` (Boolean) + + + +### Nested Schema for `passwords.region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. diff --git a/docs/data-sources/user.md b/docs/data-sources/user.md new file mode 100644 index 0000000..b6bf5ab --- /dev/null +++ b/docs/data-sources/user.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_user Data Source - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale user. +--- + +# planetscale_user (Data Source) + +A PlanetScale user. + +## Example Usage + +```terraform +# doesn't work right now for some reason + +data "planetscale_user" "example" {} + +output "current_user" { + value = data.planetscale_user.example +} +``` + + +## Schema + +### Read-Only + +- `avatar_url` (String) The URL source of the user's avatar. +- `created_at` (String) When the user was created. +- `default_organization_id` (String) The default organization for the user. +- `directory_managed` (Boolean) Whether or not the user is managed by a WorkOS directory. +- `display_name` (String) The display name of the user. +- `email` (String) The email of the user. +- `email_verified` (Boolean) Whether or not the user is verified by email. 
+- `id` (String) The ID of the user. +- `managed` (Boolean) Whether or not the user is managed by an authentication provider. +- `name` (String) The name of the user. +- `sso` (Boolean) Whether or not the user is managed by WorkOS. +- `two_factor_auth_configured` (Boolean) Whether or not the user has configured two factor authentication. +- `updated_at` (String) When the user was last updated. diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..0865ddc --- /dev/null +++ b/docs/index.md @@ -0,0 +1,35 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale Provider" +subcategory: "" +description: |- + +--- + +# planetscale Provider + + + +## Example Usage + +```terraform +terraform { + required_providers { + planetscale = { + source = "registry.terraform.io/planetscale/planetscale" + } + } +} + +provider "planetscale" { + service_token_name = "luq1jk0pjccp" +} +``` + + +## Schema + +### Optional + +- `endpoint` (String) Example provider attribute +- `service_token_name` (String) Name of the service token to use diff --git a/docs/resources/backup.md b/docs/resources/backup.md new file mode 100644 index 0000000..a494e41 --- /dev/null +++ b/docs/resources/backup.md @@ -0,0 +1,81 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_backup Resource - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale backup +--- + +# planetscale_backup (Resource) + +A PlanetScale backup + +## Example Usage + +```terraform +resource "planetscale_backup" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "antoine_was_here" + backup_policy = { + retention_unit = "day" + retention_value = 1 + } +} +``` + + +## Schema + +### Required + +- `backup_policy` (Attributes) . (see [below for nested schema](#nestedatt--backup_policy)) +- `branch` (String) The branch being backed up. 
+- `database` (String) The database to which the branch being backed up belongs. +- `name` (String) The name of the backup. +- `organization` (String) The organization to which the database branch being backed up belongs. + +### Read-Only + +- `actor` (Attributes) . (see [below for nested schema](#nestedatt--actor)) +- `created_at` (String) When the backup was created. +- `estimated_storage_cost` (String) The estimated storage cost of the backup. +- `id` (String) The ID of the backup. +- `required` (Boolean) Whether or not the backup policy is required. +- `restored_branches` (List of String) Branches that have been restored with this backup. +- `size` (Number) The size of the backup. +- `state` (String) The current state of the backup. +- `updated_at` (String) When the backup was last updated. + + +### Nested Schema for `backup_policy` + +Required: + +- `retention_unit` (String) The unit for the retention period of the backup policy. +- `retention_value` (Number) A number value for the retention period of the backup policy. + +Read-Only: + +- `created_at` (String) When the backup policy was created. +- `frequency_unit` (String) The unit for the frequency of the backup policy. +- `frequency_value` (Number) A number value for the frequency of the backup policy. +- `id` (String) The ID of the backup policy. +- `last_ran_at` (String) When the backup was last run. +- `name` (String) The name of the backup policy. +- `next_run_at` (String) When the backup will next run. +- `schedule_day` (String) Day of the week that the backup is scheduled. +- `schedule_week` (String) Week of the month that the backup is scheduled. +- `target` (String) Whether the backup policy is for a production or development database, or for a database branch. +- `updated_at` (String) When the backup policy was last updated. 
+ + + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor diff --git a/docs/resources/branch.md b/docs/resources/branch.md new file mode 100644 index 0000000..9f94c3b --- /dev/null +++ b/docs/resources/branch.md @@ -0,0 +1,91 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_branch Resource - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale branch. +--- + +# planetscale_branch (Resource) + +A PlanetScale branch. + +## Example Usage + +```terraform +resource "planetscale_branch" "example" { + organization = "example" + database = "example_db" + name = "antoinewritescode" + parent_branch = "main" +} +``` + + +## Schema + +### Required + +- `database` (String) The database this branch belongs to. +- `name` (String) The name of the branch. +- `organization` (String) The organization this branch belongs to. +- `parent_branch` (String) The name of the parent branch from which the branch was created. + +### Optional + +- `production` (Boolean) Whether or not the branch is a production branch. + +### Read-Only + +- `access_host_url` (String) The access host URL for the branch. This is a legacy field, use `mysql_edge_address`. +- `actor` (Attributes) The actor who created this branch. (see [below for nested schema](#nestedatt--actor)) +- `cluster_rate_name` (String) The SKU representing the branch's cluster size. +- `created_at` (String) When the branch was created. +- `html_url` (String) Planetscale app URL for the branch. +- `id` (String) The ID of the branch. +- `initial_restore_id` (String) The ID of the backup from which the branch was restored. +- `mysql_address` (String) The MySQL address for the branch. +- `mysql_edge_address` (String) The address of the MySQL provider for the branch. 
+- `ready` (Boolean) Whether or not the branch is ready to serve queries. +- `region` (Attributes) The region in which this branch lives. (see [below for nested schema](#nestedatt--region)) +- `restore_checklist_completed_at` (String) When a user last marked a backup restore checklist as completed. +- `restored_from_branch` (Attributes) todo (see [below for nested schema](#nestedatt--restored_from_branch)) +- `schema_last_updated_at` (String) When the schema for the branch was last updated. +- `shard_count` (Number) The number of shards in the branch. +- `sharded` (Boolean) Whether or not the branch is sharded. +- `updated_at` (String) When the branch was last updated. + + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. + + + +### Nested Schema for `restored_from_branch` + +Read-Only: + +- `created_at` (String) When the resource was created. +- `deleted_at` (String) When the resource was deleted, if deleted. +- `id` (String) The ID for the resource. +- `name` (String) The name for the resource. +- `updated_at` (String) When the resource was last updated. 
diff --git a/docs/resources/database.md b/docs/resources/database.md new file mode 100644 index 0000000..f1b720d --- /dev/null +++ b/docs/resources/database.md @@ -0,0 +1,87 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_database Resource - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale database. +--- + +# planetscale_database (Resource) + +A PlanetScale database. + +## Example Usage + +```terraform +resource "planetscale_database" "example" { + organization = "example" + name = "anotherdb" +} +``` + + +## Schema + +### Required + +- `name` (String) The name of this database. +- `organization` (String) The organization this database belongs to. + +### Optional + +- `allow_data_branching` (Boolean) Whether seeding branches with data is enabled for all branches. +- `automatic_migrations` (Boolean) Whether to automatically manage Rails migrations during deploy requests. +- `cluster_size` (String) The size of the database cluster plan. +- `default_branch` (String) The default branch for the database. +- `insights_raw_queries` (Boolean) The URL to see this database's branches in the web UI. +- `issues_count` (Number) The total number of ongoing issues within a database. +- `migration_framework` (String) Framework used for applying migrations. +- `migration_table_name` (String) Table name to use for copying schema migration data. +- `multiple_admins_required_for_deletion` (Boolean) If the database requires multiple admins for deletion. +- `plan` (String) The database plan. +- `production_branch_web_console` (Boolean) Whether web console is enabled for production branches. +- `region` (String) The region the database lives in. +- `require_approval_for_deploy` (Boolean) Whether an approval is required to deploy schema changes to this database. +- `restrict_branch_region` (Boolean) Whether to restrict branch creation to one region. 
+ +### Read-Only + +- `at_backup_restore_branches_limit` (Boolean) If the database has reached its backup restored branch limit. +- `at_development_branch_limit` (Boolean) If the database has reached its development branch limit. +- `branches_count` (Number) The total number of database branches. +- `branches_url` (String) The URL to retrieve this database's branches via the API. +- `created_at` (String) When the database was created. +- `data_import` (Attributes) If the database was created from an import, describes the import process. (see [below for nested schema](#nestedatt--data_import)) +- `default_branch_read_only_regions_count` (Number) Number of read only regions in the default branch. +- `default_branch_shard_count` (Number) Number of shards in the default branch. +- `default_branch_table_count` (Number) Number of tables in the default branch schema. +- `development_branches_count` (Number) The total number of database development branches. +- `html_url` (String) The URL to see this database in the web UI. +- `id` (String) The ID of the database. +- `production_branches_count` (Number) The total number of database production branches. +- `ready` (Boolean) If the database is ready to be used. +- `schema_last_updated_at` (String) When the default branch schema was last changed. +- `sharded` (Boolean) If the database is sharded. +- `state` (String) State of the database. +- `updated_at` (String) When the database was last updated. +- `url` (String) The URL to the database API endpoint. + + +### Nested Schema for `data_import` + +Read-Only: + +- `data_source` (Attributes) Connection information for the source of the data for the import. (see [below for nested schema](#nestedatt--data_import--data_source)) +- `finished_at` (String) When the import finished. +- `import_check_errors` (String) Errors encountered while preparing the import. +- `started_at` (String) When the import started. 
+- `state` (String) The state of the import, one of: pending, queued, in_progress, complete, cancelled, error. + + +### Nested Schema for `data_import.data_source` + +Required: + +- `database` (String) The name of the database imported from. +- `hostname` (String) The hostname where the database lives. +- `port` (String) The port on which the database listens on the host. diff --git a/docs/resources/password.md b/docs/resources/password.md new file mode 100644 index 0000000..4d4f6b0 --- /dev/null +++ b/docs/resources/password.md @@ -0,0 +1,91 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "planetscale_password Resource - terraform-provider-planetscale" +subcategory: "" +description: |- + A PlanetScale database password. +--- + +# planetscale_password (Resource) + +A PlanetScale database password. + +## Example Usage + +```terraform +resource "planetscale_password" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "a-password-for-antoine" +} + +output "password" { + sensitive = true + value = planetscale_password.example +} +``` + + +## Schema + +### Required + +- `branch` (String) The branch this password belongs to. +- `database` (String) The database this branch password belongs to. +- `organization` (String) The organization this database branch password belongs to. + +### Optional + +- `name` (String) The display name for the password. +- `role` (String) The role for the password. +- `ttl_seconds` (Number) Time to live (in seconds) for the password. The password will be invalid and unrenewable when TTL has passed. + +### Read-Only + +- `access_host_url` (String) The host URL for the password. +- `actor` (Attributes) The actor that created this branch. (see [below for nested schema](#nestedatt--actor)) +- `created_at` (String) When the password was created. +- `database_branch` (Attributes) The branch this password is allowed to access. 
(see [below for nested schema](#nestedatt--database_branch)) +- `deleted_at` (String) When the password was deleted. +- `expires_at` (String) When the password will expire. +- `id` (String) The ID for the password. +- `plaintext` (String, Sensitive) The plaintext password, only available if the password was created by this provider. +- `region` (Attributes) The region in which this password can be used. (see [below for nested schema](#nestedatt--region)) +- `renewable` (Boolean) Whether or not the password can be renewed. +- `username` (String) The username for the password. + + +### Nested Schema for `actor` + +Read-Only: + +- `avatar_url` (String) The URL of the actor's avatar +- `display_name` (String) The name of the actor +- `id` (String) The ID of the actor + + + +### Nested Schema for `database_branch` + +Read-Only: + +- `access_host_url` (String) +- `id` (String) +- `mysql_edge_address` (String) +- `name` (String) +- `production` (Boolean) + + + +### Nested Schema for `region` + +Read-Only: + +- `display_name` (String) Name of the region. +- `enabled` (Boolean) Whether or not the region is currently active. +- `id` (String) The ID of the region. +- `location` (String) Location of the region. +- `provider` (String) Provider for the region (ex. AWS). +- `public_ip_addresses` (List of String) Public IP addresses for the region. +- `slug` (String) The slug of the region. 
diff --git a/examples/data-sources/planetscale_backup/data-source.tf b/examples/data-sources/planetscale_backup/data-source.tf new file mode 100644 index 0000000..58d5f84 --- /dev/null +++ b/examples/data-sources/planetscale_backup/data-source.tf @@ -0,0 +1,10 @@ +data "planetscale_backup" "example" { + organization = "example.com" + database = "example_db" + branch = "main" + id = "k20nb1b7ut18" +} + +output "backup" { + value = data.planetscale_backup.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_backups/data-source.tf b/examples/data-sources/planetscale_backups/data-source.tf new file mode 100644 index 0000000..621f427 --- /dev/null +++ b/examples/data-sources/planetscale_backups/data-source.tf @@ -0,0 +1,9 @@ +data "planetscale_backups" "example" { + organization = "example.com" + database = "example_db" + branch = "main" +} + +output "backups" { + value = data.planetscale_backups.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_branch/data-source.tf b/examples/data-sources/planetscale_branch/data-source.tf new file mode 100644 index 0000000..a12e0fd --- /dev/null +++ b/examples/data-sources/planetscale_branch/data-source.tf @@ -0,0 +1,10 @@ + +data "planetscale_branch" "example" { + organization = "example.com" + database = "example_db" + name = "main" +} + +output "branch" { + value = data.planetscale_branch.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_branch_schema/data-source.tf b/examples/data-sources/planetscale_branch_schema/data-source.tf new file mode 100644 index 0000000..7c8bbf5 --- /dev/null +++ b/examples/data-sources/planetscale_branch_schema/data-source.tf @@ -0,0 +1,9 @@ +data "planetscale_branch_schema" "example" { + organization = "example.com" + database = "example_db" + branch = "main" +} + +output "branch_schema" { + value = data.planetscale_branch_schema.example +} \ No newline at end of file diff --git 
a/examples/data-sources/planetscale_branch_schema_lint/data-source.tf b/examples/data-sources/planetscale_branch_schema_lint/data-source.tf new file mode 100644 index 0000000..1036362 --- /dev/null +++ b/examples/data-sources/planetscale_branch_schema_lint/data-source.tf @@ -0,0 +1,9 @@ +data "planetscale_branch_schema_lint" "example" { + organization = "example.com" + database = "example_db" + branch = "main" +} + +output "schema_lint" { + value = data.planetscale_branch_schema_lint.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_branches/data-source.tf b/examples/data-sources/planetscale_branches/data-source.tf new file mode 100644 index 0000000..a2dd1be --- /dev/null +++ b/examples/data-sources/planetscale_branches/data-source.tf @@ -0,0 +1,8 @@ +data "planetscale_branches" "example" { + organization = "example.com" + database = "example_db" +} + +output "branches" { + value = data.planetscale_branches.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_database/data-source.tf b/examples/data-sources/planetscale_database/data-source.tf new file mode 100644 index 0000000..7cc0dca --- /dev/null +++ b/examples/data-sources/planetscale_database/data-source.tf @@ -0,0 +1,8 @@ +data "planetscale_database" "example" { + organization = "example" + name = "again" +} + +output "db" { + value = data.planetscale_database.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_database_read_only_regions/data-source.tf b/examples/data-sources/planetscale_database_read_only_regions/data-source.tf new file mode 100644 index 0000000..d1a77b2 --- /dev/null +++ b/examples/data-sources/planetscale_database_read_only_regions/data-source.tf @@ -0,0 +1,8 @@ +data "planetscale_database_read_only_regions" "example" { + organization = "example.com" + name = "example_db" +} + +output "database_ro_regions" { + value = data.planetscale_database_regions.example +} \ No newline at end of file 
diff --git a/examples/data-sources/planetscale_database_regions/data-source.tf b/examples/data-sources/planetscale_database_regions/data-source.tf new file mode 100644 index 0000000..3e0bf10 --- /dev/null +++ b/examples/data-sources/planetscale_database_regions/data-source.tf @@ -0,0 +1,8 @@ +data "planetscale_database_regions" "example" { + organization = "example.com" + name = "example_db" +} + +output "database_regions" { + value = data.planetscale_database_regions.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_databases/data-source.tf b/examples/data-sources/planetscale_databases/data-source.tf new file mode 100644 index 0000000..886f16b --- /dev/null +++ b/examples/data-sources/planetscale_databases/data-source.tf @@ -0,0 +1,7 @@ +data "planetscale_databases" "example" { + organization = "example" +} + +output "dbs" { + value = data.planetscale_databases.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_oauth_applications/data-source.tf b/examples/data-sources/planetscale_oauth_applications/data-source.tf new file mode 100644 index 0000000..a254d2f --- /dev/null +++ b/examples/data-sources/planetscale_oauth_applications/data-source.tf @@ -0,0 +1,9 @@ +# requires a feature flag, contact support to enable it + +data "planetscale_oauth_applications" "example" { + organization = data.planetscale_organization.example.name +} + +output "oauth_apps" { + value = data.planetscale_oauth_applications.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_organization/data-source.tf b/examples/data-sources/planetscale_organization/data-source.tf new file mode 100644 index 0000000..2b0f01f --- /dev/null +++ b/examples/data-sources/planetscale_organization/data-source.tf @@ -0,0 +1,7 @@ +data "planetscale_organization" "example" { + name = "example" +} + +output "org" { + value = data.planetscale_organization.example +} \ No newline at end of file diff --git 
a/examples/data-sources/planetscale_organization_regions/data-source.tf b/examples/data-sources/planetscale_organization_regions/data-source.tf new file mode 100644 index 0000000..fcc564b --- /dev/null +++ b/examples/data-sources/planetscale_organization_regions/data-source.tf @@ -0,0 +1,7 @@ +data "planetscale_organization_regions" "example" { + organization = "example" +} + +output "org_regions" { + value = data.planetscale_organization_regions.example +} diff --git a/examples/data-sources/planetscale_organizations/data-source.tf b/examples/data-sources/planetscale_organizations/data-source.tf new file mode 100644 index 0000000..4c5568a --- /dev/null +++ b/examples/data-sources/planetscale_organizations/data-source.tf @@ -0,0 +1,5 @@ +data "planetscale_organizations" "example" {} + +output "orgs" { + value = data.planetscale_organizations.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_password/data-source.tf b/examples/data-sources/planetscale_password/data-source.tf new file mode 100644 index 0000000..00b7882 --- /dev/null +++ b/examples/data-sources/planetscale_password/data-source.tf @@ -0,0 +1,10 @@ +data "planetscale_password" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "antoine-was-here" +} + +output "password" { + value = data.planetscale_password.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_passwords/data-source.tf b/examples/data-sources/planetscale_passwords/data-source.tf new file mode 100644 index 0000000..a9ba15e --- /dev/null +++ b/examples/data-sources/planetscale_passwords/data-source.tf @@ -0,0 +1,9 @@ +data "planetscale_passwords" "example" { + organization = "example" + database = "example_db" + branch = "main" +} + +output "passwords" { + value = data.planetscale_passwords.example +} \ No newline at end of file diff --git a/examples/data-sources/planetscale_user/data-source.tf 
b/examples/data-sources/planetscale_user/data-source.tf new file mode 100644 index 0000000..6e4c1a4 --- /dev/null +++ b/examples/data-sources/planetscale_user/data-source.tf @@ -0,0 +1,7 @@ +# doesn't work right now for some reason + +data "planetscale_user" "example" {} + +output "current_user" { + value = data.planetscale_user.example +} \ No newline at end of file diff --git a/examples/provider/provider.tf b/examples/provider/provider.tf new file mode 100644 index 0000000..e495622 --- /dev/null +++ b/examples/provider/provider.tf @@ -0,0 +1,11 @@ +terraform { + required_providers { + planetscale = { + source = "registry.terraform.io/planetscale/planetscale" + } + } +} + +provider "planetscale" { + service_token_name = "luq1jk0pjccp" +} \ No newline at end of file diff --git a/examples/resources/planetscale_backup/resource.tf b/examples/resources/planetscale_backup/resource.tf new file mode 100644 index 0000000..2bf925b --- /dev/null +++ b/examples/resources/planetscale_backup/resource.tf @@ -0,0 +1,10 @@ +resource "planetscale_backup" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "antoine_was_here" + backup_policy = { + retention_unit = "day" + retention_value = 1 + } +} \ No newline at end of file diff --git a/examples/resources/planetscale_branch/resource.tf b/examples/resources/planetscale_branch/resource.tf new file mode 100644 index 0000000..1325888 --- /dev/null +++ b/examples/resources/planetscale_branch/resource.tf @@ -0,0 +1,6 @@ +resource "planetscale_branch" "example" { + organization = "example" + database = "example_db" + name = "antoinewritescode" + parent_branch = "main" +} \ No newline at end of file diff --git a/examples/resources/planetscale_database/resource.tf b/examples/resources/planetscale_database/resource.tf new file mode 100644 index 0000000..4f1f6e3 --- /dev/null +++ b/examples/resources/planetscale_database/resource.tf @@ -0,0 +1,4 @@ +resource "planetscale_database" "example" { + 
organization = "example" + name = "anotherdb" +} \ No newline at end of file diff --git a/examples/resources/planetscale_password/resource.tf b/examples/resources/planetscale_password/resource.tf new file mode 100644 index 0000000..ab99af4 --- /dev/null +++ b/examples/resources/planetscale_password/resource.tf @@ -0,0 +1,11 @@ +resource "planetscale_password" "example" { + organization = "example" + database = "example_db" + branch = "main" + name = "a-password-for-antoine" +} + +output "password" { + sensitive = true + value = planetscale_password.example +} \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..d2561c5 --- /dev/null +++ b/go.mod @@ -0,0 +1,95 @@ +module github.com/planetscale/terraform-provider-planetscale + +go 1.21 + +require ( + github.com/dave/jennifer v1.7.0 + github.com/go-openapi/loads v0.21.2 + github.com/go-openapi/spec v0.20.9 + github.com/hashicorp/terraform-plugin-docs v0.16.0 + github.com/hashicorp/terraform-plugin-framework v1.3.3 + github.com/hashicorp/terraform-plugin-go v0.18.0 + github.com/hashicorp/terraform-plugin-log v0.9.0 + github.com/hashicorp/terraform-plugin-testing v1.5.1 + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 + github.com/pkg/errors v0.9.1 + github.com/sergi/go-diff v1.2.0 + golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 + golang.org/x/oauth2 v0.10.0 + golang.org/x/text v0.12.0 +) + +require ( + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.2 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect + github.com/agext/levenshtein v1.2.2 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/cloudflare/circl v1.3.3 // 
indirect + github.com/fatih/color v1.13.0 // indirect + github.com/frankban/quicktest v1.14.5 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.4.10 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hc-install v0.5.2 // indirect + github.com/hashicorp/hcl/v2 v2.17.0 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/terraform-exec v0.18.1 // indirect + github.com/hashicorp/terraform-json v0.17.1 // indirect + github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.1 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mitchellh/cli v1.1.5 // indirect + 
github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/posener/complete v1.2.3 // indirect + github.com/russross/blackfriday v1.6.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/zclconf/go-cty v1.13.3 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/sys v0.11.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..3b64aea --- /dev/null +++ b/go.sum @@ -0,0 +1,334 @@ +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod 
h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bwesterb/go-ristretto 
v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/jennifer v1.7.0 h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE= +github.com/dave/jennifer v1.7.0/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= +github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4= +github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= +github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk= +github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8= 
+github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 
h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= 
+github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= +github.com/hashicorp/go-plugin v1.4.10/go.mod h1:6/1TEzT0eQznvI/gV2CM29DLSkAK/e58mUWKVsPaph0= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.5.2 h1:SfwMFnEXVVirpwkDuSF5kymUOhrUxrTq3udEseZdOD0= +github.com/hashicorp/hc-install v0.5.2/go.mod h1:9QISwe6newMWIfEiXpzuu1k9HAGtQYgnSH8H9T8wmoI= +github.com/hashicorp/hcl/v2 v2.17.0 h1:z1XvSUyXd1HP10U4lrLg5e0JMVz6CPaJvAgxM0KNZVY= 
+github.com/hashicorp/hcl/v2 v2.17.0/go.mod h1:gJyW2PTShkJqQBKpAmPO3yxMxIuoXkOF2TpqXzrQyx4= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-exec v0.18.1 h1:LAbfDvNQU1l0NOQlTuudjczVhHj061fNX5H8XZxHlH4= +github.com/hashicorp/terraform-exec v0.18.1/go.mod h1:58wg4IeuAJ6LVsLUeD2DWZZoc/bYi6dzhLHzxM41980= +github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= +github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= +github.com/hashicorp/terraform-plugin-docs v0.16.0 h1:UmxFr3AScl6Wged84jndJIfFccGyBZn52KtMNsS12dI= +github.com/hashicorp/terraform-plugin-docs v0.16.0/go.mod h1:M3ZrlKBJAbPMtNOPwHicGi1c+hZUh7/g0ifT/z7TVfA= +github.com/hashicorp/terraform-plugin-framework v1.3.3 h1:D18BlA8gdV4+W8WKhUqxudiYomPZHv94FFzyoSCKC8Q= +github.com/hashicorp/terraform-plugin-framework v1.3.3/go.mod h1:2gGDpWiTI0irr9NSTLFAKlTi6KwGti3AoU19rFqU30o= +github.com/hashicorp/terraform-plugin-go v0.18.0 h1:IwTkOS9cOW1ehLd/rG0y+u/TGLK9y6fGoBjXVUquzpE= +github.com/hashicorp/terraform-plugin-go v0.18.0/go.mod h1:l7VK+2u5Kf2y+A+742GX0ouLut3gttudmvMgN0PA74Y= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0 h1:gY4SG34ANc6ZSeWEKC9hDTChY0ZiN+Myon17fSA0Xgc= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.28.0/go.mod h1:deXEw/iJXtJxNV9d1c/OVJrvL7Zh0a++v7rzokW6wVY= +github.com/hashicorp/terraform-plugin-testing v1.5.1 h1:T4aQh9JAhmWo4+t1A7x+rnxAJHCDIYW9kXyo4sVO92c= +github.com/hashicorp/terraform-plugin-testing v1.5.1/go.mod h1:dg8clO6K59rZ8w9EshBmDp1CxTIPu3yA4iaDpX1h5u0= +github.com/hashicorp/terraform-registry-address v0.2.1 
h1:QuTf6oJ1+WSflJw6WYOHhLgwUiQ0FrROpHPYFtwTYWM= +github.com/hashicorp/terraform-registry-address v0.2.1/go.mod h1:BSE9fIFzp0qWsJUUyGquo4ldV9k2n+psif6NYkBRS3Y= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 
h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod 
h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 
h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= +github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible 
h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/zclconf/go-cty v1.13.3 h1:m+b9q3YDbg6Bec5rr+KGy1MzEVzY/jC2X+YX4yqKtHI= +github.com/zclconf/go-cty v1.13.3/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819 h1:EDuYyU/MkFXllv9QF9819VlI9a4tzGuCbhG0ExK9o1U= +golang.org/x/exp v0.0.0-20230809150735-7b3493d9a819/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/client/planetscale/planetscale.go b/internal/client/planetscale/planetscale.go new file mode 100644 index 0000000..65d69fb --- /dev/null +++ b/internal/client/planetscale/planetscale.go @@ -0,0 +1,4012 @@ +// Code generated by `github.com/planetscale/terraform-provider-planetscale/internal/cmd/client_codegen` DO NOT EDIT +package planetscale + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +type Client struct { + httpCl *http.Client + baseURL *url.URL +} + +func NewClient(httpCl *http.Client, baseURL *url.URL) *Client { + if baseURL == nil { + baseURL = &url.URL{Scheme: "https", Host: "api.planetscale.com", Path: "/v1"} + } + if !strings.HasSuffix(baseURL.Path, "/") { + baseURL.Path = baseURL.Path + "/" + } + return &Client{httpCl: httpCl, baseURL: baseURL} +} + +type ErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (err *ErrorResponse) Error() string { + return fmt.Sprintf("error %s: %s", err.Code, err.Message) +} 
+ +type Actor struct { + AvatarUrl string `json:"avatar_url" tfsdk:"avatar_url"` + DisplayName string `json:"display_name" tfsdk:"display_name"` + Id string `json:"id" tfsdk:"id"` +} +type Backup struct { + Actor Actor `json:"actor" tfsdk:"actor"` + BackupPolicy BackupPolicy `json:"backup_policy" tfsdk:"backup_policy"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + EstimatedStorageCost string `json:"estimated_storage_cost" tfsdk:"estimated_storage_cost"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + Required bool `json:"required" tfsdk:"required"` + RestoredBranches *[]string `json:"restored_branches,omitempty" tfsdk:"restored_branches"` + SchemaSnapshot SchemaSnapshot `json:"schema_snapshot" tfsdk:"schema_snapshot"` + Size float64 `json:"size" tfsdk:"size"` + State string `json:"state" tfsdk:"state"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type BackupPolicy struct { + CreatedAt string `json:"created_at" tfsdk:"created_at"` + FrequencyUnit string `json:"frequency_unit" tfsdk:"frequency_unit"` + FrequencyValue float64 `json:"frequency_value" tfsdk:"frequency_value"` + Id string `json:"id" tfsdk:"id"` + LastRanAt string `json:"last_ran_at" tfsdk:"last_ran_at"` + Name string `json:"name" tfsdk:"name"` + NextRunAt string `json:"next_run_at" tfsdk:"next_run_at"` + RetentionUnit string `json:"retention_unit" tfsdk:"retention_unit"` + RetentionValue float64 `json:"retention_value" tfsdk:"retention_value"` + ScheduleDay string `json:"schedule_day" tfsdk:"schedule_day"` + ScheduleWeek string `json:"schedule_week" tfsdk:"schedule_week"` + Target string `json:"target" tfsdk:"target"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type Branch struct { + AccessHostUrl *string `json:"access_host_url,omitempty" tfsdk:"access_host_url"` + Actor *Actor `json:"actor,omitempty" tfsdk:"actor"` + ClusterRateName string `json:"cluster_rate_name" tfsdk:"cluster_rate_name"` + CreatedAt string 
`json:"created_at" tfsdk:"created_at"` + HtmlUrl string `json:"html_url" tfsdk:"html_url"` + Id string `json:"id" tfsdk:"id"` + InitialRestoreId *string `json:"initial_restore_id,omitempty" tfsdk:"initial_restore_id"` + MysqlAddress string `json:"mysql_address" tfsdk:"mysql_address"` + MysqlEdgeAddress string `json:"mysql_edge_address" tfsdk:"mysql_edge_address"` + Name string `json:"name" tfsdk:"name"` + ParentBranch *string `json:"parent_branch,omitempty" tfsdk:"parent_branch"` + Production bool `json:"production" tfsdk:"production"` + Ready bool `json:"ready" tfsdk:"ready"` + Region *Region `json:"region,omitempty" tfsdk:"region"` + RestoreChecklistCompletedAt *string `json:"restore_checklist_completed_at,omitempty" tfsdk:"restore_checklist_completed_at"` + RestoredFromBranch *RestoredFromBranch `json:"restored_from_branch,omitempty" tfsdk:"restored_from_branch"` + SchemaLastUpdatedAt string `json:"schema_last_updated_at" tfsdk:"schema_last_updated_at"` + ShardCount *float64 `json:"shard_count,omitempty" tfsdk:"shard_count"` + Sharded bool `json:"sharded" tfsdk:"sharded"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type BranchForPassword struct { + AccessHostUrl string `json:"access_host_url" tfsdk:"access_host_url"` + Id string `json:"id" tfsdk:"id"` + MysqlEdgeAddress string `json:"mysql_edge_address" tfsdk:"mysql_edge_address"` + Name string `json:"name" tfsdk:"name"` + Production bool `json:"production" tfsdk:"production"` +} +type CreatedOauthToken struct { + ActorDisplayName *string `json:"actor_display_name,omitempty" tfsdk:"actor_display_name"` + ActorId *string `json:"actor_id,omitempty" tfsdk:"actor_id"` + DisplayName *string `json:"display_name,omitempty" tfsdk:"display_name"` + Name *string `json:"name,omitempty" tfsdk:"name"` + PlainTextRefreshToken *string `json:"plain_text_refresh_token,omitempty" tfsdk:"plain_text_refresh_token"` + ServiceTokenAccesses *[]string `json:"service_token_accesses,omitempty" 
tfsdk:"service_token_accesses"` + Token *string `json:"token,omitempty" tfsdk:"token"` +} +type DataImport struct { + DataSource DataSource `json:"data_source" tfsdk:"data_source"` + FinishedAt string `json:"finished_at" tfsdk:"finished_at"` + ImportCheckErrors string `json:"import_check_errors" tfsdk:"import_check_errors"` + StartedAt string `json:"started_at" tfsdk:"started_at"` + State string `json:"state" tfsdk:"state"` +} +type DataSource struct { + Database string `json:"database" tfsdk:"database"` + Hostname string `json:"hostname" tfsdk:"hostname"` + Port string `json:"port" tfsdk:"port"` +} +type Database struct { + AllowDataBranching bool `json:"allow_data_branching" tfsdk:"allow_data_branching"` + AtBackupRestoreBranchesLimit bool `json:"at_backup_restore_branches_limit" tfsdk:"at_backup_restore_branches_limit"` + AtDevelopmentBranchLimit bool `json:"at_development_branch_limit" tfsdk:"at_development_branch_limit"` + AutomaticMigrations *bool `json:"automatic_migrations,omitempty" tfsdk:"automatic_migrations"` + BranchesCount float64 `json:"branches_count" tfsdk:"branches_count"` + BranchesUrl string `json:"branches_url" tfsdk:"branches_url"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DataImport *DataImport `json:"data_import,omitempty" tfsdk:"data_import"` + DefaultBranch string `json:"default_branch" tfsdk:"default_branch"` + DefaultBranchReadOnlyRegionsCount float64 `json:"default_branch_read_only_regions_count" tfsdk:"default_branch_read_only_regions_count"` + DefaultBranchShardCount float64 `json:"default_branch_shard_count" tfsdk:"default_branch_shard_count"` + DefaultBranchTableCount float64 `json:"default_branch_table_count" tfsdk:"default_branch_table_count"` + DevelopmentBranchesCount float64 `json:"development_branches_count" tfsdk:"development_branches_count"` + HtmlUrl string `json:"html_url" tfsdk:"html_url"` + Id string `json:"id" tfsdk:"id"` + InsightsRawQueries bool `json:"insights_raw_queries" 
tfsdk:"insights_raw_queries"` + IssuesCount float64 `json:"issues_count" tfsdk:"issues_count"` + MigrationFramework *string `json:"migration_framework,omitempty" tfsdk:"migration_framework"` + MigrationTableName *string `json:"migration_table_name,omitempty" tfsdk:"migration_table_name"` + MultipleAdminsRequiredForDeletion bool `json:"multiple_admins_required_for_deletion" tfsdk:"multiple_admins_required_for_deletion"` + Name string `json:"name" tfsdk:"name"` + Plan string `json:"plan" tfsdk:"plan"` + ProductionBranchWebConsole bool `json:"production_branch_web_console" tfsdk:"production_branch_web_console"` + ProductionBranchesCount float64 `json:"production_branches_count" tfsdk:"production_branches_count"` + Ready bool `json:"ready" tfsdk:"ready"` + Region Region `json:"region" tfsdk:"region"` + RequireApprovalForDeploy bool `json:"require_approval_for_deploy" tfsdk:"require_approval_for_deploy"` + RestrictBranchRegion bool `json:"restrict_branch_region" tfsdk:"restrict_branch_region"` + SchemaLastUpdatedAt *string `json:"schema_last_updated_at,omitempty" tfsdk:"schema_last_updated_at"` + Sharded bool `json:"sharded" tfsdk:"sharded"` + State string `json:"state" tfsdk:"state"` + Type string `json:"type" tfsdk:"type"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` + Url string `json:"url" tfsdk:"url"` +} +type DeployOperation struct { + CanDropData bool `json:"can_drop_data" tfsdk:"can_drop_data"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DdlStatement string `json:"ddl_statement" tfsdk:"ddl_statement"` + DeployErrorDocsUrl string `json:"deploy_error_docs_url" tfsdk:"deploy_error_docs_url"` + DeployErrors []string `json:"deploy_errors" tfsdk:"deploy_errors"` + EtaSeconds float64 `json:"eta_seconds" tfsdk:"eta_seconds"` + Id string `json:"id" tfsdk:"id"` + KeyspaceName string `json:"keyspace_name" tfsdk:"keyspace_name"` + OperationName string `json:"operation_name" tfsdk:"operation_name"` + ProgressPercentage float64 
`json:"progress_percentage" tfsdk:"progress_percentage"` + State string `json:"state" tfsdk:"state"` + SyntaxHighlightedDdl string `json:"syntax_highlighted_ddl" tfsdk:"syntax_highlighted_ddl"` + TableName string `json:"table_name" tfsdk:"table_name"` + TableRecentlyUsed bool `json:"table_recently_used" tfsdk:"table_recently_used"` + TableRecentlyUsedAt string `json:"table_recently_used_at" tfsdk:"table_recently_used_at"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type DeployRequest struct { + Actor Actor `json:"actor" tfsdk:"actor"` + Approved bool `json:"approved" tfsdk:"approved"` + Branch string `json:"branch" tfsdk:"branch"` + BranchDeleted bool `json:"branch_deleted" tfsdk:"branch_deleted"` + BranchDeletedAt string `json:"branch_deleted_at" tfsdk:"branch_deleted_at"` + BranchDeletedBy Actor `json:"branch_deleted_by" tfsdk:"branch_deleted_by"` + ClosedAt string `json:"closed_at" tfsdk:"closed_at"` + ClosedBy Actor `json:"closed_by" tfsdk:"closed_by"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DeployedAt string `json:"deployed_at" tfsdk:"deployed_at"` + DeploymentState string `json:"deployment_state" tfsdk:"deployment_state"` + HtmlBody string `json:"html_body" tfsdk:"html_body"` + HtmlUrl string `json:"html_url" tfsdk:"html_url"` + Id string `json:"id" tfsdk:"id"` + IntoBranch string `json:"into_branch" tfsdk:"into_branch"` + IntoBranchShardCount float64 `json:"into_branch_shard_count" tfsdk:"into_branch_shard_count"` + IntoBranchSharded bool `json:"into_branch_sharded" tfsdk:"into_branch_sharded"` + Notes string `json:"notes" tfsdk:"notes"` + Number float64 `json:"number" tfsdk:"number"` + State string `json:"state" tfsdk:"state"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type DeployRequestWithDeployment struct { + Actor Actor `json:"actor" tfsdk:"actor"` + Approved bool `json:"approved" tfsdk:"approved"` + Branch string `json:"branch" tfsdk:"branch"` + BranchDeleted bool `json:"branch_deleted" 
tfsdk:"branch_deleted"` + BranchDeletedAt string `json:"branch_deleted_at" tfsdk:"branch_deleted_at"` + BranchDeletedBy Actor `json:"branch_deleted_by" tfsdk:"branch_deleted_by"` + ClosedAt string `json:"closed_at" tfsdk:"closed_at"` + ClosedBy Actor `json:"closed_by" tfsdk:"closed_by"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DeployedAt string `json:"deployed_at" tfsdk:"deployed_at"` + Deployment Deployment `json:"deployment" tfsdk:"deployment"` + DeploymentState string `json:"deployment_state" tfsdk:"deployment_state"` + HtmlBody string `json:"html_body" tfsdk:"html_body"` + HtmlUrl string `json:"html_url" tfsdk:"html_url"` + Id string `json:"id" tfsdk:"id"` + IntoBranch string `json:"into_branch" tfsdk:"into_branch"` + IntoBranchShardCount float64 `json:"into_branch_shard_count" tfsdk:"into_branch_shard_count"` + IntoBranchSharded bool `json:"into_branch_sharded" tfsdk:"into_branch_sharded"` + Notes string `json:"notes" tfsdk:"notes"` + Number float64 `json:"number" tfsdk:"number"` + State string `json:"state" tfsdk:"state"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type DeployReview struct { + Actor Actor `json:"actor" tfsdk:"actor"` + Body string `json:"body" tfsdk:"body"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + HtmlBody string `json:"html_body" tfsdk:"html_body"` + Id string `json:"id" tfsdk:"id"` + State string `json:"state" tfsdk:"state"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type Deployment struct { + AutoCutover bool `json:"auto_cutover" tfsdk:"auto_cutover"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + CutoverAt *string `json:"cutover_at,omitempty" tfsdk:"cutover_at"` + CutoverExpiring bool `json:"cutover_expiring" tfsdk:"cutover_expiring"` + DeployCheckErrors *string `json:"deploy_check_errors,omitempty" tfsdk:"deploy_check_errors"` + FinishedAt *string `json:"finished_at,omitempty" tfsdk:"finished_at"` + Id string `json:"id" tfsdk:"id"` + QueuedAt *string 
`json:"queued_at,omitempty" tfsdk:"queued_at"` + ReadyToCutoverAt *string `json:"ready_to_cutover_at,omitempty" tfsdk:"ready_to_cutover_at"` + StartedAt *string `json:"started_at,omitempty" tfsdk:"started_at"` + State string `json:"state" tfsdk:"state"` + SubmittedAt string `json:"submitted_at" tfsdk:"submitted_at"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type Features struct { + Insights *bool `json:"insights,omitempty" tfsdk:"insights"` + SingleTenancy *bool `json:"single_tenancy,omitempty" tfsdk:"single_tenancy"` + Sso *bool `json:"sso,omitempty" tfsdk:"sso"` +} +type Flags struct { + ExampleFlag *string `json:"example_flag,omitempty" tfsdk:"example_flag"` +} +type LintError struct { + AutoIncrementColumnNames []string `json:"auto_increment_column_names" tfsdk:"auto_increment_column_names"` + CharsetName string `json:"charset_name" tfsdk:"charset_name"` + CheckConstraintName string `json:"check_constraint_name" tfsdk:"check_constraint_name"` + ColumnName string `json:"column_name" tfsdk:"column_name"` + DocsUrl string `json:"docs_url" tfsdk:"docs_url"` + EngineName string `json:"engine_name" tfsdk:"engine_name"` + EnumValue string `json:"enum_value" tfsdk:"enum_value"` + ErrorDescription string `json:"error_description" tfsdk:"error_description"` + ForeignKeyColumnNames []string `json:"foreign_key_column_names" tfsdk:"foreign_key_column_names"` + JsonPath string `json:"json_path" tfsdk:"json_path"` + KeyspaceName string `json:"keyspace_name" tfsdk:"keyspace_name"` + LintError string `json:"lint_error" tfsdk:"lint_error"` + PartitionName string `json:"partition_name" tfsdk:"partition_name"` + PartitioningType string `json:"partitioning_type" tfsdk:"partitioning_type"` + SubjectType string `json:"subject_type" tfsdk:"subject_type"` + TableName string `json:"table_name" tfsdk:"table_name"` + VindexName string `json:"vindex_name" tfsdk:"vindex_name"` +} +type OauthAccessesByResource struct { + Branch OauthBranchAccesses `json:"branch" 
tfsdk:"branch"` + Database OauthDatabaseAccesses `json:"database" tfsdk:"database"` + Organization OauthOrganizationAccesses `json:"organization" tfsdk:"organization"` + User OauthUserAccesses `json:"user" tfsdk:"user"` +} +type OauthApplication struct { + Avatar *string `json:"avatar,omitempty" tfsdk:"avatar"` + ClientId string `json:"client_id" tfsdk:"client_id"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + Domain string `json:"domain" tfsdk:"domain"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + RedirectUri string `json:"redirect_uri" tfsdk:"redirect_uri"` + Scopes []string `json:"scopes" tfsdk:"scopes"` + Tokens float64 `json:"tokens" tfsdk:"tokens"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type OauthBranchAccesses struct { + Accesses []string `json:"accesses" tfsdk:"accesses"` + Branches []string `json:"branches" tfsdk:"branches"` +} +type OauthDatabaseAccesses struct { + Accesses []string `json:"accesses" tfsdk:"accesses"` + Databases []string `json:"databases" tfsdk:"databases"` +} +type OauthOrganizationAccesses struct { + Accesses []string `json:"accesses" tfsdk:"accesses"` + Organizations []string `json:"organizations" tfsdk:"organizations"` +} +type OauthToken struct { + ActorDisplayName string `json:"actor_display_name" tfsdk:"actor_display_name"` + ActorId string `json:"actor_id" tfsdk:"actor_id"` + ActorType string `json:"actor_type" tfsdk:"actor_type"` + AvatarUrl string `json:"avatar_url" tfsdk:"avatar_url"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DisplayName string `json:"display_name" tfsdk:"display_name"` + ExpiresAt string `json:"expires_at" tfsdk:"expires_at"` + Id string `json:"id" tfsdk:"id"` + LastUsedAt string `json:"last_used_at" tfsdk:"last_used_at"` + Name string `json:"name" tfsdk:"name"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type OauthTokenWithDetails struct { + ActorDisplayName string `json:"actor_display_name" 
tfsdk:"actor_display_name"` + ActorId string `json:"actor_id" tfsdk:"actor_id"` + ActorType string `json:"actor_type" tfsdk:"actor_type"` + AvatarUrl string `json:"avatar_url" tfsdk:"avatar_url"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DisplayName string `json:"display_name" tfsdk:"display_name"` + ExpiresAt string `json:"expires_at" tfsdk:"expires_at"` + Id string `json:"id" tfsdk:"id"` + LastUsedAt string `json:"last_used_at" tfsdk:"last_used_at"` + Name string `json:"name" tfsdk:"name"` + OauthAccessesByResource OauthAccessesByResource `json:"oauth_accesses_by_resource" tfsdk:"oauth_accesses_by_resource"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type OauthUserAccesses struct { + Accesses []string `json:"accesses" tfsdk:"accesses"` + Users []string `json:"users" tfsdk:"users"` +} +type Organization struct { + AdminOnlyProductionAccess bool `json:"admin_only_production_access" tfsdk:"admin_only_production_access"` + BillingEmail *string `json:"billing_email,omitempty" tfsdk:"billing_email"` + CanCreateDatabases bool `json:"can_create_databases" tfsdk:"can_create_databases"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DatabaseCount float64 `json:"database_count" tfsdk:"database_count"` + Features *Features `json:"features,omitempty" tfsdk:"features"` + Flags *Flags `json:"flags,omitempty" tfsdk:"flags"` + FreeDatabasesRemaining float64 `json:"free_databases_remaining" tfsdk:"free_databases_remaining"` + HasPastDueInvoices bool `json:"has_past_due_invoices" tfsdk:"has_past_due_invoices"` + Id string `json:"id" tfsdk:"id"` + IdpManagedRoles bool `json:"idp_managed_roles" tfsdk:"idp_managed_roles"` + Name string `json:"name" tfsdk:"name"` + Plan string `json:"plan" tfsdk:"plan"` + SingleTenancy bool `json:"single_tenancy" tfsdk:"single_tenancy"` + SleepingDatabaseCount float64 `json:"sleeping_database_count" tfsdk:"sleeping_database_count"` + Sso bool `json:"sso" tfsdk:"sso"` + SsoDirectory bool 
`json:"sso_directory" tfsdk:"sso_directory"` + SsoPortalUrl *string `json:"sso_portal_url,omitempty" tfsdk:"sso_portal_url"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` + ValidBillingInfo bool `json:"valid_billing_info" tfsdk:"valid_billing_info"` +} +type Password struct { + AccessHostUrl string `json:"access_host_url" tfsdk:"access_host_url"` + Actor *Actor `json:"actor,omitempty" tfsdk:"actor"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DatabaseBranch BranchForPassword `json:"database_branch" tfsdk:"database_branch"` + DeletedAt *string `json:"deleted_at,omitempty" tfsdk:"deleted_at"` + ExpiresAt *string `json:"expires_at,omitempty" tfsdk:"expires_at"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + Region *Region `json:"region,omitempty" tfsdk:"region"` + Renewable bool `json:"renewable" tfsdk:"renewable"` + Role string `json:"role" tfsdk:"role"` + TtlSeconds float64 `json:"ttl_seconds" tfsdk:"ttl_seconds"` + Username *string `json:"username,omitempty" tfsdk:"username"` +} +type PasswordWithPlaintext struct { + AccessHostUrl string `json:"access_host_url" tfsdk:"access_host_url"` + Actor *Actor `json:"actor,omitempty" tfsdk:"actor"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DatabaseBranch BranchForPassword `json:"database_branch" tfsdk:"database_branch"` + DeletedAt *string `json:"deleted_at,omitempty" tfsdk:"deleted_at"` + ExpiresAt *string `json:"expires_at,omitempty" tfsdk:"expires_at"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + PlainText string `json:"plain_text" tfsdk:"plain_text"` + Region *Region `json:"region,omitempty" tfsdk:"region"` + Renewable bool `json:"renewable" tfsdk:"renewable"` + Role string `json:"role" tfsdk:"role"` + TtlSeconds float64 `json:"ttl_seconds" tfsdk:"ttl_seconds"` + Username *string `json:"username,omitempty" tfsdk:"username"` +} +type QueuedDeployRequest struct { + AutoCutover bool `json:"auto_cutover" 
tfsdk:"auto_cutover"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + CutoverAt *string `json:"cutover_at,omitempty" tfsdk:"cutover_at"` + CutoverExpiring bool `json:"cutover_expiring" tfsdk:"cutover_expiring"` + DeployCheckErrors *string `json:"deploy_check_errors,omitempty" tfsdk:"deploy_check_errors"` + FinishedAt *string `json:"finished_at,omitempty" tfsdk:"finished_at"` + Id string `json:"id" tfsdk:"id"` + QueuedAt *string `json:"queued_at,omitempty" tfsdk:"queued_at"` + ReadyToCutoverAt *string `json:"ready_to_cutover_at,omitempty" tfsdk:"ready_to_cutover_at"` + StartedAt *string `json:"started_at,omitempty" tfsdk:"started_at"` + State string `json:"state" tfsdk:"state"` + SubmittedAt string `json:"submitted_at" tfsdk:"submitted_at"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type ReadOnlyRegion struct { + Actor Actor `json:"actor" tfsdk:"actor"` + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DisplayName string `json:"display_name" tfsdk:"display_name"` + Id string `json:"id" tfsdk:"id"` + Ready bool `json:"ready" tfsdk:"ready"` + ReadyAt string `json:"ready_at" tfsdk:"ready_at"` + Region Region `json:"region" tfsdk:"region"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type Region struct { + DisplayName string `json:"display_name" tfsdk:"display_name"` + Enabled bool `json:"enabled" tfsdk:"enabled"` + Id string `json:"id" tfsdk:"id"` + Location string `json:"location" tfsdk:"location"` + Provider string `json:"provider" tfsdk:"provider"` + PublicIpAddresses []string `json:"public_ip_addresses" tfsdk:"public_ip_addresses"` + Slug string `json:"slug" tfsdk:"slug"` +} +type RestoredFromBranch struct { + CreatedAt string `json:"created_at" tfsdk:"created_at"` + DeletedAt string `json:"deleted_at" tfsdk:"deleted_at"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` +} +type SchemaSnapshot struct { + CreatedAt string 
`json:"created_at" tfsdk:"created_at"` + Id string `json:"id" tfsdk:"id"` + Name string `json:"name" tfsdk:"name"` + UpdatedAt string `json:"updated_at" tfsdk:"updated_at"` + Url string `json:"url" tfsdk:"url"` +} +type TableSchema struct { + Html string `json:"html" tfsdk:"html"` + Name string `json:"name" tfsdk:"name"` + Raw string `json:"raw" tfsdk:"raw"` +} +type User struct { + AvatarUrl *string `json:"avatar_url,omitempty" tfsdk:"avatar_url"` + CreatedAt *string `json:"created_at,omitempty" tfsdk:"created_at"` + DefaultOrganizationId *string `json:"default_organization_id,omitempty" tfsdk:"default_organization_id"` + DirectoryManaged *bool `json:"directory_managed,omitempty" tfsdk:"directory_managed"` + DisplayName *string `json:"display_name,omitempty" tfsdk:"display_name"` + Email *string `json:"email,omitempty" tfsdk:"email"` + EmailVerified *bool `json:"email_verified,omitempty" tfsdk:"email_verified"` + Id *string `json:"id,omitempty" tfsdk:"id"` + Managed *bool `json:"managed,omitempty" tfsdk:"managed"` + Name *string `json:"name,omitempty" tfsdk:"name"` + Sso *bool `json:"sso,omitempty" tfsdk:"sso"` + TwoFactorAuthConfigured *bool `json:"two_factor_auth_configured,omitempty" tfsdk:"two_factor_auth_configured"` + UpdatedAt *string `json:"updated_at,omitempty" tfsdk:"updated_at"` +} +type ListOrganizationsRes struct { + Data []Organization `json:"data" tfsdk:"data"` +} +type ListOrganizationsRes401 struct { + *ErrorResponse +} +type ListOrganizationsRes403 struct { + *ErrorResponse +} +type ListOrganizationsRes404 struct { + *ErrorResponse +} +type ListOrganizationsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListOrganizations(ctx context.Context, page *int, perPage *int) (res200 *ListOrganizationsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = 
q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListOrganizationsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListOrganizationsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListOrganizationsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListOrganizationsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListOrganizationsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type GetOrganizationRes struct { + Organization +} +type GetOrganizationRes401 struct { + *ErrorResponse +} +type GetOrganizationRes403 struct { + *ErrorResponse +} +type GetOrganizationRes404 struct { + *ErrorResponse +} +type GetOrganizationRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetOrganization(ctx context.Context, name string) (res200 *GetOrganizationRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + name}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err 
!= nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetOrganizationRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetOrganizationRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetOrganizationRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetOrganizationRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetOrganizationRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type UpdateOrganizationReq struct { + BillingEmail *string `json:"billing_email,omitempty" tfsdk:"billing_email"` + IdpManagedRoles *bool `json:"idp_managed_roles,omitempty" tfsdk:"idp_managed_roles"` + RequireAdminForProductionAccess *bool `json:"require_admin_for_production_access,omitempty" tfsdk:"require_admin_for_production_access"` +} +type UpdateOrganizationRes struct { + Organization +} +type UpdateOrganizationRes401 struct { + *ErrorResponse +} +type UpdateOrganizationRes403 struct { + *ErrorResponse +} +type UpdateOrganizationRes404 struct { + *ErrorResponse +} +type UpdateOrganizationRes500 struct { + *ErrorResponse +} + +func (cl *Client) UpdateOrganization(ctx context.Context, name string, req UpdateOrganizationReq) (res200 *UpdateOrganizationRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + name}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res200, err + } + r, 
err := http.NewRequestWithContext(ctx, "PATCH", u.String(), body) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(UpdateOrganizationRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(UpdateOrganizationRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(UpdateOrganizationRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(UpdateOrganizationRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(UpdateOrganizationRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListRegionsForOrganizationRes struct { + Data []Region `json:"data" tfsdk:"data"` +} +type ListRegionsForOrganizationRes401 struct { + *ErrorResponse +} +type ListRegionsForOrganizationRes403 struct { + *ErrorResponse +} +type ListRegionsForOrganizationRes404 struct { + *ErrorResponse +} +type ListRegionsForOrganizationRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListRegionsForOrganization(ctx context.Context, name string, page *int, perPage *int) (res200 *ListRegionsForOrganizationRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + name + "/regions"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + 
q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListRegionsForOrganizationRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListRegionsForOrganizationRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListRegionsForOrganizationRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListRegionsForOrganizationRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListRegionsForOrganizationRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListDatabasesRes struct { + Data []Database `json:"data" tfsdk:"data"` +} +type ListDatabasesRes401 struct { + *ErrorResponse +} +type ListDatabasesRes403 struct { + *ErrorResponse +} +type ListDatabasesRes404 struct { + *ErrorResponse +} +type ListDatabasesRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListDatabases(ctx context.Context, organization string, page *int, perPage *int) (res200 *ListDatabasesRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases"}) + q := u.Query() + if page != nil { + q.Set("page", 
strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListDatabasesRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListDatabasesRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListDatabasesRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListDatabasesRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListDatabasesRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type CreateDatabaseReq struct { + ClusterSize *string `json:"cluster_size,omitempty" tfsdk:"cluster_size"` + Name string `json:"name" tfsdk:"name"` + Plan *string `json:"plan,omitempty" tfsdk:"plan"` + Region *string `json:"region,omitempty" tfsdk:"region"` +} +type CreateDatabaseRes struct { + Database +} +type CreateDatabaseRes401 struct { + *ErrorResponse +} +type CreateDatabaseRes403 struct { + *ErrorResponse +} +type CreateDatabaseRes404 struct { + *ErrorResponse +} +type CreateDatabaseRes500 struct { + *ErrorResponse +} + +func (cl *Client) CreateDatabase(ctx context.Context, organization string, req 
CreateDatabaseReq) (res201 *CreateDatabaseRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res201, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res201, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res201, err + } + defer res.Body.Close() + switch res.StatusCode { + case 201: + res201 = new(CreateDatabaseRes) + err = json.NewDecoder(res.Body).Decode(&res201) + case 401: + res401 := new(CreateDatabaseRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(CreateDatabaseRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(CreateDatabaseRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(CreateDatabaseRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res201, err +} + +type ListBranchesRes struct { + Data []Branch `json:"data" tfsdk:"data"` +} +type ListBranchesRes401 struct { + *ErrorResponse +} +type ListBranchesRes403 struct { + *ErrorResponse +} +type ListBranchesRes404 struct { + *ErrorResponse +} +type ListBranchesRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListBranches(ctx context.Context, organization string, database string, page *int, perPage *int) (res200 *ListBranchesRes, err error) { + u := 
cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListBranchesRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListBranchesRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListBranchesRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListBranchesRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListBranchesRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type CreateBranchReq struct { + BackupId *string `json:"backup_id,omitempty" tfsdk:"backup_id"` + Name string `json:"name" tfsdk:"name"` + ParentBranch string `json:"parent_branch" tfsdk:"parent_branch"` +} +type CreateBranchRes struct { + Branch +} +type CreateBranchRes401 struct { + *ErrorResponse +} +type CreateBranchRes403 struct { + *ErrorResponse +} +type CreateBranchRes404 struct { + *ErrorResponse +} +type CreateBranchRes500 struct { + 
*ErrorResponse +} + +func (cl *Client) CreateBranch(ctx context.Context, organization string, database string, req CreateBranchReq) (res201 *CreateBranchRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res201, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res201, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res201, err + } + defer res.Body.Close() + switch res.StatusCode { + case 201: + res201 = new(CreateBranchRes) + err = json.NewDecoder(res.Body).Decode(&res201) + case 401: + res401 := new(CreateBranchRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(CreateBranchRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(CreateBranchRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(CreateBranchRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res201, err +} + +type ListBackupsRes struct { + Data []Backup `json:"data" tfsdk:"data"` +} +type ListBackupsRes401 struct { + *ErrorResponse +} +type ListBackupsRes403 struct { + *ErrorResponse +} +type ListBackupsRes404 struct { + *ErrorResponse +} +type ListBackupsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListBackups(ctx 
context.Context, organization string, database string, branch string, page *int, perPage *int) (res200 *ListBackupsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/backups"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListBackupsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListBackupsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListBackupsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListBackupsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListBackupsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type CreateBackupReq struct { + Name *string `json:"name,omitempty" tfsdk:"name"` + RetentionUnit *string `json:"retention_unit,omitempty" tfsdk:"retention_unit"` + RetentionValue *float64 `json:"retention_value,omitempty" tfsdk:"retention_value"` +} +type CreateBackupRes struct { + Backup +} +type 
CreateBackupRes401 struct { + *ErrorResponse +} +type CreateBackupRes403 struct { + *ErrorResponse +} +type CreateBackupRes404 struct { + *ErrorResponse +} +type CreateBackupRes500 struct { + *ErrorResponse +} + +func (cl *Client) CreateBackup(ctx context.Context, organization string, database string, branch string, req CreateBackupReq) (res201 *CreateBackupRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/backups"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res201, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res201, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res201, err + } + defer res.Body.Close() + switch res.StatusCode { + case 201: + res201 = new(CreateBackupRes) + err = json.NewDecoder(res.Body).Decode(&res201) + case 401: + res401 := new(CreateBackupRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(CreateBackupRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(CreateBackupRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(CreateBackupRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res201, err +} + +type GetBackupRes struct { + Backup +} +type GetBackupRes401 struct { + *ErrorResponse +} +type 
GetBackupRes403 struct { + *ErrorResponse +} +type GetBackupRes404 struct { + *ErrorResponse +} +type GetBackupRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetBackup(ctx context.Context, organization string, database string, branch string, id string) (res200 *GetBackupRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/backups/" + id}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetBackupRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetBackupRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetBackupRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetBackupRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetBackupRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type DeleteBackupRes struct{} +type DeleteBackupRes401 struct { + *ErrorResponse +} +type DeleteBackupRes403 struct { + *ErrorResponse +} +type DeleteBackupRes404 struct { + *ErrorResponse +} +type DeleteBackupRes500 struct { + *ErrorResponse +} + +func (cl *Client) DeleteBackup(ctx context.Context, organization 
string, database string, branch string, id string) (res204 *DeleteBackupRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/backups/" + id}) + r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + if err != nil { + return res204, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res204, err + } + defer res.Body.Close() + switch res.StatusCode { + case 204: + res204 = new(DeleteBackupRes) + err = json.NewDecoder(res.Body).Decode(&res204) + case 401: + res401 := new(DeleteBackupRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(DeleteBackupRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(DeleteBackupRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(DeleteBackupRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res204, err +} + +type ListPasswordsRes struct { + Data []Password `json:"data" tfsdk:"data"` +} +type ListPasswordsRes401 struct { + *ErrorResponse +} +type ListPasswordsRes403 struct { + *ErrorResponse +} +type ListPasswordsRes404 struct { + *ErrorResponse +} +type ListPasswordsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListPasswords(ctx context.Context, organization string, database string, branch string, readOnlyRegionId *string, page *int, perPage *int) (res200 *ListPasswordsRes, err error) { + u 
:= cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords"}) + q := u.Query() + if readOnlyRegionId != nil { + q.Set("read_only_region_id", *readOnlyRegionId) + } + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListPasswordsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListPasswordsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListPasswordsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListPasswordsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListPasswordsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type CreatePasswordReq struct { + Name *string `json:"name,omitempty" tfsdk:"name"` + Role *string `json:"role,omitempty" tfsdk:"role"` + Ttl *float64 `json:"ttl,omitempty" tfsdk:"ttl"` +} +type CreatePasswordRes struct { + PasswordWithPlaintext +} +type CreatePasswordRes401 struct { + *ErrorResponse +} +type CreatePasswordRes403 struct { + 
*ErrorResponse +} +type CreatePasswordRes404 struct { + *ErrorResponse +} +type CreatePasswordRes422 struct { + *ErrorResponse +} +type CreatePasswordRes500 struct { + *ErrorResponse +} + +func (cl *Client) CreatePassword(ctx context.Context, organization string, database string, branch string, req CreatePasswordReq) (res201 *CreatePasswordRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res201, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res201, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res201, err + } + defer res.Body.Close() + switch res.StatusCode { + case 201: + res201 = new(CreatePasswordRes) + err = json.NewDecoder(res.Body).Decode(&res201) + case 401: + res401 := new(CreatePasswordRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(CreatePasswordRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(CreatePasswordRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 422: + res422 := new(CreatePasswordRes422) + err = json.NewDecoder(res.Body).Decode(&res422) + if err == nil { + err = res422 + } + case 500: + res500 := new(CreatePasswordRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + 
return res201, err +} + +type GetPasswordRes struct { + Password +} +type GetPasswordRes401 struct { + *ErrorResponse +} +type GetPasswordRes403 struct { + *ErrorResponse +} +type GetPasswordRes404 struct { + *ErrorResponse +} +type GetPasswordRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetPassword(ctx context.Context, organization string, database string, branch string, id string, readOnlyRegionId *string) (res200 *GetPasswordRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords/" + id}) + q := u.Query() + if readOnlyRegionId != nil { + q.Set("read_only_region_id", *readOnlyRegionId) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetPasswordRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetPasswordRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetPasswordRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetPasswordRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetPasswordRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type 
DeletePasswordRes struct{} +type DeletePasswordRes401 struct { + *ErrorResponse +} +type DeletePasswordRes403 struct { + *ErrorResponse +} +type DeletePasswordRes404 struct { + *ErrorResponse +} +type DeletePasswordRes500 struct { + *ErrorResponse +} + +func (cl *Client) DeletePassword(ctx context.Context, organization string, database string, branch string, id string) (res204 *DeletePasswordRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords/" + id}) + r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + if err != nil { + return res204, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res204, err + } + defer res.Body.Close() + switch res.StatusCode { + case 204: + res204 = new(DeletePasswordRes) + err = json.NewDecoder(res.Body).Decode(&res204) + case 401: + res401 := new(DeletePasswordRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(DeletePasswordRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(DeletePasswordRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(DeletePasswordRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res204, err +} + +type UpdatePasswordReq struct { + Name string `json:"name" tfsdk:"name"` +} +type UpdatePasswordRes struct { + Password +} +type UpdatePasswordRes401 struct { + 
*ErrorResponse +} +type UpdatePasswordRes403 struct { + *ErrorResponse +} +type UpdatePasswordRes404 struct { + *ErrorResponse +} +type UpdatePasswordRes500 struct { + *ErrorResponse +} + +func (cl *Client) UpdatePassword(ctx context.Context, organization string, database string, branch string, id string, req UpdatePasswordReq) (res200 *UpdatePasswordRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords/" + id}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res200, err + } + r, err := http.NewRequestWithContext(ctx, "PATCH", u.String(), body) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(UpdatePasswordRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(UpdatePasswordRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(UpdatePasswordRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(UpdatePasswordRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(UpdatePasswordRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type RenewPasswordReq struct { + ReadOnlyRegionId *string `json:"read_only_region_id,omitempty" 
tfsdk:"read_only_region_id"` +} +type RenewPasswordRes struct { + PasswordWithPlaintext +} +type RenewPasswordRes401 struct { + *ErrorResponse +} +type RenewPasswordRes403 struct { + *ErrorResponse +} +type RenewPasswordRes404 struct { + *ErrorResponse +} +type RenewPasswordRes500 struct { + *ErrorResponse +} + +func (cl *Client) RenewPassword(ctx context.Context, organization string, database string, branch string, id string, req RenewPasswordReq) (res200 *RenewPasswordRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + branch + "/passwords/" + id + "/renew"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res200, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(RenewPasswordRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(RenewPasswordRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(RenewPasswordRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(RenewPasswordRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(RenewPasswordRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil 
+ } + return res200, err +} + +type GetBranchRes struct { + Branch +} +type GetBranchRes401 struct { + *ErrorResponse +} +type GetBranchRes403 struct { + *ErrorResponse +} +type GetBranchRes404 struct { + *ErrorResponse +} +type GetBranchRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetBranch(ctx context.Context, organization string, database string, name string) (res200 *GetBranchRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetBranchRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetBranchRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetBranchRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetBranchRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetBranchRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type DeleteBranchRes struct{} +type DeleteBranchRes401 struct { + *ErrorResponse +} +type DeleteBranchRes403 struct { + *ErrorResponse +} +type DeleteBranchRes404 struct { + *ErrorResponse +} +type DeleteBranchRes500 
struct {
	*ErrorResponse
}

// DeleteBranch DELETEs organizations/{organization}/databases/{database}/branches/{name}.
// A 204 yields an empty DeleteBranchRes; 401/403/404/500 bodies are returned
// as err, any other status as *ErrorResponse or a generic status-code error.
func (cl *Client) DeleteBranch(ctx context.Context, organization string, database string, name string) (res204 *DeleteBranchRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name})
	r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil)
	if err != nil {
		return res204, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res204, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 204:
		res204 = new(DeleteBranchRes)
		// A 204 carries no body, so this Decode normally returns io.EOF,
		// which is cleared below.
		err = json.NewDecoder(res.Body).Decode(&res204)
	case 401:
		res401 := new(DeleteBranchRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(DeleteBranchRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(DeleteBranchRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(DeleteBranchRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res204, err
}

// DemoteBranchRes is the 200 response for DemoteBranch.
type DemoteBranchRes struct {
	Branch
}

// DemoteBranchRes401 is the HTTP 401 error body for DemoteBranch.
type DemoteBranchRes401 struct {
	*ErrorResponse
}

// DemoteBranchRes403 is the HTTP 403 error body for DemoteBranch.
type DemoteBranchRes403 struct {
	*ErrorResponse
}

// DemoteBranchRes404 is the HTTP 404 error body for DemoteBranch.
type DemoteBranchRes404 struct {
	*ErrorResponse
}

// DemoteBranchRes500 is the HTTP 500 error body for DemoteBranch.
type DemoteBranchRes500 struct {
	*ErrorResponse
}

// DemoteBranch POSTs to
// organizations/{organization}/databases/{database}/branches/{name}/demote.
// Response handling matches the other branch endpoints: 200 decoded into
// res200, 401/403/404/500 returned as err, empty body treated as success.
func (cl *Client) DemoteBranch(ctx context.Context, organization string, database string, name string) (res200 *DemoteBranchRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/demote"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(DemoteBranchRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(DemoteBranchRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(DemoteBranchRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(DemoteBranchRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(DemoteBranchRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// PromoteBranchRes is the 200 response for PromoteBranch.
type PromoteBranchRes struct {
	Branch
}

// PromoteBranchRes401 is the HTTP 401 error body for PromoteBranch.
type PromoteBranchRes401 struct {
	*ErrorResponse
}

// PromoteBranchRes403 is the HTTP 403 error body for PromoteBranch.
type PromoteBranchRes403 struct {
	*ErrorResponse
}

// PromoteBranchRes404 is the HTTP 404 error body for PromoteBranch.
type PromoteBranchRes404 struct {
	*ErrorResponse
}

// PromoteBranchRes500 is the HTTP 500 error body for PromoteBranch.
type PromoteBranchRes500 struct {
	*ErrorResponse
}

// PromoteBranch POSTs to
// organizations/{organization}/databases/{database}/branches/{name}/promote;
// response handling matches the other branch endpoints.
func (cl *Client) PromoteBranch(ctx context.Context, organization string, database string, name string) (res200 *PromoteBranchRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/promote"})
	r, err := http.NewRequestWithContext(ctx, "POST",
u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(PromoteBranchRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(PromoteBranchRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(PromoteBranchRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(PromoteBranchRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(PromoteBranchRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// EnableSafeMigrationsForBranchRes is the 200 response for EnableSafeMigrationsForBranch.
type EnableSafeMigrationsForBranchRes struct {
	Branch
}

// EnableSafeMigrationsForBranchRes401 is the HTTP 401 error body for EnableSafeMigrationsForBranch.
type EnableSafeMigrationsForBranchRes401 struct {
	*ErrorResponse
}

// EnableSafeMigrationsForBranchRes403 is the HTTP 403 error body for EnableSafeMigrationsForBranch.
type EnableSafeMigrationsForBranchRes403 struct {
	*ErrorResponse
}

// EnableSafeMigrationsForBranchRes404 is the HTTP 404 error body for EnableSafeMigrationsForBranch.
type EnableSafeMigrationsForBranchRes404 struct {
	*ErrorResponse
}

// EnableSafeMigrationsForBranchRes500 is the HTTP 500 error body for EnableSafeMigrationsForBranch.
type EnableSafeMigrationsForBranchRes500 struct {
	*ErrorResponse
}

// EnableSafeMigrationsForBranch POSTs to
// organizations/{organization}/databases/{database}/branches/{name}/safe-migrations;
// response handling matches the other branch endpoints (200 decoded,
// 401/403/404/500 returned as err, empty body treated as success).
func (cl *Client) EnableSafeMigrationsForBranch(ctx context.Context, organization string, database string, name string) (res200 *EnableSafeMigrationsForBranchRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/safe-migrations"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(EnableSafeMigrationsForBranchRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(EnableSafeMigrationsForBranchRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(EnableSafeMigrationsForBranchRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(EnableSafeMigrationsForBranchRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(EnableSafeMigrationsForBranchRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// DisableSafeMigrationsForBranchRes is the 200 response for DisableSafeMigrationsForBranch.
type DisableSafeMigrationsForBranchRes struct {
	Branch
}

// DisableSafeMigrationsForBranchRes401 is the HTTP 401 error body for DisableSafeMigrationsForBranch.
type DisableSafeMigrationsForBranchRes401 struct {
	*ErrorResponse
}

// DisableSafeMigrationsForBranchRes403 is the HTTP 403 error body for DisableSafeMigrationsForBranch.
type DisableSafeMigrationsForBranchRes403 struct {
	*ErrorResponse
}

// DisableSafeMigrationsForBranchRes404 is the HTTP 404 error body for DisableSafeMigrationsForBranch.
type DisableSafeMigrationsForBranchRes404 struct {
	*ErrorResponse
}

// DisableSafeMigrationsForBranchRes500 is the HTTP 500 error body for DisableSafeMigrationsForBranch.
type DisableSafeMigrationsForBranchRes500 struct {
	*ErrorResponse
}

// DisableSafeMigrationsForBranch DELETEs
// organizations/{organization}/databases/{database}/branches/{name}/safe-migrations;
// response handling matches the other branch endpoints.
func (cl *Client) DisableSafeMigrationsForBranch(ctx context.Context, organization string, database string, name string) (res200 *DisableSafeMigrationsForBranchRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/safe-migrations"})
	r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil)
	if err != nil {
return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(DisableSafeMigrationsForBranchRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(DisableSafeMigrationsForBranchRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(DisableSafeMigrationsForBranchRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(DisableSafeMigrationsForBranchRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(DisableSafeMigrationsForBranchRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// GetBranchSchemaRes is the 200 response for GetBranchSchema: the list of
// table schemas for the branch.
type GetBranchSchemaRes struct {
	Data []TableSchema `json:"data" tfsdk:"data"`
}

// GetBranchSchemaRes401 is the HTTP 401 error body for GetBranchSchema.
type GetBranchSchemaRes401 struct {
	*ErrorResponse
}

// GetBranchSchemaRes403 is the HTTP 403 error body for GetBranchSchema.
type GetBranchSchemaRes403 struct {
	*ErrorResponse
}

// GetBranchSchemaRes404 is the HTTP 404 error body for GetBranchSchema.
type GetBranchSchemaRes404 struct {
	*ErrorResponse
}

// GetBranchSchemaRes500 is the HTTP 500 error body for GetBranchSchema.
type GetBranchSchemaRes500 struct {
	*ErrorResponse
}

// GetBranchSchema GETs
// organizations/{organization}/databases/{database}/branches/{name}/schema,
// optionally filtered by the keyspace query parameter (nil means omitted);
// response handling matches the other branch endpoints.
func (cl *Client) GetBranchSchema(ctx context.Context, organization string, database string, name string, keyspace *string) (res200 *GetBranchSchemaRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/schema"})
	q := u.Query()
	if keyspace != nil {
		q.Set("keyspace", *keyspace)
	}
	u.RawQuery = q.Encode()
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(GetBranchSchemaRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(GetBranchSchemaRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(GetBranchSchemaRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(GetBranchSchemaRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(GetBranchSchemaRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// LintBranchSchemaRes is the 200 response for LintBranchSchema: the list of
// lint errors found in the branch schema.
type LintBranchSchemaRes struct {
	Data []LintError `json:"data" tfsdk:"data"`
}

// LintBranchSchemaRes401 is the HTTP 401 error body for LintBranchSchema.
type LintBranchSchemaRes401 struct {
	*ErrorResponse
}

// LintBranchSchemaRes403 is the HTTP 403 error body for LintBranchSchema.
type LintBranchSchemaRes403 struct {
	*ErrorResponse
}

// LintBranchSchemaRes404 is the HTTP 404 error body for LintBranchSchema.
type LintBranchSchemaRes404 struct {
	*ErrorResponse
}

// LintBranchSchemaRes500 is the HTTP 500 error body for LintBranchSchema.
type LintBranchSchemaRes500 struct {
	*ErrorResponse
}

// LintBranchSchema GETs
// organizations/{organization}/databases/{database}/branches/{name}/schema/lint
// with optional page/per_page query parameters (nil means omitted); response
// handling matches the other branch endpoints.
func (cl *Client) LintBranchSchema(ctx context.Context, organization string, database string, name string, page *int, perPage *int) (res200 *LintBranchSchemaRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/branches/" + name + "/schema/lint"})
	q := u.Query()
	if page != nil {
		q.Set("page", strconv.Itoa(*page))
	}
	if perPage != nil {
q.Set("per_page", strconv.Itoa(*perPage))
	}
	u.RawQuery = q.Encode()
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(LintBranchSchemaRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	case 401:
		res401 := new(LintBranchSchemaRes401)
		err = json.NewDecoder(res.Body).Decode(&res401)
		if err == nil {
			err = res401
		}
	case 403:
		res403 := new(LintBranchSchemaRes403)
		err = json.NewDecoder(res.Body).Decode(&res403)
		if err == nil {
			err = res403
		}
	case 404:
		res404 := new(LintBranchSchemaRes404)
		err = json.NewDecoder(res.Body).Decode(&res404)
		if err == nil {
			err = res404
		}
	case 500:
		res500 := new(LintBranchSchemaRes500)
		err = json.NewDecoder(res.Body).Decode(&res500)
		if err == nil {
			err = res500
		}
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// GetTheDeployQueueRes is the 200 response for GetTheDeployQueue: the
// database's queued deploy requests.
type GetTheDeployQueueRes struct {
	Data []QueuedDeployRequest `json:"data" tfsdk:"data"`
}

// GetTheDeployQueue GETs
// organizations/{organization}/databases/{database}/deploy-queue. Unlike the
// branch endpoints, only 200 is decoded; any other status is returned as the
// decoded *ErrorResponse when present, otherwise as a generic status-code
// error. An empty body (io.EOF from Decode) is treated as success.
func (cl *Client) GetTheDeployQueue(ctx context.Context, organization string, database string) (res200 *GetTheDeployQueueRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-queue"})
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(GetTheDeployQueueRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// ListDeployRequestsRes is the 200 response for ListDeployRequests.
type ListDeployRequestsRes struct {
	Data []DeployRequest `json:"data" tfsdk:"data"`
}

// ListDeployRequests GETs
// organizations/{organization}/databases/{database}/deploy-requests with
// optional page/per_page/state/branch/into_branch query parameters (nil means
// omitted). Only 200 is decoded; other statuses are returned as
// *ErrorResponse or a generic status-code error.
func (cl *Client) ListDeployRequests(ctx context.Context, organization string, database string, page *int, perPage *int, state *string, branch *string, intoBranch *string) (res200 *ListDeployRequestsRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests"})
	q := u.Query()
	if page != nil {
		q.Set("page", strconv.Itoa(*page))
	}
	if perPage != nil {
		q.Set("per_page", strconv.Itoa(*perPage))
	}
	if state != nil {
		q.Set("state", *state)
	}
	if branch != nil {
		q.Set("branch", *branch)
	}
	if intoBranch != nil {
		q.Set("into_branch", *intoBranch)
	}
	u.RawQuery = q.Encode()
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(ListDeployRequestsRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CreateDeployRequestReq is the request body for CreateDeployRequest.
type CreateDeployRequestReq struct {
	Branch *string `json:"branch,omitempty" 
tfsdk:"branch"`
	IntoBranch *string `json:"into_branch,omitempty" tfsdk:"into_branch"`
	Notes *string `json:"notes,omitempty" tfsdk:"notes"`
}

// CreateDeployRequestRes is the 201 response for CreateDeployRequest.
type CreateDeployRequestRes struct {
	DeployRequestWithDeployment
}

// CreateDeployRequest POSTs req to
// organizations/{organization}/databases/{database}/deploy-requests. Only a
// 201 is decoded into res201; any other status is returned as the decoded
// *ErrorResponse when present, otherwise as a generic status-code error. An
// empty body (io.EOF from Decode) is treated as success.
func (cl *Client) CreateDeployRequest(ctx context.Context, organization string, database string, req CreateDeployRequestReq) (res201 *CreateDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests"})
	body := bytes.NewBuffer(nil)
	if err = json.NewEncoder(body).Encode(req); err != nil {
		return res201, err
	}
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body)
	if err != nil {
		return res201, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res201, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 201:
		res201 = new(CreateDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res201)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res201, err
}

// GetDeployRequestRes is the 200 response for GetDeployRequest.
type GetDeployRequestRes struct {
	DeployRequestWithDeployment
}

// GetDeployRequest GETs
// organizations/{organization}/databases/{database}/deploy-requests/{number};
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) GetDeployRequest(ctx context.Context, organization string, database string, number string) (res200 *GetDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number})
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(GetDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CloseDeployRequestReq is the request body for CloseDeployRequest.
type CloseDeployRequestReq struct {
	State *string `json:"state,omitempty" tfsdk:"state"`
}

// CloseDeployRequestRes is the 200 response for CloseDeployRequest.
type CloseDeployRequestRes struct {
	DeployRequestWithDeployment
}

// CloseDeployRequest PATCHes req to
// organizations/{organization}/databases/{database}/deploy-requests/{number};
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) CloseDeployRequest(ctx context.Context, organization string, database string, number string, req CloseDeployRequestReq) (res200 *CloseDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number})
	body := bytes.NewBuffer(nil)
	if err = json.NewEncoder(body).Encode(req); err != nil {
		return res200, err
	}
	r, err := http.NewRequestWithContext(ctx, "PATCH", u.String(), body)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(CloseDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CompleteGatedDeployRequestRes is the 200 response for CompleteGatedDeployRequest.
type CompleteGatedDeployRequestRes struct {
	DeployRequest
}

// CompleteGatedDeployRequest POSTs to
// organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) CompleteGatedDeployRequest(ctx context.Context, organization string, database string, number string) (res200 *CompleteGatedDeployRequestRes, err error) {
	u
:= cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/apply-deploy"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(CompleteGatedDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// UpdateAutoApplyForDeployRequestReq is the request body for
// UpdateAutoApplyForDeployRequest.
type UpdateAutoApplyForDeployRequestReq struct {
	Enable *bool `json:"enable,omitempty" tfsdk:"enable"`
}

// UpdateAutoApplyForDeployRequestRes is the 200 response for
// UpdateAutoApplyForDeployRequest.
type UpdateAutoApplyForDeployRequestRes struct {
	DeployRequest
}

// UpdateAutoApplyForDeployRequest PUTs req to
// organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error. An empty body (io.EOF) is treated as success.
func (cl *Client) UpdateAutoApplyForDeployRequest(ctx context.Context, organization string, database string, number string, req UpdateAutoApplyForDeployRequestReq) (res200 *UpdateAutoApplyForDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/auto-apply"})
	body := bytes.NewBuffer(nil)
	if err = json.NewEncoder(body).Encode(req); err != nil {
		return res200, err
	}
	r, err := http.NewRequestWithContext(ctx, "PUT", u.String(), body)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(UpdateAutoApplyForDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CancelQueuedDeployRequestRes is the 200 response for CancelQueuedDeployRequest.
type CancelQueuedDeployRequestRes struct {
	DeployRequest
}

// CancelQueuedDeployRequest POSTs to
// organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) CancelQueuedDeployRequest(ctx context.Context, organization string, database string, number string) (res200 *CancelQueuedDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/cancel"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(CancelQueuedDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CompleteErroredDeployRes is the 200 response for CompleteErroredDeploy.
type CompleteErroredDeployRes struct {
	DeployRequest
}

// CompleteErroredDeploy POSTs to
// organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) CompleteErroredDeploy(ctx context.Context, organization string, database string, number string) (res200 *CompleteErroredDeployRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/complete-deploy"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err
:= cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(CompleteErroredDeployRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// QueueDeployRequestRes is the 200 response for QueueDeployRequest.
type QueueDeployRequestRes struct {
	DeployRequest
}

// QueueDeployRequest POSTs to
// organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error. An empty body (io.EOF) is treated as success.
func (cl *Client) QueueDeployRequest(ctx context.Context, organization string, database string, number string) (res200 *QueueDeployRequestRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/deploy"})
	r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(QueueDeployRequestRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// GetDeploymentRes is the 200 response for GetDeployment.
type GetDeploymentRes struct {
	Deployment
}

// GetDeployment GETs
// organizations/{organization}/databases/{database}/deploy-requests/{number}/deployment;
// only 200 is decoded, other statuses are returned as *ErrorResponse or a
// generic status-code error.
func (cl *Client) GetDeployment(ctx context.Context, organization string, database string, number string) (res200 *GetDeploymentRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/deployment"})
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(GetDeploymentRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// ListDeployOperationsRes is the 200 response for ListDeployOperations.
type ListDeployOperationsRes struct {
	Data []DeployOperation `json:"data" tfsdk:"data"`
}

// ListDeployOperations GETs
// organizations/{organization}/databases/{database}/deploy-requests/{number}/operations
// with optional page/per_page query parameters (nil means omitted); only 200
// is decoded, other statuses are returned as *ErrorResponse or a generic
// status-code error.
func (cl *Client) ListDeployOperations(ctx context.Context, organization string, database string, number string, page *int, perPage *int) (res200 *ListDeployOperationsRes, err error) {
	u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/operations"})
	q := u.Query()
	if page != nil {
		q.Set("page", strconv.Itoa(*page))
	}
	if perPage != nil {
		q.Set("per_page", strconv.Itoa(*perPage))
	}
	u.RawQuery = q.Encode()
	r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return res200, err
	}
	r.Header.Set("Content-Type", "application/json")
	r.Header.Set("Accept", "application/json")
	res, err := cl.httpCl.Do(r)
	if err != nil {
		return res200, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case 200:
		res200 = new(ListDeployOperationsRes)
		err = json.NewDecoder(res.Body).Decode(&res200)
	default:
		var errBody *ErrorResponse
		_ = json.NewDecoder(res.Body).Decode(&errBody)
		if errBody != nil {
			err = errBody
		} else {
			err = fmt.Errorf("unexpected status code %d", res.StatusCode)
		}
	}
	if errors.Is(err, io.EOF) {
		err = nil
	}
	return res200, err
}

// CompleteRevertRes is the 200 response for CompleteRevert.
type CompleteRevertRes
struct { + DeployRequest +} + +func (cl *Client) CompleteRevert(ctx context.Context, organization string, database string, number string) (res200 *CompleteRevertRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/revert"}) + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(CompleteRevertRes) + err = json.NewDecoder(res.Body).Decode(&res200) + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListDeployRequestReviewsRes struct { + Data []DeployReview `json:"data" tfsdk:"data"` +} + +func (cl *Client) ListDeployRequestReviews(ctx context.Context, organization string, database string, number string) (res200 *ListDeployRequestReviewsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/reviews"}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListDeployRequestReviewsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if 
errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ReviewDeployRequestReq struct { + Body *string `json:"body,omitempty" tfsdk:"body"` + State *string `json:"state,omitempty" tfsdk:"state"` +} +type ReviewDeployRequestRes struct { + DeployReview +} + +func (cl *Client) ReviewDeployRequest(ctx context.Context, organization string, database string, number string, req ReviewDeployRequestReq) (res201 *ReviewDeployRequestRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/reviews"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res201, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res201, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res201, err + } + defer res.Body.Close() + switch res.StatusCode { + case 201: + res201 = new(ReviewDeployRequestRes) + err = json.NewDecoder(res.Body).Decode(&res201) + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res201, err +} + +type SkipRevertPeriodRes struct { + DeployRequest +} + +func (cl *Client) SkipRevertPeriod(ctx context.Context, organization string, database string, number string) (res200 *SkipRevertPeriodRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + database + "/deploy-requests/" + number + "/skip-revert"}) + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), 
nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(SkipRevertPeriodRes) + err = json.NewDecoder(res.Body).Decode(&res200) + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type GetDatabaseRes struct { + Database +} +type GetDatabaseRes401 struct { + *ErrorResponse +} +type GetDatabaseRes403 struct { + *ErrorResponse +} +type GetDatabaseRes404 struct { + *ErrorResponse +} +type GetDatabaseRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetDatabase(ctx context.Context, organization string, name string) (res200 *GetDatabaseRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + name}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetDatabaseRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetDatabaseRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetDatabaseRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetDatabaseRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := 
new(GetDatabaseRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type DeleteDatabaseRes struct{} +type DeleteDatabaseRes401 struct { + *ErrorResponse +} +type DeleteDatabaseRes403 struct { + *ErrorResponse +} +type DeleteDatabaseRes404 struct { + *ErrorResponse +} +type DeleteDatabaseRes500 struct { + *ErrorResponse +} + +func (cl *Client) DeleteDatabase(ctx context.Context, organization string, name string) (res204 *DeleteDatabaseRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + name}) + r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + if err != nil { + return res204, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res204, err + } + defer res.Body.Close() + switch res.StatusCode { + case 204: + res204 = new(DeleteDatabaseRes) + err = json.NewDecoder(res.Body).Decode(&res204) + case 401: + res401 := new(DeleteDatabaseRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(DeleteDatabaseRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(DeleteDatabaseRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(DeleteDatabaseRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } 
else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res204, err +} + +type UpdateDatabaseSettingsReq struct { + AllowDataBranching *bool `json:"allow_data_branching,omitempty" tfsdk:"allow_data_branching"` + AutomaticMigrations *bool `json:"automatic_migrations,omitempty" tfsdk:"automatic_migrations"` + DefaultBranch *string `json:"default_branch,omitempty" tfsdk:"default_branch"` + InsightsRawQueries *bool `json:"insights_raw_queries,omitempty" tfsdk:"insights_raw_queries"` + MigrationFramework *string `json:"migration_framework,omitempty" tfsdk:"migration_framework"` + MigrationTableName *string `json:"migration_table_name,omitempty" tfsdk:"migration_table_name"` + ProductionBranchWebConsole *bool `json:"production_branch_web_console,omitempty" tfsdk:"production_branch_web_console"` + RequireApprovalForDeploy *bool `json:"require_approval_for_deploy,omitempty" tfsdk:"require_approval_for_deploy"` + RestrictBranchRegion *bool `json:"restrict_branch_region,omitempty" tfsdk:"restrict_branch_region"` +} +type UpdateDatabaseSettingsRes struct { + Database +} +type UpdateDatabaseSettingsRes401 struct { + *ErrorResponse +} +type UpdateDatabaseSettingsRes403 struct { + *ErrorResponse +} +type UpdateDatabaseSettingsRes404 struct { + *ErrorResponse +} +type UpdateDatabaseSettingsRes500 struct { + *ErrorResponse +} + +func (cl *Client) UpdateDatabaseSettings(ctx context.Context, organization string, name string, req UpdateDatabaseSettingsReq) (res200 *UpdateDatabaseSettingsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + name}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res200, err + } + r, err := http.NewRequestWithContext(ctx, "PATCH", u.String(), body) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + 
r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(UpdateDatabaseSettingsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(UpdateDatabaseSettingsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(UpdateDatabaseSettingsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(UpdateDatabaseSettingsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(UpdateDatabaseSettingsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListReadOnlyRegionsRes struct { + Data []ReadOnlyRegion `json:"data" tfsdk:"data"` +} +type ListReadOnlyRegionsRes401 struct { + *ErrorResponse +} +type ListReadOnlyRegionsRes403 struct { + *ErrorResponse +} +type ListReadOnlyRegionsRes404 struct { + *ErrorResponse +} +type ListReadOnlyRegionsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListReadOnlyRegions(ctx context.Context, organization string, name string, page *int, perPage *int) (res200 *ListReadOnlyRegionsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + name + "/read-only-regions"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", 
u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListReadOnlyRegionsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListReadOnlyRegionsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListReadOnlyRegionsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListReadOnlyRegionsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListReadOnlyRegionsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListDatabaseRegionsRes struct { + Data []Region `json:"data" tfsdk:"data"` +} +type ListDatabaseRegionsRes401 struct { + *ErrorResponse +} +type ListDatabaseRegionsRes403 struct { + *ErrorResponse +} +type ListDatabaseRegionsRes404 struct { + *ErrorResponse +} +type ListDatabaseRegionsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListDatabaseRegions(ctx context.Context, organization string, name string, page *int, perPage *int) (res200 *ListDatabaseRegionsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/databases/" + name + "/regions"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + 
u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListDatabaseRegionsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListDatabaseRegionsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListDatabaseRegionsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListDatabaseRegionsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListDatabaseRegionsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListOauthApplicationsRes struct { + Data []OauthApplication `json:"data" tfsdk:"data"` +} +type ListOauthApplicationsRes401 struct { + *ErrorResponse +} +type ListOauthApplicationsRes403 struct { + *ErrorResponse +} +type ListOauthApplicationsRes404 struct { + *ErrorResponse +} +type ListOauthApplicationsRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListOauthApplications(ctx context.Context, organization string, page *int, perPage *int) (res200 *ListOauthApplicationsRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/oauth-applications"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + 
if perPage != nil { + q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListOauthApplicationsRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListOauthApplicationsRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListOauthApplicationsRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListOauthApplicationsRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListOauthApplicationsRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type GetOauthApplicationRes struct { + OauthApplication +} +type GetOauthApplicationRes401 struct { + *ErrorResponse +} +type GetOauthApplicationRes403 struct { + *ErrorResponse +} +type GetOauthApplicationRes404 struct { + *ErrorResponse +} +type GetOauthApplicationRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetOauthApplication(ctx context.Context, organization string, applicationId string) (res200 *GetOauthApplicationRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/oauth-applications/" + applicationId}) + r, err := 
http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetOauthApplicationRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetOauthApplicationRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetOauthApplicationRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetOauthApplicationRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetOauthApplicationRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type ListOauthTokensRes struct { + Data []OauthToken `json:"data" tfsdk:"data"` +} +type ListOauthTokensRes401 struct { + *ErrorResponse +} +type ListOauthTokensRes403 struct { + *ErrorResponse +} +type ListOauthTokensRes404 struct { + *ErrorResponse +} +type ListOauthTokensRes500 struct { + *ErrorResponse +} + +func (cl *Client) ListOauthTokens(ctx context.Context, organization string, applicationId string, page *int, perPage *int) (res200 *ListOauthTokensRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/oauth-applications/" + applicationId + "/tokens"}) + q := u.Query() + if page != nil { + q.Set("page", strconv.Itoa(*page)) + } + if perPage != nil { + 
q.Set("per_page", strconv.Itoa(*perPage)) + } + u.RawQuery = q.Encode() + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(ListOauthTokensRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(ListOauthTokensRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(ListOauthTokensRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(ListOauthTokensRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(ListOauthTokensRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type GetOauthTokenRes struct { + OauthTokenWithDetails +} +type GetOauthTokenRes401 struct { + *ErrorResponse +} +type GetOauthTokenRes403 struct { + *ErrorResponse +} +type GetOauthTokenRes404 struct { + *ErrorResponse +} +type GetOauthTokenRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetOauthToken(ctx context.Context, organization string, applicationId string, tokenId string) (res200 *GetOauthTokenRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/oauth-applications/" + applicationId + "/tokens/" + tokenId}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil 
{ + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetOauthTokenRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetOauthTokenRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetOauthTokenRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetOauthTokenRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetOauthTokenRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type DeleteOauthTokenRes struct{} +type DeleteOauthTokenRes401 struct { + *ErrorResponse +} +type DeleteOauthTokenRes403 struct { + *ErrorResponse +} +type DeleteOauthTokenRes404 struct { + *ErrorResponse +} +type DeleteOauthTokenRes500 struct { + *ErrorResponse +} + +func (cl *Client) DeleteOauthToken(ctx context.Context, organization string, applicationId string, tokenId string) (res204 *DeleteOauthTokenRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "organizations/" + organization + "/oauth-applications/" + applicationId + "/tokens/" + tokenId}) + r, err := http.NewRequestWithContext(ctx, "DELETE", u.String(), nil) + if err != nil { + return res204, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil 
{ + return res204, err + } + defer res.Body.Close() + switch res.StatusCode { + case 204: + res204 = new(DeleteOauthTokenRes) + err = json.NewDecoder(res.Body).Decode(&res204) + case 401: + res401 := new(DeleteOauthTokenRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(DeleteOauthTokenRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(DeleteOauthTokenRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(DeleteOauthTokenRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res204, err +} + +type CreateOrRenewOauthTokenReq struct { + ClientId string `json:"client_id" tfsdk:"client_id"` + ClientSecret string `json:"client_secret" tfsdk:"client_secret"` + Code *string `json:"code,omitempty" tfsdk:"code"` + GrantType string `json:"grant_type" tfsdk:"grant_type"` + RedirectUri *string `json:"redirect_uri,omitempty" tfsdk:"redirect_uri"` + RefreshToken *string `json:"refresh_token,omitempty" tfsdk:"refresh_token"` +} +type CreateOrRenewOauthTokenRes struct { + CreatedOauthToken +} +type CreateOrRenewOauthTokenRes403 struct { + *ErrorResponse +} +type CreateOrRenewOauthTokenRes404 struct { + *ErrorResponse +} +type CreateOrRenewOauthTokenRes422 struct { + *ErrorResponse +} +type CreateOrRenewOauthTokenRes500 struct { + *ErrorResponse +} + +func (cl *Client) CreateOrRenewOauthToken(ctx context.Context, organization string, id string, req CreateOrRenewOauthTokenReq) (res200 *CreateOrRenewOauthTokenRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: 
"organizations/" + organization + "/oauth-applications/" + id + "/token"}) + body := bytes.NewBuffer(nil) + if err = json.NewEncoder(body).Encode(req); err != nil { + return res200, err + } + r, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(CreateOrRenewOauthTokenRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 403: + res403 := new(CreateOrRenewOauthTokenRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(CreateOrRenewOauthTokenRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 422: + res422 := new(CreateOrRenewOauthTokenRes422) + err = json.NewDecoder(res.Body).Decode(&res422) + if err == nil { + err = res422 + } + case 500: + res500 := new(CreateOrRenewOauthTokenRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} + +type GetCurrentUserRes struct { + User +} +type GetCurrentUserRes401 struct { + *ErrorResponse +} +type GetCurrentUserRes403 struct { + *ErrorResponse +} +type GetCurrentUserRes404 struct { + *ErrorResponse +} +type GetCurrentUserRes500 struct { + *ErrorResponse +} + +func (cl *Client) GetCurrentUser(ctx context.Context) (res200 *GetCurrentUserRes, err error) { + u := cl.baseURL.ResolveReference(&url.URL{Path: "user"}) + r, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != 
nil { + return res200, err + } + r.Header.Set("Content-Type", "application/json") + r.Header.Set("Accept", "application/json") + res, err := cl.httpCl.Do(r) + if err != nil { + return res200, err + } + defer res.Body.Close() + switch res.StatusCode { + case 200: + res200 = new(GetCurrentUserRes) + err = json.NewDecoder(res.Body).Decode(&res200) + case 401: + res401 := new(GetCurrentUserRes401) + err = json.NewDecoder(res.Body).Decode(&res401) + if err == nil { + err = res401 + } + case 403: + res403 := new(GetCurrentUserRes403) + err = json.NewDecoder(res.Body).Decode(&res403) + if err == nil { + err = res403 + } + case 404: + res404 := new(GetCurrentUserRes404) + err = json.NewDecoder(res.Body).Decode(&res404) + if err == nil { + err = res404 + } + case 500: + res500 := new(GetCurrentUserRes500) + err = json.NewDecoder(res.Body).Decode(&res500) + if err == nil { + err = res500 + } + default: + var errBody *ErrorResponse + _ = json.NewDecoder(res.Body).Decode(&errBody) + if errBody != nil { + err = errBody + } else { + err = fmt.Errorf("unexpected status code %d", res.StatusCode) + } + } + if errors.Is(err, io.EOF) { + err = nil + } + return res200, err +} diff --git a/internal/cmd/client_codegen/jenny.go b/internal/cmd/client_codegen/jenny.go new file mode 100644 index 0000000..6e4e344 --- /dev/null +++ b/internal/cmd/client_codegen/jenny.go @@ -0,0 +1,475 @@ +package main + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/dave/jennifer/jen" + "github.com/go-openapi/spec" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" +) + +func genEmbedStruct(file *jen.File, typename, embedname string) { + file.Type().Id(typename).Struct(jen.Id(embedname)) +} + +func genParamStruct(defns spec.Definitions, file *jen.File, typename string, body *spec.Schema) error { + toField := func(item spec.OrderSchemaItem) (jen.Code, error) { + fieldName := snakeToCamel(item.Name) + f := jen.Id(fieldName) + isOptional := !slices.Contains(body.Required, item.Name) + if 
isOptional { + f = f.Op("*") + } + switch { + case item.Type.Contains("string"): + f = f.String() + case item.Type.Contains("number"): + f = f.Float64() + case item.Type.Contains("boolean"): + f = f.Bool() + case item.Type.Contains("array"): + itemTypename := "" + switch { + case item.Items.Schema.Type.Contains("string"): + itemTypename += "string" + case item.Items.Schema.Type.Contains("number"): + itemTypename += "float64" + case item.Items.Schema.Type.Contains("boolean"): + itemTypename += "bool" + case item.Items.Schema.Type.Contains("object"): + itemTypename = typename + "_" + fieldName + "Item" + + if err := genParamStruct(defns, file, itemTypename, item.Items.Schema); err != nil { + return nil, fmt.Errorf("generating child item type: %w", err) + } + case item.Items.Schema.Type.Contains("array"): + return nil, fmt.Errorf("arrays of array aren't supported") + case item.Items.Schema.Ref.GetURL() != nil || item.Items.Schema.Ref.GetURL().Fragment != "": + fragment := item.Items.Schema.Ref.GetURL().Fragment + defnName := strings.TrimPrefix(fragment, "/definitions/") + _, ok := defns[defnName] + if !ok { + return nil, fmt.Errorf("no definition with name %q exists in the openapi spec", defnName) + } + itemTypename = snakeToCamel(defnName) + } + f = f.Id("[]" + itemTypename) + + case item.Type.Contains("object"): + itemTypename := typename + "_" + fieldName + if err := genParamStruct(defns, file, itemTypename, &item.Schema); err != nil { + return nil, fmt.Errorf("generating child item type: %w", err) + } + f = f.Id(itemTypename) + default: + // perhaps it's a ref? 
// genErrRespParamStruct emits a Go struct named typename for an error-response
// schema. It behaves like genParamStruct but always embeds *ErrorResponse as
// the first field so the generated type satisfies the error interface, and it
// does not resolve $ref items inside arrays (only inline primitive/object
// element types are supported here).
func genErrRespParamStruct(defns spec.Definitions, file *jen.File, typename string, body *spec.Schema) error {
	// toField converts one ordered schema property into a jen struct field,
	// recursively generating child struct types for nested objects.
	toField := func(item spec.OrderSchemaItem) (jen.Code, error) {
		fieldName := snakeToCamel(item.Name)
		f := jen.Id(fieldName)
		// Properties absent from the schema's `required` list become pointer
		// fields with ",omitempty" JSON tags.
		isOptional := !slices.Contains(body.Required, item.Name)
		if isOptional {
			f = f.Op("*")
		}
		switch {
		case item.Type.Contains("string"):
			f = f.String()
		case item.Type.Contains("number"):
			f = f.Float64()
		case item.Type.Contains("boolean"):
			f = f.Bool()
		case item.Type.Contains("array"):
			itemTypename := ""
			switch {
			case item.Items.Schema.Type.Contains("string"):
				itemTypename += "string"
			case item.Items.Schema.Type.Contains("number"):
				itemTypename += "float64"
			case item.Items.Schema.Type.Contains("boolean"):
				itemTypename += "bool"
			case item.Items.Schema.Type.Contains("object"):
				// Nested object element: generate a dedicated child type and
				// reference it as the slice element.
				itemTypename = typename + "_" + fieldName + "Item"

				if err := genParamStruct(defns, file, itemTypename, item.Items.Schema); err != nil {
					return nil, fmt.Errorf("generating child item type: %w", err)
				}

			case item.Items.Schema.Type.Contains("array"):
				return nil, fmt.Errorf("arrays of array aren't supported")
			}
			// NOTE(review): if none of the cases above match (e.g. a $ref
			// element), itemTypename stays "" and the field becomes `[]` —
			// genParamStruct handles refs here but this variant does not.
			// Confirm error schemas never use $ref array elements.
			f = f.Id("[]" + itemTypename)

		case item.Type.Contains("object"):
			// Inline nested object: emit a sibling struct type for it.
			itemTypename := typename + "_" + fieldName
			if err := genParamStruct(defns, file, itemTypename, &item.Schema); err != nil {
				return nil, fmt.Errorf("generating child item type: %w", err)
			}
			f = f.Id(itemTypename)
		default:
			return nil, fmt.Errorf("unhandled item type %v", item.Type)
		}
		jsonTag := item.Name
		if isOptional {
			jsonTag += ",omitempty"
		}
		// Tag for both the JSON wire format and the Terraform SDK.
		f = f.Tag(map[string]string{
			"json":  jsonTag,
			"tfsdk": item.Name,
		})
		return f, nil
	}

	// Every error-response struct embeds *ErrorResponse (code/message plus the
	// Error() method emitted by genErrorStruct).
	fields := []jen.Code{
		jen.Op("*").Id("ErrorResponse"),
	}

	if body != nil {
		for _, item := range body.Properties.ToOrderedSchemaItems() {
			f, err := toField(item)
			if err != nil {
				return fmt.Errorf("looking at item %q: %w", item.Name, err)
			}
			fields = append(fields, f)
		}
	}
	file.Type().Id(typename).Struct(fields...)
	return nil
}
jen.Id("baseURL").Op(":").Id("baseURL"), + )) + }) +} + +func genErrorStruct( + file *jen.File, +) { + file.Type().Id("ErrorResponse").Struct( + jen.Id("Code").Id("string").Tag(map[string]string{"json": "code"}), + jen.Id("Message").Id("string").Tag(map[string]string{"json": "message"}), + ) + + file.Func().Params( + jen.Id("err").Op("*").Id("ErrorResponse"), + ).Id("Error").Params().Parens(jen.String()).BlockFunc(func(g *jen.Group) { + g.Return( + jen.Qual("fmt", "Sprintf").Call( + jen.Lit("error %s: %s"), + jen.Id("err").Dot("Code"), + jen.Id("err").Dot("Message"), + ), + ) + }) +} + +func genClientCall( + file *jen.File, + path, verb string, + clientCallFuncName string, + pathArgs []spec.Parameter, queryArgs []spec.Parameter, + reqBodyTypeName string, + responseTypeNames map[int]string, +) error { + + args := []jen.Code{jen.Id("ctx").Qual("context", "Context")} + + path = strings.TrimPrefix(path, "/") + + pathBuilderArg, pathArgs := pathInterpolator(path, pathArgs) + for _, pathArg := range pathArgs { + argName := lowerSnakeToCamel(pathArg.Name) + + argF := jen.Id(argName) + switch pathArg.Type { + case "string": + argF = argF.String() + case "number": + argF = argF.Float64() + default: + return fmt.Errorf("unhandled pathArg type %v", pathArg.Type) + } + + args = append(args, argF) + } + if reqBodyTypeName != "" { + args = append(args, jen.Id("req").Id(reqBodyTypeName)) + } + for _, queryArg := range queryArgs { + argName := lowerSnakeToCamel(queryArg.Name) + argF := jen.Id(argName) + if !queryArg.Required { + argF = argF.Op("*") + } + switch queryArg.Type { + case "string": + argF = argF.String() + case "number": + argF = argF.Int() + default: + return fmt.Errorf("unhandled queryArg type %v", queryArg.Type) + } + args = append(args, argF) + } + + var returnVals []jen.Code + var returnNames []jen.Code + codes := maps.Keys(responseTypeNames) + sort.Ints(codes) + for _, code := range codes { + if code >= 400 { + continue + } + returnValName := "res" + 
// genClientCall emits one method on the generated Client for a single
// path+verb operation. The method signature is
//   (cl *Client) Name(ctx, <path args...>, [req], <query args...>) (resNNN *TypeNNN..., err error)
// with one named result per non-error (status < 400) response type, plus err.
// The body builds the URL, optionally JSON-encodes the request body, performs
// the HTTP call, and decodes the response by status code; error-status bodies
// are decoded and assigned to err.
func genClientCall(
	file *jen.File,
	path, verb string,
	clientCallFuncName string,
	pathArgs []spec.Parameter, queryArgs []spec.Parameter,
	reqBodyTypeName string,
	responseTypeNames map[int]string,
) error {

	// ctx is always the first argument of the generated method.
	args := []jen.Code{jen.Id("ctx").Qual("context", "Context")}

	path = strings.TrimPrefix(path, "/")

	// Split the path into literal + variable pieces; pathArgs is re-ordered to
	// the sequence in which the variables appear in the URL.
	pathBuilderArg, pathArgs := pathInterpolator(path, pathArgs)
	for _, pathArg := range pathArgs {
		argName := lowerSnakeToCamel(pathArg.Name)

		argF := jen.Id(argName)
		switch pathArg.Type {
		case "string":
			argF = argF.String()
		case "number":
			argF = argF.Float64()
		default:
			return fmt.Errorf("unhandled pathArg type %v", pathArg.Type)
		}

		args = append(args, argF)
	}
	if reqBodyTypeName != "" {
		args = append(args, jen.Id("req").Id(reqBodyTypeName))
	}
	// Optional query parameters become pointer arguments; nil means "unset".
	for _, queryArg := range queryArgs {
		argName := lowerSnakeToCamel(queryArg.Name)
		argF := jen.Id(argName)
		if !queryArg.Required {
			argF = argF.Op("*")
		}
		switch queryArg.Type {
		case "string":
			argF = argF.String()
		case "number":
			// NOTE(review): query "number" becomes int here while path
			// "number" becomes float64 above — confirm this asymmetry is
			// intended by the spec's parameter usage.
			argF = argF.Int()
		default:
			return fmt.Errorf("unhandled queryArg type %v", queryArg.Type)
		}
		args = append(args, argF)
	}

	// One pointer result per success status code, sorted, then err last.
	var returnVals []jen.Code
	var returnNames []jen.Code
	codes := maps.Keys(responseTypeNames)
	sort.Ints(codes)
	for _, code := range codes {
		if code >= 400 {
			continue
		}
		returnValName := "res" + strconv.Itoa(code)
		returnValTypeName := responseTypeNames[code]
		returnF := jen.Id(returnValName).Op("*").Id(returnValTypeName)
		returnVals = append(returnVals, returnF)
		returnNames = append(returnNames, jen.Id(returnValName))
	}
	returnVals = append(returnVals, jen.Id("err").Id("error"))
	returnNames = append(returnNames, jen.Id("err"))

	rcvrName := "cl"
	rcvrType := "Client"
	file.Func().Params(
		jen.Id(rcvrName).Op("*").Id(rcvrType),
	).Id(clientCallFuncName).Params(args...).Parens(
		jen.List(returnVals...),
	).BlockFunc(func(g *jen.Group) {
		// u := cl.baseURL.ResolveReference(&url.URL{Path: <literals + vars>})
		g.Id("u").Op(":=").Id("cl").Dot("baseURL").Dot("ResolveReference").Call(
			jen.Op("&").Qual("net/url", "URL").Values(jen.Id("Path").Op(":").Add(pathBuilderArg...)),
		)
		if len(queryArgs) > 0 {
			g.Id("q").Op(":=").Id("u").Dot("Query").Call()
			for _, queryArg := range queryArgs {
				argName := lowerSnakeToCamel(queryArg.Name)
				queryVal := jen.Id(argName)
				if !queryArg.Required {
					// Optional args are pointers: dereference before use.
					queryVal = jen.Op("*").Add(queryVal)
				}
				switch queryArg.Type {
				case "string":
					// nothing to do
				case "number":
					queryVal = jen.Qual("strconv", "Itoa").Call(queryVal)
				default:
					panic("should have been handled earlier")
				}
				if !queryArg.Required {
					// Only set the query key when the pointer is non-nil.
					g.If(jen.Id(argName).Op("!=").Nil()).Block(
						jen.Id("q").Dot("Set").Call(
							jen.Lit(queryArg.Name),
							queryVal,
						),
					)
				} else {
					g.Id("q").Dot("Set").Call(
						jen.Lit(queryArg.Name),
						queryVal,
					)
				}
			}
			g.Id("u").Dot("RawQuery").Op("=").Id("q").Dot("Encode").Call()
		}

		// With a request body, JSON-encode `req` into a buffer first.
		var bodyStmt *jen.Statement
		if reqBodyTypeName == "" {
			bodyStmt = jen.Nil()
		} else {
			bodyStmt = jen.Id("body")
			g.Id("body").Op(":=").Qual("bytes", "NewBuffer").Call(jen.Nil())
			g.If(
				jen.Id("err").Op("=").Qual("encoding/json", "NewEncoder").Call(jen.Id("body")).Dot("Encode").Call(jen.Id("req")),
				jen.Id("err").Op("!=").Nil(),
			).Block(
				jen.Return(returnNames...),
			)
		}

		g.List(jen.Id("r"), jen.Id("err")).Op(":=").Qual("net/http", "NewRequestWithContext").Call(
			jen.Id("ctx"),
			jen.Lit(verb),
			jen.Id("u").Dot("String").Call(),
			bodyStmt,
		)
		g.If(jen.Id("err").Op("!=").Nil()).Block(
			jen.Return(returnNames...),
		)
		g.Id("r").Dot("Header").Dot("Set").Call(jen.Lit("Content-Type"), jen.Lit("application/json"))
		g.Id("r").Dot("Header").Dot("Set").Call(jen.Lit("Accept"), jen.Lit("application/json"))

		g.List(jen.Id("res"), jen.Id("err")).Op(":=").Id("cl").Dot("httpCl").Dot("Do").Call(jen.Id("r"))
		g.If(jen.Id("err").Op("!=").Nil()).Block(
			jen.Return(returnNames...),
		)
		g.Defer().Id("res").Dot("Body").Dot("Close").Call()

		// Dispatch on status code: known success codes decode into their
		// named result; known error codes decode and become err; anything
		// else falls back to the generic ErrorResponse shape.
		g.Switch(jen.Id("res").Dot("StatusCode")).BlockFunc(func(g *jen.Group) {

			for _, code := range codes {
				returnValName := "res" + strconv.Itoa(code)
				returnValTypeName := responseTypeNames[code]
				if code < 400 {

					g.Case(jen.Lit(code)).Block(
						jen.Id(returnValName).Op("=").New(jen.Id(returnValTypeName)),
						jen.Id("err").Op("=").Qual("encoding/json", "NewDecoder").Call(jen.Id("res").Dot("Body")).Dot("Decode").Call(jen.Op("&").Id(returnValName)),
					)
				} else {
					// Error responses are locals; they satisfy error via the
					// embedded *ErrorResponse, so assign to err on success.
					g.Case(jen.Lit(code)).Block(
						jen.Id(returnValName).Op(":=").New(jen.Id(returnValTypeName)),
						jen.Id("err").Op("=").Qual("encoding/json", "NewDecoder").Call(jen.Id("res").Dot("Body")).Dot("Decode").Call(jen.Op("&").Id(returnValName)),
						jen.If(jen.Id("err").Op("==").Nil()).Block(
							jen.Id("err").Op("=").Id(returnValName),
						),
					)
				}
			}
			g.Default().Block(
				jen.Var().Id("errBody").Op("*").Id("ErrorResponse"),
				jen.Id("_").Op("=").Qual("encoding/json", "NewDecoder").Call(jen.Id("res").Dot("Body")).Dot("Decode").Call(jen.Op("&").Id("errBody")),
				jen.If(jen.Id("errBody").Op("!=").Nil()).Block(
					jen.Id("err").Op("=").Id("errBody"),
				).Else().Block(
					jen.Id("err").Op("=").Qual("fmt", "Errorf").Call(jen.Lit("unexpected status code %d"), jen.Id("res").Dot("StatusCode")),
				),
			)
		})
		// An empty body on a success decode yields io.EOF; treat it as OK.
		g.If(jen.Qual("errors", "Is").Call(jen.Id("err"), jen.Qual("io", "EOF"))).Block(jen.Id("err").Op("=").Nil())
		g.Return(returnNames...)
	})

	return nil
}
+ }) + + return nil +} + +func pathInterpolator(path string, pathArgs []spec.Parameter) ([]jen.Code, []spec.Parameter) { + type interpolateArg struct { + StringLiteral *string + VariableName *string + } + args := []interpolateArg{ + {StringLiteral: &path}, + } + for _, pathArg := range pathArgs { + key := "{" + pathArg.Name + "}" + argName := lowerSnakeToCamel(pathArg.Name) + + for i, arg := range args { + if arg.StringLiteral == nil { + continue + } + lit := *arg.StringLiteral + idx := strings.Index(lit, key) + if idx < 0 { + continue + } + prePathPart := lit[:idx] + + inserts := []interpolateArg{ + {VariableName: &argName}, + } + if len(lit) > idx+len(key) { + postPathPart := lit[idx+len(key):] + inserts = append(inserts, interpolateArg{StringLiteral: &postPathPart}) + } + arg.StringLiteral = &prePathPart + args[i] = arg + if len(args) > i+1 { + args = slices.Insert(args, i+1, inserts...) + } else { + args = append(args, inserts...) + } + } + } + var ( + out []jen.Code + ordered []spec.Parameter + ) + for i, arg := range args { + if i != 0 { + out = append(out, jen.Op("+")) + } + switch { + case arg.StringLiteral != nil: + out = append(out, jen.Lit(*arg.StringLiteral)) + case arg.VariableName != nil: + out = append(out, jen.Id(*arg.VariableName)) + // reorder the path args in the way they appear in the URL + // so that they can be returned in a sane order for the + // func argument names + for _, pathArg := range pathArgs { + argName := lowerSnakeToCamel(pathArg.Name) + if argName == *arg.VariableName { + ordered = append(ordered, pathArg) + } + } + + } + } + return out, ordered +} diff --git a/internal/cmd/client_codegen/main.go b/internal/cmd/client_codegen/main.go new file mode 100644 index 0000000..c7325ae --- /dev/null +++ b/internal/cmd/client_codegen/main.go @@ -0,0 +1,230 @@ +package main + +import ( + "flag" + "fmt" + "os" + "sort" + "strconv" + "strings" + + "github.com/dave/jennifer/jen" + "github.com/go-openapi/loads" + "github.com/go-openapi/spec" + 
"golang.org/x/exp/maps" + "golang.org/x/exp/slog" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func main() { + specFilepath := flag.String("spec", "../../../openapi-spec.json", "") + flag.Parse() + + if err := realMain(*specFilepath); err != nil { + slog.Error("failed", "err", err) + } +} + +func realMain(specFilepath string) error { + slog.Info("loading spec") + doc, err := loads.Spec(specFilepath) + if err != nil { + return fmt.Errorf("loading spec: %w", err) + } + + spec := doc.Spec() + slog.Info("loaded openapi spec", "spec.id", spec.ID) + + f := jen.NewFile("planetscale") + f.PackageComment("Code generated by `github.com/planetscale/terraform-provider-planetscale/internal/cmd/client_codegen` DO NOT EDIT") + + genClientStruct(f, spec) + genErrorStruct(f) + + keys := maps.Keys(spec.Definitions) + sort.Strings(keys) + for _, name := range keys { + defn := spec.Definitions[name] + ll := slog.With("definition", name) + typeName := snakeToCamel(name) + ll = ll.With("type_name", typeName) + ll.Info("generating type for definition") + if err := genParamStruct(spec.Definitions, f, typeName, &defn); err != nil { + return fmt.Errorf("generating type for definition %q: %w", name, err) + } + } + + paths := maps.Keys(spec.Paths.Paths) + sort.Strings(paths) + for _, path := range paths { + ll := slog.With("path", path) + pathItem := spec.Paths.Paths[path] + props := pathItem.PathItemProps + if err := handlePath(ll, spec.Definitions, f, path, props); err != nil { + return fmt.Errorf("handling path %q: %w", path, err) + } + } + + return f.Render(os.Stdout) +} + +func handlePath(ll *slog.Logger, defns spec.Definitions, f *jen.File, path string, props spec.PathItemProps) error { + if props.Get != nil { + if err := handleVerbPath(ll, defns, f, path, "GET", props.Get); err != nil { + return fmt.Errorf("handling GET props: %w", err) + } + } + if props.Put != nil { + if err := handleVerbPath(ll, defns, f, path, "PUT", props.Put); err != nil { + return 
// handleVerbPath generates everything needed for one operation (a path+verb
// pair): the request body struct (if any), one response struct per documented
// status code, and finally the Client method that performs the call.
func handleVerbPath(ll *slog.Logger, defns spec.Definitions, f *jen.File, path, verb string, operation *spec.Operation) error {
	ll.Info("looking at prop", "verb", verb)
	// Partition the operation's parameters into path, query and (optional)
	// body parameters.
	pathParams, queryParams, reqBody, err := splitParams(operation.Parameters)
	if err != nil {
		return fmt.Errorf("splitting params: %w", err)
	}

	var reqBodyStructName string
	if reqBody != nil {
		// e.g. operation "create-a-branch" -> "CreateBranchReq".
		reqBodyStructName = kebabToCamel(removeFillerWords(operation.ID)) + "Req"
		if err := genParamStruct(defns, f, reqBodyStructName, reqBody.Schema); err != nil {
			return fmt.Errorf("generating call param struct: %w", err)
		}
	}

	responses := make(map[int]string)
	resCodes := maps.Keys(operation.Responses.StatusCodeResponses)
	sort.Ints(resCodes)
	// Count success responses first: when there is exactly one, its struct is
	// named "...Res" without the status-code suffix.
	successResponseTypes := 0
	for _, code := range resCodes {
		if code < 400 {
			successResponseTypes++
		}
	}
	for _, code := range resCodes {
		resBodyStructName := kebabToCamel(removeFillerWords(operation.ID)) + "Res" + strconv.Itoa(code)
		res := operation.Responses.StatusCodeResponses[code]
		if code < 400 {
			if successResponseTypes == 1 {
				resBodyStructName = kebabToCamel(removeFillerWords(operation.ID)) + "Res"
			}
			respSchema := res.ResponseProps.Schema

			if respSchema != nil && respSchema.Ref.GetURL() != nil && respSchema.Ref.GetURL().Fragment != "" {
				// The response is a $ref to a shared definition: emit a thin
				// struct embedding the already-generated definition type.
				defnName := strings.TrimPrefix(respSchema.Ref.GetURL().Fragment, "/definitions/")
				_, ok := defns[defnName]
				if !ok {
					return fmt.Errorf("no definition with name %q exists in the openapi spec", defnName)
				}
				genEmbedStruct(f, resBodyStructName, snakeToCamel(defnName))

			} else {
				// Inline schema: generate a full struct for it.
				if err := genParamStruct(defns, f, resBodyStructName, res.Schema); err != nil {
					return fmt.Errorf("generating call response struct: %w", err)
				}
			}

		} else {
			// Error responses embed *ErrorResponse so they satisfy `error`.
			if err := genErrRespParamStruct(defns, f, resBodyStructName, res.Schema); err != nil {
				return fmt.Errorf("generating call response struct: %w", err)
			}
		}
		responses[code] = resBodyStructName
	}

	clientCallFuncName := kebabToCamel(removeFillerWords(operation.ID))
	if err := genClientCall(f, path, verb, clientCallFuncName, pathParams, queryParams, reqBodyStructName, responses); err != nil {
		return fmt.Errorf("generating client call method: %w", err)
	}

	return nil
}
} + } + return out.String() +} + +func splitParams(params []spec.Parameter) (path, query []spec.Parameter, body *spec.Parameter, err error) { + for _, param := range params { + param := param + switch param.In { + case "path": + path = append(path, param) + case "query": + query = append(query, param) + case "body": + if body != nil { + return nil, nil, nil, fmt.Errorf("multiple bodies specified: %q", param.Name) + } + if param.Type != "object" && len(param.Schema.Properties) == 0 { + return nil, nil, nil, fmt.Errorf("body should be an object: was a %q", param.Type) + } + body = ¶m + default: + return nil, nil, nil, fmt.Errorf("unhandled param.In: %q", param.In) + } + } + return +} diff --git a/internal/cmd/clientrunner/main.go b/internal/cmd/clientrunner/main.go new file mode 100644 index 0000000..d7b9789 --- /dev/null +++ b/internal/cmd/clientrunner/main.go @@ -0,0 +1,79 @@ +package main + +import ( + "context" + "flag" + "io" + "net/http" + "net/http/httputil" + "os" + + "github.com/pkg/errors" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" + "golang.org/x/exp/slog" + "golang.org/x/oauth2" +) + +func main() { + accessToken := flag.String("access-token", "", "") + serviceTokenID := flag.String("service-token-id", "", "") + serviceToken := flag.String("service-token", "", "") + flag.Parse() + + debugTpt := DebugRoundTripper(os.Stderr, http.DefaultTransport) + var tpt http.RoundTripper + if *accessToken != "" { + tok := &oauth2.Token{AccessToken: *accessToken} + tpt = &oauth2.Transport{Base: debugTpt, Source: oauth2.StaticTokenSource(tok)} + } else if *serviceTokenID != "" && *serviceToken != "" { + tpt = RoundtripperFunc(func(r *http.Request) (*http.Response, error) { + r.Header.Set("Authorization", *serviceTokenID+":"+*serviceToken) + return debugTpt.RoundTrip(r) + }) + } + cl := planetscale.NewClient(&http.Client{Transport: tpt}, nil) + + ctx := context.Background() + + res200, err := cl.ListOauthApplications(ctx, 
"ps-e2e-prod", nil, nil) + if err != nil { + slog.Error("failed to get current user", "err", err) + return + } + slog.Info("response", "resp", res200) + +} + +func DebugRoundTripper(out io.Writer, tpt http.RoundTripper) http.RoundTripper { + return RoundtripperFunc(func(r *http.Request) (*http.Response, error) { + debugReq, err := httputil.DumpRequestOut(r, true) + if err != nil { + return nil, errors.Wrap(err, "dumping request output") + } + debugReq = append(debugReq, '\n') + _, err = out.Write(debugReq) + if err != nil { + return nil, errors.Wrap(err, "writing request output to stderr") + } + res, err := tpt.RoundTrip(r) + if res == nil { + return res, err + } + debugRes, err := httputil.DumpResponse(res, true) + if err != nil { + return nil, errors.Wrap(err, "dumping response output") + } + debugRes = append(debugRes, '\n') + _, err = out.Write(debugRes) + if err != nil { + return nil, errors.Wrap(err, "writing response output to stderr") + } + return res, err + }) +} + +type RoundtripperFunc func(*http.Request) (*http.Response, error) + +func (fn RoundtripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return fn(req) +} diff --git a/internal/cmd/extractref/main.go b/internal/cmd/extractref/main.go new file mode 100644 index 0000000..246ece7 --- /dev/null +++ b/internal/cmd/extractref/main.go @@ -0,0 +1,228 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "strings" + + "github.com/go-openapi/spec" + "github.com/pkg/browser" + "github.com/sergi/go-diff/diffmatchpatch" + "golang.org/x/exp/slog" +) + +func main() { + cfgFilepath := flag.String("cfg", "../../../openapi/extract-ref-cfg.json", "") + specFilepath := flag.String("spec", "../../../openapi/openapi-spec.json", "") + flag.Parse() + + if err := realMain(*cfgFilepath, *specFilepath); err != nil { + slog.Error("failed", "err", err.Error()) + } +} + +type ExtractConfig struct { + Extractions []ExtractRule `json:"extractions"` +} + +type ExtractRule struct { + 
Path string `json:"path"` + Method string `json:"method"` + Response int `json:"responses"` + Prop string `json:"prop"` + + BecomeRef string `json:"become_ref"` +} + +func readCfg(filepath string) (*ExtractConfig, error) { + var cfg ExtractConfig + cfgRaw, err := os.ReadFile(filepath) + if err != nil { + return nil, err + } + return &cfg, json.Unmarshal(cfgRaw, &cfg) +} + +func readSpec(filepath string) (*spec.Swagger, error) { + var ( + file *os.File + err error + ) + if filepath == "-" { + file = os.Stdin + } else { + file, err = os.Open(filepath) + } + if err != nil { + return nil, err + } + defer file.Close() + var spec spec.Swagger + if err := json.NewDecoder(file).Decode(&spec); err != nil { + return nil, fmt.Errorf("decoding JSOn: %w", err) + } + return &spec, nil +} + +func realMain(cfgFile, specFile string) error { + slog.Info("loading cfg") + cfg, err := readCfg(cfgFile) + if err != nil { + return fmt.Errorf("loading cfg: %w", err) + } + slog.Info("loading spec") + + spec, err := readSpec(specFile) + if err != nil { + return fmt.Errorf("loading spec: %w", err) + } + + slog.Info("loaded openapi spec", "spec.id", spec.ID) + + for _, extraction := range cfg.Extractions { + slog.Info("applying extraction rule", "path", extraction.Path) + p, ok := spec.Paths.Paths[extraction.Path] + if !ok { + return fmt.Errorf("path doesn't exist in openapi spec: %q", extraction.Path) + } + if err := handlePath(spec, extraction, p); err != nil { + return fmt.Errorf("handling rule for path %q: %v", extraction.Path, err) + } + } + slog.Info("encoding modified spec") + + return json.NewEncoder(os.Stdout).Encode(spec) +} + +func handlePath(doc *spec.Swagger, rule ExtractRule, path spec.PathItem) error { + ref, err := spec.NewRef("#/definitions/" + rule.BecomeRef) + if err != nil { + return fmt.Errorf("invalid `become_ref` rule: %v", err) + } + + var op *spec.Operation + switch strings.ToUpper(rule.Method) { + case "GET": + op = path.Get + case "PUT": + op = path.Put + case 
"POST": + op = path.Post + case "DELETE": + op = path.Delete + case "OPTIONS": + op = path.Options + case "HEAD": + op = path.Head + case "PATCH": + op = path.Patch + default: + return fmt.Errorf("unsupported method %q", rule.Method) + } + if op == nil { + return fmt.Errorf("no definition for method %q", rule.Method) + } + + resp, ok := op.Responses.StatusCodeResponses[rule.Response] + if !ok { + return fmt.Errorf("response doesn't support code %d", rule.Response) + } + if resp.Schema == nil { + return fmt.Errorf("response at this path has no schema") + } + + pathParts := strings.Split(rule.Prop, ".") + if len(pathParts) == 1 && pathParts[0] == "" { + pathParts = nil + } + + tgt, err := resolvePath(pathParts, resp.Schema, func(path string, parent, schema *spec.Schema) { + // replace the tgt schema with the ref + switch { + case parent.Type.Contains("array"): + parent.Items.Schema = spec.RefSchema(ref.String()) + case parent.Type.Contains("object"): + if path == "" { + // the root schema itself is changed + desc := resp.Schema.Description + nullable := resp.Schema.Nullable + resp.Schema = spec.RefSchema(ref.String()) + resp.Schema.Description = desc + resp.Schema.Nullable = nullable + } else { + parent.Properties[path] = *spec.RefSchema(ref.String()) + } + default: + panic(fmt.Sprintf("unhandled case: %#v", parent.Type)) + } + }) + if err != nil { + return fmt.Errorf("resolving prop at path %q: %v", rule.Prop, err) + } + op.Responses.StatusCodeResponses[rule.Response] = resp + existingDef, ok := doc.Definitions[rule.BecomeRef] + if !ok { + doc.Definitions[rule.BecomeRef] = *tgt + } else { + oldDef, err := json.MarshalIndent(existingDef, "", " ") + if err != nil { + return fmt.Errorf("encoding existing def %q: %v", rule.BecomeRef, err) + } + newDef, err := json.MarshalIndent(*tgt, "", " ") + if err != nil { + return fmt.Errorf("encoding new def %q: %v", rule.BecomeRef, err) + } + if !bytes.Equal(oldDef, newDef) { + slog.Error("old definition", "def", string(oldDef)) 
+ slog.Error("new definition", "def", string(newDef)) + dmp := diffmatchpatch.New() + diffs := dmp.DiffMain(string(oldDef), string(newDef), false) + if err := browser.OpenReader(bytes.NewBufferString(dmp.DiffPrettyHtml(diffs))); err != nil { + panic(err) + } + return fmt.Errorf("duplicate reference to %q, using a non-equal schema definition", rule.BecomeRef) + } + } + + return nil +} + +func resolvePath(pathParts []string, schema *spec.Schema, atTarget func(path string, parent, schema *spec.Schema)) (*spec.Schema, error) { + return resolvePathRecurse(pathParts, "", schema, schema, atTarget) +} + +func resolvePathRecurse( + pathParts []string, + atPath string, + parent, + schema *spec.Schema, + atTarget func(path string, parent, schema *spec.Schema), +) (*spec.Schema, error) { + if schema.Type.Contains("array") { + if schema.Items.Schema == nil { + return nil, fmt.Errorf("path is an array and its `items` schema isn't unitary") + } + parent = schema + schema = schema.Items.Schema + } + + if len(pathParts) == 0 { + atTarget(atPath, parent, schema) + return schema, nil + } + currentPath := pathParts[0] + + var ( + nextSchema spec.Schema + ok bool + ) + + nextSchema, ok = schema.Properties[currentPath] + if !ok { + return nil, fmt.Errorf("path %q doesn't exist", currentPath) + } + return resolvePathRecurse(pathParts[1:], currentPath, schema, &nextSchema, atTarget) +} diff --git a/internal/provider/backup_data_source.go b/internal/provider/backup_data_source.go new file mode 100644 index 0000000..a7a9dab --- /dev/null +++ b/internal/provider/backup_data_source.go @@ -0,0 +1,76 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &backupDataSource{} + _ datasource.DataSourceWithConfigure = &backupDataSource{} +) + 
+func newBackupDataSource() datasource.DataSource { + return &backupDataSource{} +} + +type backupDataSource struct { + client *planetscale.Client +} + +func (d *backupDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_backup" +} + +func (d *backupDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale backup.", + MarkdownDescription: "A PlanetScale backup.", + Attributes: backupDataSourceSchemaAttribute(false), + } +} + +func (d *backupDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *backupDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *backupDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + res, err := d.client.GetBackup(ctx, data.Organization.ValueString(), data.Database.ValueString(), data.Branch.ValueString(), data.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branch backup", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database branch backup", "no data") + return + } + state := backupFromClient(&res.Backup, data.Organization.ValueString(), data.Database.ValueString(), data.Branch.ValueString(), resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/backup_resource.go b/internal/provider/backup_resource.go new file mode 100644 index 0000000..9a2fce1 --- /dev/null +++ b/internal/provider/backup_resource.go @@ -0,0 +1,346 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +// Ensure provider defined types fully satisfy framework interfaces. 
+var _ resource.Resource = &backupResource{} +var _ resource.ResourceWithImportState = &backupResource{} + +func newBackupResource() resource.Resource { + return &backupResource{} +} + +// backupResource defines the resource implementation. +type backupResource struct { + client *planetscale.Client +} + +type backupResourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + Branch types.String `tfsdk:"branch"` + Name types.String `tfsdk:"name"` + Actor types.Object `tfsdk:"actor"` + BackupPolicy types.Object `tfsdk:"backup_policy"` + CreatedAt types.String `tfsdk:"created_at"` + EstimatedStorageCost types.String `tfsdk:"estimated_storage_cost"` + Id types.String `tfsdk:"id"` + Required types.Bool `tfsdk:"required"` + RestoredBranches types.List `tfsdk:"restored_branches"` + Size types.Float64 `tfsdk:"size"` + State types.String `tfsdk:"state"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func backupResourceFromClient(ctx context.Context, backup *planetscale.Backup, organization, database, branch types.String, diags diag.Diagnostics) *backupResourceModel { + if backup == nil { + return nil + } + actor, diags := types.ObjectValueFrom(ctx, actorResourceAttrTypes, backup.Actor) + diags.Append(diags...) + backupPolicy, diags := types.ObjectValueFrom(ctx, backupPolicyResourceAttrTypes, backup.BackupPolicy) + diags.Append(diags...) 
+ + restoredBranch := types.ListNull(types.StringType) + if backup.RestoredBranches != nil { + restoredBranch = stringsToListValue(*backup.RestoredBranches, diags) + } + return &backupResourceModel{ + Organization: organization, + Database: database, + Branch: branch, + + // partially required + BackupPolicy: backupPolicy, + + Name: types.StringValue(backup.Name), + Actor: actor, + CreatedAt: types.StringValue(backup.CreatedAt), + EstimatedStorageCost: types.StringValue(backup.EstimatedStorageCost), + Id: types.StringValue(backup.Id), + Required: types.BoolValue(backup.Required), + RestoredBranches: restoredBranch, + Size: types.Float64Value(backup.Size), + State: types.StringValue(backup.State), + UpdatedAt: types.StringValue(backup.UpdatedAt), + } +} + +func (r *backupResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_backup" +} + +func (r *backupResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale backup.", + MarkdownDescription: "A PlanetScale backup", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization in which the database branch being backed up belongs to.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }}, + "database": schema.StringAttribute{ + Description: "The database to which the branch being backed up belongs to.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }}, + "branch": schema.StringAttribute{ + Description: "The branch being backed up.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }}, + "name": schema.StringAttribute{ + Description: "The name of the backup.", + Required: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.RequiresReplace(), + }}, + "backup_policy": schema.SingleNestedAttribute{ + Description: ".", + Required: true, + Attributes: backupPolicyResourceAttribute, + }, + + // read only + "actor": schema.SingleNestedAttribute{ + Description: ".", + Computed: true, + Attributes: actorResourceSchemaAttribute, + }, + "id": schema.StringAttribute{ + Description: "The ID of the backup.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the backup was created.", + Computed: true, + }, + "estimated_storage_cost": schema.StringAttribute{ + Description: "The estimated storage cost of the backup.", + Computed: true, + }, + "required": schema.BoolAttribute{ + Description: "Whether or not the backup policy is required.", + Computed: true, + }, + "restored_branches": schema.ListAttribute{ + Description: "Branches that have been restored with this backup.", + Computed: true, ElementType: types.StringType, + }, + "size": schema.Float64Attribute{ + Description: "The size of the backup.", + Computed: true, + }, + "state": schema.StringAttribute{ + Description: "The current state of the backup.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the backup was last updated.", + Computed: true, + }, + }, + } +} + +func (r *backupResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + r.client = client +} + +func (r *backupResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data *backupResourceModel + tflog.Info(ctx, "getting current backup resource from plan") + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + branch := data.Branch + name := data.Name + backupPolicy := data.BackupPolicy + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + if backupPolicy.IsNull() || backupPolicy.IsUnknown() { + resp.Diagnostics.AddAttributeError(path.Root("backup_policy"), "backup_policy is required", "a backup_policy must be provided and cannot be empty") + return + } + var bp backupPolicyDataSourceModel + resp.Diagnostics.Append(backupPolicy.As(ctx, &bp, basetypes.ObjectAsOptions{})...) 
+ if resp.Diagnostics.HasError() { + return + } + + createReq := planetscale.CreateBackupReq{ + Name: name.ValueStringPointer(), + RetentionUnit: bp.RetentionUnit.ValueStringPointer(), + RetentionValue: bp.RetentionValue.ValueFloat64Pointer(), + } + res, err := r.client.CreateBackup(ctx, org.ValueString(), database.ValueString(), branch.ValueString(), createReq) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create backup, got error: %s", err)) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to create backups", "no data") + return + } + + data = backupResourceFromClient(ctx, &res.Backup, data.Organization, data.Database, data.Branch, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *backupResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data *backupResourceModel + + tflog.Info(ctx, "getting current backup resource from state") + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + branch := data.Branch + id := data.Id + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if id.IsNull() || id.IsUnknown() || id.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("id"), "id is required", "an ID must be provided and cannot be empty") + return + } + + res, err := r.client.GetBackup(ctx, org.ValueString(), database.ValueString(), branch.ValueString(), id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read backup, got error: %s", err)) + return + } + + data = backupResourceFromClient(ctx, &res.Backup, data.Organization, data.Database, data.Branch, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *backupResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // nothing to do, backups have no updatable settings +} + +func (r *backupResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data *backupResourceModel + + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + org := data.Organization + database := data.Database + branch := data.Branch + id := data.Id + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if id.IsNull() || id.IsUnknown() || id.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("id"), "id is required", "an ID must be provided and cannot be empty") + return + } + + res, err := r.client.DeleteBackup(ctx, org.ValueString(), database.ValueString(), branch.ValueString(), id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete backup, got error: %s", err)) + return + } + _ = res +} + +func (r *backupResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: organization,database,name,id. Got: %q", req.ID), + ) + return + } + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("organization"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database"), idParts[1])...) 
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("branch"), idParts[2])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[3])...) +} diff --git a/internal/provider/backups_data_source.go b/internal/provider/backups_data_source.go new file mode 100644 index 0000000..4679572 --- /dev/null +++ b/internal/provider/backups_data_source.go @@ -0,0 +1,101 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &backupsDataSource{} + _ datasource.DataSourceWithConfigure = &backupsDataSource{} +) + +func newBackupsDataSource() datasource.DataSource { + return &backupsDataSource{} +} + +type backupsDataSourceModel struct { + Organization string `tfsdk:"organization"` + Database string `tfsdk:"database"` + Branch string `tfsdk:"branch"` + Backups []backupDataSourceModel `tfsdk:"backups"` +} + +type backupsDataSource struct { + client *planetscale.Client +} + +func (d *backupsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_backups" +} + +func (d *backupsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale backups.", + MarkdownDescription: "A list of PlanetScale backups.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "database": schema.StringAttribute{Required: true}, + "branch": schema.StringAttribute{Required: true}, + "backups": schema.ListNestedAttribute{Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: backupDataSourceSchemaAttribute(true), + }, + }, + }, + } +} + 
+func (d *backupsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *backupsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *backupsDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res, err := d.client.ListBackups(ctx, data.Organization, data.Database, data.Branch, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branch backups", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database branch backups", "no data") + return + } + state := backupsDataSourceModel{ + Organization: data.Organization, + Database: data.Database, + Branch: data.Branch, + Backups: make([]backupDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Backups = append(state.Backups, *backupFromClient(&item, data.Organization, data.Database, data.Branch, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/branch_data_source.go b/internal/provider/branch_data_source.go new file mode 100644 index 0000000..17e363a --- /dev/null +++ b/internal/provider/branch_data_source.go @@ -0,0 +1,76 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &branchDataSource{} + _ datasource.DataSourceWithConfigure = &branchDataSource{} +) + +func newBranchDataSource() datasource.DataSource { + return &branchDataSource{} +} + +type branchDataSource struct { + client *planetscale.Client +} + +func (d *branchDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_branch" +} + +func (d *branchDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale branch.", + MarkdownDescription: "A PlanetScale branch.", + Attributes: branchDataSourceSchemaAttribute(false), + } +} + +func (d *branchDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *branchDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *branchDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + res, err := d.client.GetBranch(ctx, data.Organization.ValueString(), data.Database.ValueString(), data.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branch", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database branch", "no data") + return + } + state := branchFromClient(&res.Branch, data.Organization.ValueString(), data.Database.ValueString(), resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/branch_resource.go b/internal/provider/branch_resource.go new file mode 100644 index 0000000..dc3c59d --- /dev/null +++ b/internal/provider/branch_resource.go @@ -0,0 +1,440 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +// Ensure provider defined types fully satisfy framework interfaces. 
+var _ resource.Resource = &branchResource{} +var _ resource.ResourceWithImportState = &branchResource{} + +func newBranchResource() resource.Resource { + return &branchResource{} +} + +// branchResource defines the resource implementation. +type branchResource struct { + client *planetscale.Client +} + +type branchResourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + + Name types.String `tfsdk:"name"` + ParentBranch types.String `tfsdk:"parent_branch"` + AccessHostUrl types.String `tfsdk:"access_host_url"` + Actor types.Object `tfsdk:"actor"` + ClusterRateName types.String `tfsdk:"cluster_rate_name"` + CreatedAt types.String `tfsdk:"created_at"` + HtmlUrl types.String `tfsdk:"html_url"` + Id types.String `tfsdk:"id"` + InitialRestoreId types.String `tfsdk:"initial_restore_id"` + MysqlAddress types.String `tfsdk:"mysql_address"` + MysqlEdgeAddress types.String `tfsdk:"mysql_edge_address"` + Region types.Object `tfsdk:"region"` + Production types.Bool `tfsdk:"production"` + Ready types.Bool `tfsdk:"ready"` + RestoreChecklistCompletedAt types.String `tfsdk:"restore_checklist_completed_at"` + RestoredFromBranch types.Object `tfsdk:"restored_from_branch"` + SchemaLastUpdatedAt types.String `tfsdk:"schema_last_updated_at"` + ShardCount types.Float64 `tfsdk:"shard_count"` + Sharded types.Bool `tfsdk:"sharded"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func branchResourceFromClient(ctx context.Context, branch *planetscale.Branch, organization, database types.String, diags diag.Diagnostics) *branchResourceModel { + if branch == nil { + return nil + } + actor, diags := types.ObjectValueFrom(ctx, actorResourceAttrTypes, branch.Actor) + diags.Append(diags...) + region, diags := types.ObjectValueFrom(ctx, regionResourceAttrTypes, branch.Region) + diags.Append(diags...) 
+	// Append the conversion diagnostics to the shared collection instead of
+	// self-appending (`diags.Append(diags...)`) as before.
+	restoredFromBranch, rfbDiags := types.ObjectValueFrom(ctx, restoredFromBranchResourceAttrTypes, branch.RestoredFromBranch)
+	diags.Append(rfbDiags...)
+	return &branchResourceModel{
+		Organization: organization,
+		Database:     database,
+
+		Actor:                       actor,
+		Region:                      region,
+		RestoredFromBranch:          restoredFromBranch,
+		Name:                        types.StringValue(branch.Name),
+		ParentBranch:                types.StringPointerValue(branch.ParentBranch),
+		AccessHostUrl:               types.StringPointerValue(branch.AccessHostUrl),
+		ClusterRateName:             types.StringValue(branch.ClusterRateName),
+		CreatedAt:                   types.StringValue(branch.CreatedAt),
+		HtmlUrl:                     types.StringValue(branch.HtmlUrl),
+		Id:                          types.StringValue(branch.Id),
+		InitialRestoreId:            types.StringPointerValue(branch.InitialRestoreId),
+		MysqlAddress:                types.StringValue(branch.MysqlAddress),
+		MysqlEdgeAddress:            types.StringValue(branch.MysqlEdgeAddress),
+		Production:                  types.BoolValue(branch.Production),
+		Ready:                       types.BoolValue(branch.Ready),
+		RestoreChecklistCompletedAt: types.StringPointerValue(branch.RestoreChecklistCompletedAt),
+		SchemaLastUpdatedAt:         types.StringValue(branch.SchemaLastUpdatedAt),
+		ShardCount:                  types.Float64PointerValue(branch.ShardCount),
+		Sharded:                     types.BoolValue(branch.Sharded),
+		UpdatedAt:                   types.StringValue(branch.UpdatedAt),
+	}
+}
+
+func (r *branchResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_branch"
+}
+
+func (r *branchResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Description:         "A PlanetScale branch.",
+		MarkdownDescription: "A PlanetScale branch.",
+		Attributes: map[string]schema.Attribute{
+			"organization": schema.StringAttribute{
+				Description: "The organization this branch belongs to.",
+				Required:    true,
+				PlanModifiers: []planmodifier.String{
+					stringplanmodifier.RequiresReplace(),
+				},
+			},
+			"database": schema.StringAttribute{
+				Description: "The 
database this branch belongs to.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "name": schema.StringAttribute{ + Description: "The name of the branch.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "parent_branch": schema.StringAttribute{ + Description: "The name of the parent branch from which the branch was created.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + // updatable + "production": schema.BoolAttribute{ + Description: "Whether or not the branch is a production branch.", + Computed: true, Optional: true, + }, + + // read only + "id": schema.StringAttribute{ + Description: "The ID of the branch.", + Computed: true, + }, + "access_host_url": schema.StringAttribute{ + Description: "The access host URL for the branch. This is a legacy field, use `mysql_edge_address`.", + Computed: true, + }, + "actor": schema.SingleNestedAttribute{ + Description: "The actor who created this branch.", + Computed: true, + Attributes: actorResourceSchemaAttribute, + }, + "cluster_rate_name": schema.StringAttribute{ + Description: "The SKU representing the branch's cluster size.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the branch was created.", + Computed: true, + }, + "html_url": schema.StringAttribute{ + Description: "Planetscale app URL for the branch.", + Computed: true, + }, + "initial_restore_id": schema.StringAttribute{ + Description: "The ID of the backup from which the branch was restored.", + Computed: true, + }, + "mysql_address": schema.StringAttribute{ + Description: "The MySQL address for the branch.", + Computed: true, + }, + "mysql_edge_address": schema.StringAttribute{ + Description: "The address of the MySQL provider for the branch.", + Computed: true, + }, + "region": schema.SingleNestedAttribute{ + Description: "The 
region in which this branch lives.", + Computed: true, + Attributes: regionResourceSchemaAttribute, + }, + "ready": schema.BoolAttribute{ + Description: "Whether or not the branch is ready to serve queries.", + Computed: true, + }, + "restore_checklist_completed_at": schema.StringAttribute{ + Description: "When a user last marked a backup restore checklist as completed.", + Computed: true, + }, + "restored_from_branch": schema.SingleNestedAttribute{ + Description: "todo", + Computed: true, + Attributes: restoredFromBranchSchemaAttribute, + }, + "schema_last_updated_at": schema.StringAttribute{ + Description: "When the schema for the branch was last updated.", + Computed: true, + }, + "shard_count": schema.Float64Attribute{ + Description: "The number of shards in the branch.", + Computed: true, + }, + "sharded": schema.BoolAttribute{ + Description: "Whether or not the branch is sharded.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the branch was last updated.", + Computed: true, + }, + }, + } +} + +func (r *branchResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + r.client = client +} + +func (r *branchResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data *branchResourceModel + tflog.Info(ctx, "getting current branch resource from plan") + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + parentBranch := stringValueIfKnown(data.ParentBranch) + if parentBranch == nil { + resp.Diagnostics.AddAttributeError(path.Root("parent_branch"), "Missing parent branch", "All newly created branches require a parent branch.") + return + } + + createReq := planetscale.CreateBranchReq{ + Name: name.ValueString(), + ParentBranch: *parentBranch, + } + if !(data.RestoredFromBranch.IsNull() || data.RestoredFromBranch.IsUnknown()) { + var rfb restoredFromBranchResource + resp.Diagnostics.Append(data.RestoredFromBranch.As(ctx, &rfb, basetypes.ObjectAsOptions{})...) 
+ if resp.Diagnostics.HasError() { + return + } + backupID := rfb.Id.String() + createReq.BackupId = &backupID + } + res, err := r.client.CreateBranch(ctx, org.ValueString(), database.ValueString(), createReq) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create branch, got error: %s", err)) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to create branchs", "no data") + return + } + + data = branchResourceFromClient(ctx, &res.Branch, data.Organization, data.Database, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *branchResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data *branchResourceModel + + tflog.Info(ctx, "getting current branch resource from state") + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + res, err := r.client.GetBranch(ctx, org.ValueString(), database.ValueString(), name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read branch, got error: %s", err)) + return + } + 
+ data = branchResourceFromClient(ctx, &res.Branch, data.Organization, data.Database, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *branchResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var ( + old *branchResourceModel + data *branchResourceModel + ) + resp.Diagnostics.Append(req.State.Get(ctx, &old)...) + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + productionWasChanged := false + isProduction := boolIfDifferent(old.Production, data.Production, &productionWasChanged) + var branch planetscale.Branch + if productionWasChanged { + if *isProduction { + res, err := r.client.PromoteBranch(ctx, org.ValueString(), database.ValueString(), name.ValueString()) + if err != nil { + resp.Diagnostics.AddAttributeError(path.Root("production"), "Failed to promote branch", "Unable to promote branch to production: "+err.Error()) + if resp.Diagnostics.HasError() { + return + } + } + branch = res.Branch + } else { + res, err := r.client.DemoteBranch(ctx, org.ValueString(), database.ValueString(), name.ValueString()) + if err != nil { + 
resp.Diagnostics.AddAttributeError(path.Root("production"), "Failed to demote branch", "Unable to demote branch from production: "+err.Error()) + if resp.Diagnostics.HasError() { + return + } + } + branch = res.Branch + } + } + data = branchResourceFromClient(ctx, &branch, data.Organization, data.Database, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *branchResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data *branchResourceModel + + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + org := data.Organization + database := data.Database + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + res, err := r.client.DeleteBranch(ctx, org.ValueString(), database.ValueString(), name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete branch, got error: %s", err)) + return + } + _ = res +} + +func (r *branchResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + if len(idParts) != 3 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" { + resp.Diagnostics.AddError( + "Unexpected Import 
Identifier", + fmt.Sprintf("Expected import identifier with format: organization,database,name. Got: %q", req.ID), + ) + return + } + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("organization"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[2])...) +} diff --git a/internal/provider/branch_schema_data_source.go b/internal/provider/branch_schema_data_source.go new file mode 100644 index 0000000..6a9d7ca --- /dev/null +++ b/internal/provider/branch_schema_data_source.go @@ -0,0 +1,108 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &branchSchemaDataSource{} + _ datasource.DataSourceWithConfigure = &branchSchemaDataSource{} +) + +func newBranchSchemaDataSource() datasource.DataSource { + return &branchSchemaDataSource{} +} + +type branchSchemaDataSource struct { + client *planetscale.Client +} + +type branchSchemaDataSourceModel struct { + Organization string `tfsdk:"organization"` + Database string `tfsdk:"database"` + Branch string `tfsdk:"branch"` + Keyspace types.String `tfsdk:"keyspace"` + + Tables []tableSchemaDataSourceModel `tfsdk:"tables"` +} + +func (d *branchSchemaDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_branch_schema" +} + +func (d *branchSchemaDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "The schema of a PlanetScale branch.", + MarkdownDescription: "The schema 
of a PlanetScale branch.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "database": schema.StringAttribute{Required: true}, + "branch": schema.StringAttribute{Required: true}, + "keyspace": schema.StringAttribute{Optional: true}, + "tables": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: tableSchemaDataSourceSchemaAttribute, + }, + }, + }, + } +} + +func (d *branchSchemaDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *branchSchemaDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *branchSchemaDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + res, err := d.client.GetBranchSchema(ctx, data.Organization, data.Database, data.Branch, stringValueIfKnown(data.Keyspace)) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branch schema", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database branch schema", "no data") + return + } + state := branchSchemaDataSourceModel{ + Organization: data.Organization, + Database: data.Database, + Branch: data.Branch, + Keyspace: data.Keyspace, + Tables: make([]tableSchemaDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Tables = append(state.Tables, *tableSchemaFromClient(&item)) + if resp.Diagnostics.HasError() { + return + } + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/branch_schema_lint_data_source.go b/internal/provider/branch_schema_lint_data_source.go new file mode 100644 index 0000000..1e28bfe --- /dev/null +++ b/internal/provider/branch_schema_lint_data_source.go @@ -0,0 +1,108 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &branchSchemaLintDataSource{} + _ datasource.DataSourceWithConfigure = &branchSchemaLintDataSource{} +) + +func newBranchSchemaLintDataSource() datasource.DataSource { + return &branchSchemaLintDataSource{} +} + +type branchSchemaLintDataSource struct { + client *planetscale.Client +} + +type branchSchemaLintDataSourceModel struct { + Organization string `tfsdk:"organization"` + Database string `tfsdk:"database"` + Branch string `tfsdk:"branch"` + + Errors []lintErrorDataSourceModel `tfsdk:"errors"` +} + +func (d 
*branchSchemaLintDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_branch_schema_lint" +} + +func (d *branchSchemaLintDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Linting errors for the schema of a PlanetScale branch.", + MarkdownDescription: "Linting errors for the schema of a PlanetScale branch.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "database": schema.StringAttribute{Required: true}, + "branch": schema.StringAttribute{Required: true}, + "errors": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: lintErrorDataSourceSchemaAttribute, + }, + }, + }, + } +} + +func (d *branchSchemaLintDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *branchSchemaLintDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var data *branchSchemaLintDataSourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + res200, err := d.client.LintBranchSchema(ctx, data.Organization, data.Database, data.Branch, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branch schema", err.Error()) + return + } + if res200 == nil { + resp.Diagnostics.AddError("Unable to read database branch schema", "no data") + return + } + state := branchSchemaLintDataSourceModel{ + Organization: data.Organization, + Database: data.Database, + Branch: data.Branch, + Errors: make([]lintErrorDataSourceModel, 0, len(res200.Data)), + } + for _, item := range res200.Data { + item := item + state.Errors = append(state.Errors, *lintErrorFromClient(&item, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/branches_data_source.go b/internal/provider/branches_data_source.go new file mode 100644 index 0000000..cd19cb8 --- /dev/null +++ b/internal/provider/branches_data_source.go @@ -0,0 +1,102 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &branchesDataSource{} + _ datasource.DataSourceWithConfigure = &branchesDataSource{} +) + +func newBranchesDataSource() datasource.DataSource { + return &branchesDataSource{} +} + +type branchesDataSource struct { + client *planetscale.Client +} + +type branchesDataSourceModel struct { + Organization string `tfsdk:"organization"` + Database string `tfsdk:"database"` + Branches []branchDataSourceModel `tfsdk:"branches"` +} + +func (d *branchesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + 
resp.TypeName = req.ProviderTypeName + "_branches" +} + +func (d *branchesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale branches.", + MarkdownDescription: "A list of PlanetScale branches.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "database": schema.StringAttribute{Required: true}, + "branches": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: branchDataSourceSchemaAttribute(true), + }, + }, + }, + } +} + +func (d *branchesDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *branchesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + + var data *branchesDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + res, err := d.client.ListBranches(ctx, data.Organization, data.Database, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read database branches", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database branches", "no data") + return + } + state := branchesDataSourceModel{ + Organization: data.Organization, + Database: data.Database, + Branches: make([]branchDataSourceModel, 0, len(res.Data)), + } + + for _, item := range res.Data { + item := item + state.Branches = append(state.Branches, *branchFromClient(&item, data.Organization, data.Database, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/database_data_source.go b/internal/provider/database_data_source.go new file mode 100644 index 0000000..2ee8826 --- /dev/null +++ b/internal/provider/database_data_source.go @@ -0,0 +1,79 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &databaseDataSource{} + _ datasource.DataSourceWithConfigure = &databaseDataSource{} +) + +func newDatabaseDataSource() datasource.DataSource { + return &databaseDataSource{} +} + +type databaseDataSource struct { + client *planetscale.Client +} + +func (d *databaseDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_database" +} + +func (d *databaseDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp 
*datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale database.", + MarkdownDescription: "A PlanetScale database.", + Attributes: databaseDataSourceSchemaAttribute(false), + } +} + +func (d *databaseDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *databaseDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *databaseDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "unable to read config") + return + } + res, err := d.client.GetDatabase(ctx, data.Organization, data.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to read database", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Received a nil database", "") + return + } + state := databaseFromClient(&res.Database, data.Organization, resp.Diagnostics) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "unable to convert client object to tf model") + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "unable to store tf model") + return + } +} diff --git a/internal/provider/database_read_only_regions_data_source.go b/internal/provider/database_read_only_regions_data_source.go new file mode 100644 index 0000000..b9dcc8d --- /dev/null +++ b/internal/provider/database_read_only_regions_data_source.go @@ -0,0 +1,91 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &databaseReadOnlyRegionsDataSource{} + _ datasource.DataSourceWithConfigure = &databaseReadOnlyRegionsDataSource{} +) + +func newDatabaseReadOnlyRegionsDataSource() datasource.DataSource { + return &databaseReadOnlyRegionsDataSource{} +} + +type databaseReadOnlyRegionsDataSource struct { + client *planetscale.Client +} + +type readOnlyRegionsDataSourceModel struct { + Organization string `tfsdk:"organization"` + Name string `tfsdk:"name"` + Regions []readOnlyRegionDataSourceModel `tfsdk:"regions"` +} + +func (d *databaseReadOnlyRegionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_database_read_only_regions" +} + +func (d *databaseReadOnlyRegionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale read-only regions.", + MarkdownDescription: "A list of PlanetScale read-only regions.", + Attributes: readOnlyRegionsDataSourceSchemaAttribute, + } +} + +func (d *databaseReadOnlyRegionsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := 
req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *databaseReadOnlyRegionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *readOnlyRegionsDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res, err := d.client.ListReadOnlyRegions(ctx, data.Organization, data.Name, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to list database read only regions", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Received a nil database read only regions list", "") + return + } + state := readOnlyRegionsDataSourceModel{ + Organization: data.Organization, + Name: data.Name, + Regions: make([]readOnlyRegionDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Regions = append(state.Regions, *readOnlyRegionFromClient(&item, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/database_regions_data_source.go b/internal/provider/database_regions_data_source.go new file mode 100644 index 0000000..13bf37f --- /dev/null +++ b/internal/provider/database_regions_data_source.go @@ -0,0 +1,120 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &databaseRegionsDataSource{} + _ datasource.DataSourceWithConfigure = &databaseRegionsDataSource{} +) + +func newDatabaseRegionsDataSource() datasource.DataSource { + return &databaseRegionsDataSource{} +} + +type databaseRegionsDataSource struct { + client *planetscale.Client +} + +type databaseRegionsDataSourceModel struct { + Organization string `tfsdk:"organization"` + Name string `tfsdk:"name"` + Regions []databaseRegionDataSourceModel `tfsdk:"regions"` +} + +type databaseRegionDataSourceModel struct { + DisplayName string `tfsdk:"display_name"` + Enabled bool `tfsdk:"enabled"` + Id string `tfsdk:"id"` + Location string `tfsdk:"location"` + Provider string `tfsdk:"provider"` + PublicIpAddresses []string `tfsdk:"public_ip_addresses"` + Slug string `tfsdk:"slug"` +} + +func (d *databaseRegionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_database_regions" +} + +func (d *databaseRegionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale regions.", + MarkdownDescription: "A list of PlanetScale regions.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization for which the regions 
are available.", + Required: true, + }, + "name": schema.StringAttribute{ + Description: "The database for which the regions are available.", + Required: true, + }, + "regions": schema.ListNestedAttribute{ + Description: "The list of regions available for the database.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: regionDataSourceSchemaAttribute, + }, + }, + }, + } +} + +func (d *databaseRegionsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *databaseRegionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *databaseRegionsDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res, err := d.client.ListDatabaseRegions(ctx, data.Organization, data.Name, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to list database regions", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Received a nil database regions list", "") + return + } + state := databaseRegionsDataSourceModel{ + Organization: data.Organization, + Name: data.Name, + } + for _, region := range res.Data { + state.Regions = append(state.Regions, databaseRegionDataSourceModel{ + DisplayName: region.DisplayName, + Enabled: region.Enabled, + Id: region.Id, + Location: region.Location, + Provider: region.Provider, + PublicIpAddresses: region.PublicIpAddresses, + Slug: region.Slug, + }) + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/database_resource.go b/internal/provider/database_resource.go new file mode 100644 index 0000000..ff0ad3b --- /dev/null +++ b/internal/provider/database_resource.go @@ -0,0 +1,511 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ resource.Resource = &databaseResource{} +var _ resource.ResourceWithImportState = &databaseResource{} + +func newDatabaseResource() resource.Resource { + return &databaseResource{} +} + +// databaseResource defines the resource implementation. 
+type databaseResource struct { + client *planetscale.Client +} + +type databaseResourceModel struct { + Organization types.String `tfsdk:"organization"` + Id types.String `tfsdk:"id"` + AllowDataBranching types.Bool `tfsdk:"allow_data_branching"` + AtBackupRestoreBranchesLimit types.Bool `tfsdk:"at_backup_restore_branches_limit"` + AtDevelopmentBranchLimit types.Bool `tfsdk:"at_development_branch_limit"` + AutomaticMigrations types.Bool `tfsdk:"automatic_migrations"` + BranchesCount types.Float64 `tfsdk:"branches_count"` + BranchesUrl types.String `tfsdk:"branches_url"` + CreatedAt types.String `tfsdk:"created_at"` + DataImport types.Object `tfsdk:"data_import"` + DefaultBranch types.String `tfsdk:"default_branch"` + DefaultBranchReadOnlyRegionsCount types.Float64 `tfsdk:"default_branch_read_only_regions_count"` + DefaultBranchShardCount types.Float64 `tfsdk:"default_branch_shard_count"` + DefaultBranchTableCount types.Float64 `tfsdk:"default_branch_table_count"` + DevelopmentBranchesCount types.Float64 `tfsdk:"development_branches_count"` + HtmlUrl types.String `tfsdk:"html_url"` + InsightsRawQueries types.Bool `tfsdk:"insights_raw_queries"` + IssuesCount types.Float64 `tfsdk:"issues_count"` + MigrationFramework types.String `tfsdk:"migration_framework"` + MigrationTableName types.String `tfsdk:"migration_table_name"` + MultipleAdminsRequiredForDeletion types.Bool `tfsdk:"multiple_admins_required_for_deletion"` + Name types.String `tfsdk:"name"` + Plan types.String `tfsdk:"plan"` + ClusterSize types.String `tfsdk:"cluster_size"` + ProductionBranchWebConsole types.Bool `tfsdk:"production_branch_web_console"` + ProductionBranchesCount types.Float64 `tfsdk:"production_branches_count"` + Ready types.Bool `tfsdk:"ready"` + Region types.String `tfsdk:"region"` + RequireApprovalForDeploy types.Bool `tfsdk:"require_approval_for_deploy"` + RestrictBranchRegion types.Bool `tfsdk:"restrict_branch_region"` + SchemaLastUpdatedAt types.String `tfsdk:"schema_last_updated_at"` + 
Sharded                           types.Bool    `tfsdk:"sharded"`
	State                             types.String  `tfsdk:"state"`
	UpdatedAt                         types.String  `tfsdk:"updated_at"`
	Url                               types.String  `tfsdk:"url"`
}

// databaseResourcefromClient converts a PlanetScale API database object into
// the Terraform resource model. Returns nil when database is nil.
// clusterSize is carried through from plan/state because the API response
// does not include it; an unknown value is stored as null.
func databaseResourcefromClient(ctx context.Context, database *planetscale.Database, organization, clusterSize types.String, diags diag.Diagnostics) *databaseResourceModel {
	if database == nil {
		return nil
	}
	if clusterSize.IsUnknown() {
		clusterSize = types.StringNull()
	}
	// Fix: the original shadowed the `diags` parameter with `:=` and then
	// appended the new diagnostics to themselves (`diags.Append(diags...)`),
	// dropping any conversion errors. Bind the conversion diagnostics to a
	// fresh name and append them to the caller's collection.
	// NOTE(review): `diags` is passed by value, so appended diagnostics still
	// do not reach the caller — confirm call sites; consider *diag.Diagnostics.
	dataImport, objDiags := types.ObjectValueFrom(ctx, importResourceAttrTypes, database.DataImport)
	diags.Append(objDiags...)
	return &databaseResourceModel{
		Organization:                      organization,
		DataImport:                        dataImport,
		Id:                                types.StringValue(database.Id),
		AllowDataBranching:                types.BoolValue(database.AllowDataBranching),
		AtBackupRestoreBranchesLimit:      types.BoolValue(database.AtBackupRestoreBranchesLimit),
		AtDevelopmentBranchLimit:          types.BoolValue(database.AtDevelopmentBranchLimit),
		AutomaticMigrations:               types.BoolPointerValue(database.AutomaticMigrations),
		BranchesCount:                     types.Float64Value(database.BranchesCount),
		BranchesUrl:                       types.StringValue(database.BranchesUrl),
		CreatedAt:                         types.StringValue(database.CreatedAt),
		DefaultBranch:                     types.StringValue(database.DefaultBranch),
		DefaultBranchReadOnlyRegionsCount: types.Float64Value(database.DefaultBranchReadOnlyRegionsCount),
		DefaultBranchShardCount:           types.Float64Value(database.DefaultBranchShardCount),
		DefaultBranchTableCount:           types.Float64Value(database.DefaultBranchTableCount),
		DevelopmentBranchesCount:          types.Float64Value(database.DevelopmentBranchesCount),
		HtmlUrl:                           types.StringValue(database.HtmlUrl),
		InsightsRawQueries:                types.BoolValue(database.InsightsRawQueries),
		IssuesCount:                       types.Float64Value(database.IssuesCount),
		MigrationFramework:                types.StringPointerValue(database.MigrationFramework),
		MigrationTableName:                types.StringPointerValue(database.MigrationTableName),
		MultipleAdminsRequiredForDeletion: types.BoolValue(database.MultipleAdminsRequiredForDeletion),
		Name:                              types.StringValue(database.Name),
		Plan:                              types.StringValue(database.Plan),
		ClusterSize:                       clusterSize,
		ProductionBranchWebConsole:        types.BoolValue(database.ProductionBranchWebConsole),
		ProductionBranchesCount:           types.Float64Value(database.ProductionBranchesCount),
		Ready:                             types.BoolValue(database.Ready),
		Region:                            types.StringValue(database.Region.Slug),
		RequireApprovalForDeploy:          types.BoolValue(database.RequireApprovalForDeploy),
		RestrictBranchRegion:              types.BoolValue(database.RestrictBranchRegion),
		SchemaLastUpdatedAt:               types.StringPointerValue(database.SchemaLastUpdatedAt),
		Sharded:                           types.BoolValue(database.Sharded),
		State:                             types.StringValue(database.State),
		UpdatedAt:                         types.StringValue(database.UpdatedAt),
		Url:                               types.StringValue(database.Url),
	}
}

// Metadata sets the resource type name exposed to Terraform configurations.
func (r *databaseResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_database"
}

func (r *databaseResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Description:         "A PlanetScale database.",
		MarkdownDescription: "A PlanetScale database.",
		Attributes: map[string]schema.Attribute{
			"organization": schema.StringAttribute{
				Description: "The organization this database belongs to.",
				Required:    true, PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},
			"name": schema.StringAttribute{
				Description: "The name of this database.",
				Required:    true, PlanModifiers: []planmodifier.String{
					stringplanmodifier.RequiresReplace(),
				},
			},

			"id": schema.StringAttribute{
				Description: "The ID of the database.",
				Computed:    true,
				PlanModifiers: []planmodifier.String{
					stringplanmodifier.UseStateForUnknown(),
				},
			},
			"allow_data_branching": schema.BoolAttribute{
				Description: "Whether seeding branches with data is 
enabled for all branches.", + Computed: true, Optional: true, + }, + "at_backup_restore_branches_limit": schema.BoolAttribute{ + Description: "If the database has reached its backup restored branch limit.", + Computed: true, + }, + "at_development_branch_limit": schema.BoolAttribute{ + Description: "If the database has reached its development branch limit.", + Computed: true, + }, + "automatic_migrations": schema.BoolAttribute{ + Description: "Whether to automatically manage Rails migrations during deploy requests.", + Computed: true, Optional: true, + }, + "branches_count": schema.Float64Attribute{ + Description: "The total number of database branches.", + Computed: true, + }, + "branches_url": schema.StringAttribute{ + Description: "The URL to retrieve this database's branches via the API.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the database was created.", + Computed: true, + }, + "data_import": schema.SingleNestedAttribute{ + Description: "If the database was created from an import, describes the import process.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "data_source": schema.SingleNestedAttribute{ + Description: "Connection information for the source of the data for the import.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "database": schema.StringAttribute{ + Description: "The name of the database imported from.", + Required: true, + }, + "hostname": schema.StringAttribute{ + Description: "The hostname where the database lives.", + Required: true, + }, + "port": schema.StringAttribute{ + Description: "The port on which the database listens on the host.", + Required: true, + }, + }, + }, + "finished_at": schema.StringAttribute{ + Description: "When the import finished.", + Computed: true, + }, + "import_check_errors": schema.StringAttribute{ + Description: "Errors encountered while preparing the import.", + Computed: true, + }, + "started_at": schema.StringAttribute{ + 
Description: "When the import started.",
						Computed:    true,
					},
					"state": schema.StringAttribute{
						Description: "The state of the import, one of: pending, queued, in_progress, complete, cancelled, error.",
						Computed:    true,
					},
				},
			},
			"default_branch": schema.StringAttribute{
				Description: "The default branch for the database.",
				Computed:    true, Optional: true,
			},
			"default_branch_read_only_regions_count": schema.Float64Attribute{
				Description: "Number of read only regions in the default branch.",
				Computed:    true,
			},
			"default_branch_shard_count": schema.Float64Attribute{
				Description: "Number of shards in the default branch.",
				Computed:    true,
			},
			"default_branch_table_count": schema.Float64Attribute{
				Description: "Number of tables in the default branch schema.",
				Computed:    true,
			},
			"development_branches_count": schema.Float64Attribute{
				Description: "The total number of database development branches.",
				Computed:    true,
			},
			"html_url": schema.StringAttribute{
				// Fixed: description was a copy-paste of the
				// development_branches_count text.
				Description: "The URL to see this database's branches in the web UI.",
				Computed:    true,
			},
			"insights_raw_queries": schema.BoolAttribute{
				// Fixed: description was a copy-paste of the html_url text;
				// this attribute is a boolean toggle, not a URL.
				Description: "Whether insights for raw queries is enabled for the database.",
				Computed:    true, Optional: true,
			},
			"issues_count": schema.Float64Attribute{
				Description: "The total number of ongoing issues within a database.",
				Computed:    true, Optional: true,
			},
			"migration_framework": schema.StringAttribute{
				Description: "Framework used for applying migrations.",
				Computed:    true, Optional: true,
			},
			"migration_table_name": schema.StringAttribute{
				Description: "Table name to use for copying schema migration data.",
				Computed:    true, Optional: true,
			},
			"multiple_admins_required_for_deletion": schema.BoolAttribute{
				Description: "If the database requires multiple admins for deletion.",
				Computed:    true, Optional: true,
			},
			"plan": schema.StringAttribute{
				Description: "The database plan.",
				Computed:    true, Optional: 
true, + }, + "cluster_size": schema.StringAttribute{ + Description: "The size of the database cluster plan.", + Computed: true, Optional: true, + }, + "production_branch_web_console": schema.BoolAttribute{ + Description: "Whether web console is enabled for production branches.", + Computed: true, Optional: true, + }, + "production_branches_count": schema.Float64Attribute{ + Description: "The total number of database production branches.", + Computed: true, + }, + "ready": schema.BoolAttribute{ + Description: "If the database is ready to be used.", + Computed: true, + }, + "region": schema.StringAttribute{ + Description: "The region the database lives in.", + Computed: true, Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIfConfigured(), + }, + }, + "require_approval_for_deploy": schema.BoolAttribute{ + Description: "Whether an approval is required to deploy schema changes to this database.", + Computed: true, Optional: true, + }, + "restrict_branch_region": schema.BoolAttribute{ + Description: "Whether to restrict branch creation to one region.", + Computed: true, Optional: true, + }, + "schema_last_updated_at": schema.StringAttribute{ + Description: "When the default branch schema was last changed.", + Computed: true, + }, + "sharded": schema.BoolAttribute{ + Description: "If the database is sharded.", + Computed: true, + }, + "state": schema.StringAttribute{ + Description: "State of the database.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the database was last updated.", + Computed: true, + }, + "url": schema.StringAttribute{ + Description: "The URL to the database API endpoint.", + Computed: true, + }, + }, + } +} + +func (r *databaseResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. 
+ if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + r.client = client +} + +func (r *databaseResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data *databaseResourceModel + tflog.Info(ctx, "getting current database resource from plan") + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + createDbReq := planetscale.CreateDatabaseReq{ + Name: name.ValueString(), + Plan: stringValueIfKnown(data.Plan), + ClusterSize: stringValueIfKnown(data.ClusterSize), + Region: stringValueIfKnown(data.Region), + } + res, err := r.client.CreateDatabase(ctx, org.ValueString(), createDbReq) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create database, got error: %s", err)) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to create databases", "no data") + return + } + data = databaseResourcefromClient(ctx, &res.Database, data.Organization, data.ClusterSize, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +func (r *databaseResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data *databaseResourceModel + + tflog.Info(ctx, "getting current database resource from state") + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + res, err := r.client.GetDatabase(ctx, org.ValueString(), name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read database, got error: %s", err)) + return + } + + data = databaseResourcefromClient(ctx, &res.Database, data.Organization, data.ClusterSize, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *databaseResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var ( + old *databaseResourceModel + data *databaseResourceModel + ) + resp.Diagnostics.Append(req.State.Get(ctx, &old)...) + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + changedUpdatableSettings := false + updateReq := planetscale.UpdateDatabaseSettingsReq{ + AllowDataBranching: boolIfDifferent(old.AllowDataBranching, data.AllowDataBranching, &changedUpdatableSettings), + AutomaticMigrations: boolIfDifferent(old.AutomaticMigrations, data.AutomaticMigrations, &changedUpdatableSettings), + DefaultBranch: stringIfDifferent(old.DefaultBranch, data.DefaultBranch, &changedUpdatableSettings), + InsightsRawQueries: boolIfDifferent(old.InsightsRawQueries, data.InsightsRawQueries, &changedUpdatableSettings), + MigrationFramework: stringIfDifferent(old.MigrationFramework, data.MigrationFramework, &changedUpdatableSettings), + MigrationTableName: stringIfDifferent(old.MigrationTableName, data.MigrationTableName, &changedUpdatableSettings), + ProductionBranchWebConsole: boolIfDifferent(old.ProductionBranchWebConsole, data.ProductionBranchWebConsole, &changedUpdatableSettings), + RequireApprovalForDeploy: boolIfDifferent(old.RequireApprovalForDeploy, data.RequireApprovalForDeploy, &changedUpdatableSettings), + RestrictBranchRegion: boolIfDifferent(old.RestrictBranchRegion, data.RestrictBranchRegion, &changedUpdatableSettings), + } + + if changedUpdatableSettings { + res, err := r.client.UpdateDatabaseSettings(ctx, org.ValueString(), name.ValueString(), updateReq) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update database settings, got error: %s", err)) + return + } + data = 
databaseResourcefromClient(ctx, &res.Database, data.Organization, data.ClusterSize, resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *databaseResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data *databaseResourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + org := data.Organization + name := data.Name + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if name.IsNull() || name.IsUnknown() || name.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("name"), "name is required", "a name must be provided and cannot be empty") + return + } + + res, err := r.client.DeleteDatabase(ctx, org.ValueString(), name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete database, got error: %s", err)) + return + } + _ = res +} + +func (r *databaseResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: organization,name. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("organization"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("name"), idParts[1])...) 
+ +} diff --git a/internal/provider/databases_data_source.go b/internal/provider/databases_data_source.go new file mode 100644 index 0000000..6dd87bd --- /dev/null +++ b/internal/provider/databases_data_source.go @@ -0,0 +1,97 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &databasesDataSource{} + _ datasource.DataSourceWithConfigure = &databasesDataSource{} +) + +func newDatabasesDataSource() datasource.DataSource { + return &databasesDataSource{} +} + +type databasesDataSource struct { + client *planetscale.Client +} + +type databasesDataSourceModel struct { + Organization string `tfsdk:"organization"` + Databases []databaseDataSourceModel `tfsdk:"databases"` +} + +func (d *databasesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_databases" +} + +func (d *databasesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale databases.", + MarkdownDescription: "A list of PlanetScale databases.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "databases": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: databaseDataSourceSchemaAttribute(true), + }, + }, + }, + } +} + +func (d *databasesDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + 
fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *databasesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *databasesDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + orgName := data.Organization + res, err := d.client.ListDatabases(ctx, orgName, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read databases", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read databases", "no data") + return + } + state := databasesDataSourceModel{ + Organization: data.Organization, + Databases: make([]databaseDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Databases = append(state.Databases, *databaseFromClient(&item, orgName, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/models_data_source.go b/internal/provider/models_data_source.go new file mode 100644 index 0000000..d97c121 --- /dev/null +++ b/internal/provider/models_data_source.go @@ -0,0 +1,1431 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +func organizationDataSourceSchemaAttribute(computedName bool) map[string]schema.Attribute { + return map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Description: "The name of the organization.", + Required: !computedName, Computed: computedName, + }, + "admin_only_production_access": schema.BoolAttribute{ + Description: "Whether or not only administrators can access production branches in the organization.", + Computed: true, + }, + "billing_email": schema.StringAttribute{ + Description: "The billing email of the organization.", + Computed: true, + }, + "can_create_databases": schema.BoolAttribute{ + Description: "Whether or not more databases can be created in the organization.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the organization was created.", + Computed: true, + }, + "database_count": schema.Float64Attribute{ + Description: "The number of databases in the organization.", + Computed: true, + }, + "features": schema.SingleNestedAttribute{ + Description: "Features that are enabled on the organization.", + Computed: true, + Attributes: featuresDataSourceSchemaAttribute, + }, + "flags": schema.SingleNestedAttribute{ + Description: ".", + Computed: true, + Attributes: flagsDataSourceSchemaAttribute, + }, + "free_databases_remaining": schema.Float64Attribute{ + Description: "The 
number of remaining free databases that can be created in the organization.",
			Computed: true,
		},
		"has_past_due_invoices": schema.BoolAttribute{
			Description: "Whether or not the organization has past due billing invoices.",
			Computed:    true,
		},
		"id": schema.StringAttribute{
			Description: "The ID for the organization.",
			Computed:    true,
		},
		"idp_managed_roles": schema.BoolAttribute{
			// Fixed: description previously read "is be responsible".
			Description: "Whether or not the IdP provider is responsible for managing roles in PlanetScale.",
			Computed:    true,
		},
		"plan": schema.StringAttribute{
			Description: "The billing plan of the organization.",
			Computed:    true,
		},
		"single_tenancy": schema.BoolAttribute{
			Description: "Whether or not the organization has single tenancy enabled.",
			Computed:    true,
		},
		"sleeping_database_count": schema.Float64Attribute{
			Description: "The number of sleeping databases in the organization.",
			Computed:    true,
		},
		"sso": schema.BoolAttribute{
			Description: "Whether or not SSO is enabled on the organization.",
			Computed:    true,
		},
		"sso_directory": schema.BoolAttribute{
			Description: "Whether or not the organization uses a WorkOS directory.",
			Computed:    true,
		},
		"sso_portal_url": schema.StringAttribute{
			Description: "The URL of the organization's SSO portal.",
			Computed:    true,
		},
		"updated_at": schema.StringAttribute{
			Description: "When the organization was last updated.",
			Computed:    true,
		},
		"valid_billing_info": schema.BoolAttribute{
			Description: "Whether or not the organization's billing information is valid.",
			Computed:    true,
		},
	}
}

// organizationDataSourceModel mirrors the organization payload returned by the
// PlanetScale API for use in Terraform state.
type organizationDataSourceModel struct {
	AdminOnlyProductionAccess types.Bool               `tfsdk:"admin_only_production_access"`
	BillingEmail              types.String             `tfsdk:"billing_email"`
	CanCreateDatabases        types.Bool               `tfsdk:"can_create_databases"`
	CreatedAt                 types.String             `tfsdk:"created_at"`
	DatabaseCount             types.Float64            `tfsdk:"database_count"`
	Features                  *featuresDataSourceModel `tfsdk:"features"`
	Flags                     *flagsDataSourceModel    `tfsdk:"flags"`
	FreeDatabasesRemaining    types.Float64            `tfsdk:"free_databases_remaining"`
	HasPastDueInvoices        types.Bool               `tfsdk:"has_past_due_invoices"`
	Id                        types.String             `tfsdk:"id"`
	Name                      types.String             `tfsdk:"name"`
	Plan                      types.String             `tfsdk:"plan"`
	SingleTenancy             types.Bool               `tfsdk:"single_tenancy"`
	SleepingDatabaseCount     types.Float64            `tfsdk:"sleeping_database_count"`
	Sso                       types.Bool               `tfsdk:"sso"`
	SsoDirectory              types.Bool               `tfsdk:"sso_directory"`
	SsoPortalUrl              types.String             `tfsdk:"sso_portal_url"`
	UpdatedAt                 types.String             `tfsdk:"updated_at"`
	ValidBillingInfo          types.Bool               `tfsdk:"valid_billing_info"`
	IdpManagedRoles           types.Bool               `tfsdk:"idp_managed_roles"`
}

// organizationFromClient converts an API organization into its Terraform
// model; it returns nil when given nil.
func organizationFromClient(org *planetscale.Organization) *organizationDataSourceModel {
	if org == nil {
		return nil
	}
	return &organizationDataSourceModel{
		Features:                  featuresFromClient(org.Features),
		Flags:                     flagsFromClient(org.Flags),
		AdminOnlyProductionAccess: types.BoolValue(org.AdminOnlyProductionAccess),
		BillingEmail:              types.StringPointerValue(org.BillingEmail),
		CanCreateDatabases:        types.BoolValue(org.CanCreateDatabases),
		CreatedAt:                 types.StringValue(org.CreatedAt),
		DatabaseCount:             types.Float64Value(org.DatabaseCount),
		FreeDatabasesRemaining:    types.Float64Value(org.FreeDatabasesRemaining),
		HasPastDueInvoices:        types.BoolValue(org.HasPastDueInvoices),
		Id:                        types.StringValue(org.Id),
		Name:                      types.StringValue(org.Name),
		Plan:                      types.StringValue(org.Plan),
		SingleTenancy:             types.BoolValue(org.SingleTenancy),
		SleepingDatabaseCount:     types.Float64Value(org.SleepingDatabaseCount),
		Sso:                       types.BoolValue(org.Sso),
		SsoDirectory:              types.BoolValue(org.SsoDirectory),
		SsoPortalUrl:              types.StringPointerValue(org.SsoPortalUrl),
		UpdatedAt:                 types.StringValue(org.UpdatedAt),
		ValidBillingInfo:          types.BoolValue(org.ValidBillingInfo),
		IdpManagedRoles:           types.BoolValue(org.IdpManagedRoles),
	}
}

// featuresDataSourceSchemaAttribute describes the organization features block.
var featuresDataSourceSchemaAttribute =
map[string]schema.Attribute{ + "insights": schema.BoolAttribute{Computed: true}, + "single_tenancy": schema.BoolAttribute{Computed: true}, + "sso": schema.BoolAttribute{Computed: true}, +} + +type featuresDataSourceModel struct { + Insights types.Bool `tfsdk:"insights"` + SingleTenancy types.Bool `tfsdk:"single_tenancy"` + Sso types.Bool `tfsdk:"sso"` +} + +func featuresFromClient(features *planetscale.Features) *featuresDataSourceModel { + if features == nil { + return nil + } + return &featuresDataSourceModel{ + Insights: types.BoolPointerValue(features.Insights), + SingleTenancy: types.BoolPointerValue(features.SingleTenancy), + Sso: types.BoolPointerValue(features.Sso), + } +} + +var flagsDataSourceSchemaAttribute = map[string]schema.Attribute{ + "example_flag": schema.StringAttribute{Computed: true}, +} + +type flagsDataSourceModel struct { + ExampleFlag types.String `tfsdk:"example_flag"` +} + +func flagsFromClient(flags *planetscale.Flags) *flagsDataSourceModel { + if flags == nil { + return nil + } + return &flagsDataSourceModel{ + ExampleFlag: types.StringPointerValue(flags.ExampleFlag), + } +} + +type dataSourceDataSourceModel struct { + Database types.String `tfsdk:"database"` + Hostname types.String `tfsdk:"hostname"` + Port types.String `tfsdk:"port"` +} + +func dataSourceFromClient(dataSource planetscale.DataSource) dataSourceDataSourceModel { + return dataSourceDataSourceModel{ + Database: types.StringValue(dataSource.Database), + Hostname: types.StringValue(dataSource.Hostname), + Port: types.StringValue(dataSource.Port), + } +} + +type dataImportDataSourceModel struct { + DataSource dataSourceDataSourceModel `tfsdk:"data_source"` + FinishedAt types.String `tfsdk:"finished_at"` + ImportCheckErrors types.String `tfsdk:"import_check_errors"` + StartedAt types.String `tfsdk:"started_at"` + State types.String `tfsdk:"state"` +} + +func dataImportFromClient(dataImport *planetscale.DataImport) *dataImportDataSourceModel { + if dataImport == nil { + return 
nil
	}
	return &dataImportDataSourceModel{
		DataSource:        dataSourceFromClient(dataImport.DataSource),
		FinishedAt:        types.StringValue(dataImport.FinishedAt),
		ImportCheckErrors: types.StringValue(dataImport.ImportCheckErrors),
		StartedAt:         types.StringValue(dataImport.StartedAt),
		State:             types.StringValue(dataImport.State),
	}
}

// databaseDataSourceSchemaAttribute returns the schema attributes describing a
// single database. When computedName is true the identifying attributes
// (organization, name) are computed instead of required, for use inside list
// results.
func databaseDataSourceSchemaAttribute(computedName bool) map[string]schema.Attribute {
	return map[string]schema.Attribute{
		"organization": schema.StringAttribute{
			Description: "The organization this database belongs to.",
			Required:    !computedName, Computed: computedName,
		},
		"name": schema.StringAttribute{
			Description: "The name of this database.",
			Required:    !computedName, Computed: computedName,
		},
		"id": schema.StringAttribute{
			Description: "The ID of the database.",
			Computed:    true,
		},
		"allow_data_branching": schema.BoolAttribute{
			Description: "Whether seeding branches with data is enabled for all branches.",
			Computed:    true, Optional: true,
		},
		"at_backup_restore_branches_limit": schema.BoolAttribute{
			Description: "If the database has reached its backup restored branch limit.",
			Computed:    true,
		},
		"at_development_branch_limit": schema.BoolAttribute{
			Description: "If the database has reached its development branch limit.",
			Computed:    true,
		},
		"automatic_migrations": schema.BoolAttribute{
			Description: "Whether to automatically manage Rails migrations during deploy requests.",
			Computed:    true, Optional: true,
		},
		"branches_count": schema.Float64Attribute{
			Description: "The total number of database branches.",
			Computed:    true,
		},
		"branches_url": schema.StringAttribute{
			Description: "The URL to retrieve this database's branches via the API.",
			Computed:    true,
		},
		"created_at": schema.StringAttribute{
			Description: "When the database was created.",
			Computed:    true,
		},
		"data_import": schema.SingleNestedAttribute{
			Description: "If the database was created from an import, describes the import process.",
			Optional:    true,
			Attributes: map[string]schema.Attribute{
				"data_source": schema.SingleNestedAttribute{
					Description: "Connection information for the source of the data for the import.",
					Computed:    true,
					Attributes: map[string]schema.Attribute{
						"database": schema.StringAttribute{
							Description: "The name of the database imported from.",
							Required:    true,
						},
						"hostname": schema.StringAttribute{
							Description: "The hostname where the database lives.",
							Required:    true,
						},
						"port": schema.StringAttribute{
							Description: "The port on which the database listens on the host.",
							Required:    true,
						},
					},
				},
				"finished_at": schema.StringAttribute{
					Description: "When the import finished.",
					Computed:    true,
				},
				"import_check_errors": schema.StringAttribute{
					Description: "Errors encountered while preparing the import.",
					Computed:    true,
				},
				"started_at": schema.StringAttribute{
					Description: "When the import started.",
					Computed:    true,
				},
				"state": schema.StringAttribute{
					Description: "The state of the import, one of: pending, queued, in_progress, complete, cancelled, error.",
					Computed:    true,
				},
			},
		},
		"default_branch": schema.StringAttribute{
			Description: "The default branch for the database.",
			Computed:    true, Optional: true,
		},
		"default_branch_read_only_regions_count": schema.Float64Attribute{
			Description: "Number of read only regions in the default branch.",
			Computed:    true,
		},
		"default_branch_shard_count": schema.Float64Attribute{
			Description: "Number of shards in the default branch.",
			Computed:    true,
		},
		"default_branch_table_count": schema.Float64Attribute{
			Description: "Number of tables in the default branch schema.",
			Computed:    true,
		},
		"development_branches_count": schema.Float64Attribute{
			Description: "The total number of database development branches.",
			Computed:    true,
		},
		"html_url": schema.StringAttribute{
			// Fixed: description was a copy-paste of development_branches_count's.
			Description: "The URL to see this database's branches in the web UI.",
			Computed:    true,
		},
		"insights_raw_queries": schema.BoolAttribute{
			// Fixed: description was a copy-paste of html_url's.
			Description: "If the database's insights raw queries are enabled.",
			Computed:    true, Optional: true,
		},
		"issues_count": schema.Float64Attribute{
			Description: "The total number of ongoing issues within a database.",
			Computed:    true, Optional: true,
		},
		"migration_framework": schema.StringAttribute{
			Description: "Framework used for applying migrations.",
			Computed:    true, Optional: true,
		},
		"migration_table_name": schema.StringAttribute{
			Description: "Table name to use for copying schema migration data.",
			Computed:    true, Optional: true,
		},
		"multiple_admins_required_for_deletion": schema.BoolAttribute{
			Description: "If the database requires multiple admins for deletion.",
			Computed:    true, Optional: true,
		},
		"plan": schema.StringAttribute{
			Description: "The database plan.",
			Computed:    true, Optional: true,
		},
		"production_branch_web_console": schema.BoolAttribute{
			Description: "Whether web console is enabled for production branches.",
			Computed:    true, Optional: true,
		},
		"production_branches_count": schema.Float64Attribute{
			Description: "The total number of database production branches.",
			Computed:    true,
		},
		"ready": schema.BoolAttribute{
			Description: "If the database is ready to be used.",
			Computed:    true,
		},
		"region": schema.SingleNestedAttribute{
			Description: "The region the database lives in.",
			Computed:    true, Optional: true,
			Attributes: regionDataSourceSchemaAttribute,
		},
		"require_approval_for_deploy": schema.BoolAttribute{
			Description: "Whether an approval is required to deploy schema changes to this database.",
			Computed:    true, Optional: true,
		},
		"restrict_branch_region": schema.BoolAttribute{
			Description: "Whether to restrict branch creation to one region.",
			Computed:    true, Optional: true,
		},
		"schema_last_updated_at": schema.StringAttribute{
			Description: "When the default branch schema was last changed.",
			Computed:    true,
		},
		"sharded": schema.BoolAttribute{
			Description: "If the database is sharded.",
			Computed:    true,
		},
		"state": schema.StringAttribute{
			Description: "State of the database.",
			Computed:    true,
		},
		"updated_at": schema.StringAttribute{
			Description: "When the database was last updated.",
			Computed:    true,
		},
		"url": schema.StringAttribute{
			Description: "The URL to the database API endpoint.",
			Computed:    true,
		},
	}
}

// databaseDataSourceModel mirrors a PlanetScale database API object in
// Terraform state.
type databaseDataSourceModel struct {
	Organization                      string                     `tfsdk:"organization"`
	AllowDataBranching                types.Bool                 `tfsdk:"allow_data_branching"`
	AtBackupRestoreBranchesLimit      types.Bool                 `tfsdk:"at_backup_restore_branches_limit"`
	AtDevelopmentBranchLimit          types.Bool                 `tfsdk:"at_development_branch_limit"`
	AutomaticMigrations               types.Bool                 `tfsdk:"automatic_migrations"`
	BranchesCount                     types.Float64              `tfsdk:"branches_count"`
	BranchesUrl                       types.String               `tfsdk:"branches_url"`
	CreatedAt                         types.String               `tfsdk:"created_at"`
	DataImport                        *dataImportDataSourceModel `tfsdk:"data_import"`
	DefaultBranch                     types.String               `tfsdk:"default_branch"`
	DefaultBranchReadOnlyRegionsCount types.Float64              `tfsdk:"default_branch_read_only_regions_count"`
	DefaultBranchShardCount           types.Float64              `tfsdk:"default_branch_shard_count"`
	DefaultBranchTableCount           types.Float64              `tfsdk:"default_branch_table_count"`
	DevelopmentBranchesCount          types.Float64              `tfsdk:"development_branches_count"`
	HtmlUrl                           types.String               `tfsdk:"html_url"`
	Id                                types.String               `tfsdk:"id"`
	InsightsRawQueries                types.Bool                 `tfsdk:"insights_raw_queries"`
	IssuesCount                       types.Float64              `tfsdk:"issues_count"`
	MigrationFramework                types.String               `tfsdk:"migration_framework"`
	MigrationTableName                types.String               `tfsdk:"migration_table_name"`
	MultipleAdminsRequiredForDeletion types.Bool                 `tfsdk:"multiple_admins_required_for_deletion"`
	Name                              types.String               `tfsdk:"name"`
	Plan                              types.String               `tfsdk:"plan"`
	ProductionBranchWebConsole        types.Bool
`tfsdk:"production_branch_web_console"`
	ProductionBranchesCount           types.Float64          `tfsdk:"production_branches_count"`
	Ready                             types.Bool             `tfsdk:"ready"`
	Region                            *regionDataSourceModel `tfsdk:"region"`
	RequireApprovalForDeploy          types.Bool             `tfsdk:"require_approval_for_deploy"`
	RestrictBranchRegion              types.Bool             `tfsdk:"restrict_branch_region"`
	SchemaLastUpdatedAt               types.String           `tfsdk:"schema_last_updated_at"`
	Sharded                           types.Bool             `tfsdk:"sharded"`
	State                             types.String           `tfsdk:"state"`
	UpdatedAt                         types.String           `tfsdk:"updated_at"`
	Url                               types.String           `tfsdk:"url"`
}

// databaseFromClient converts an API database into its Terraform data-source
// model; it returns nil when given nil.
// NOTE(review): diags is passed by value, so diagnostics appended inside
// regionFromClient may not propagate to the caller — confirm.
func databaseFromClient(database *planetscale.Database, orgName string, diags diag.Diagnostics) *databaseDataSourceModel {
	if database == nil {
		return nil
	}
	out := databaseDataSourceModel{
		Organization:                      orgName,
		AllowDataBranching:                types.BoolValue(database.AllowDataBranching),
		AtBackupRestoreBranchesLimit:      types.BoolValue(database.AtBackupRestoreBranchesLimit),
		AtDevelopmentBranchLimit:          types.BoolValue(database.AtDevelopmentBranchLimit),
		AutomaticMigrations:               types.BoolPointerValue(database.AutomaticMigrations),
		BranchesCount:                     types.Float64Value(database.BranchesCount),
		BranchesUrl:                       types.StringValue(database.BranchesUrl),
		CreatedAt:                         types.StringValue(database.CreatedAt),
		DataImport:                        dataImportFromClient(database.DataImport),
		DefaultBranch:                     types.StringValue(database.DefaultBranch),
		DefaultBranchReadOnlyRegionsCount: types.Float64Value(database.DefaultBranchReadOnlyRegionsCount),
		DefaultBranchShardCount:           types.Float64Value(database.DefaultBranchShardCount),
		DefaultBranchTableCount:           types.Float64Value(database.DefaultBranchTableCount),
		DevelopmentBranchesCount:          types.Float64Value(database.DevelopmentBranchesCount),
		HtmlUrl:                           types.StringValue(database.HtmlUrl),
		Id:                                types.StringValue(database.Id),
		InsightsRawQueries:                types.BoolValue(database.InsightsRawQueries),
		IssuesCount:                       types.Float64Value(database.IssuesCount),
		MigrationFramework:                types.StringPointerValue(database.MigrationFramework),
		MigrationTableName:                types.StringPointerValue(database.MigrationTableName),
		MultipleAdminsRequiredForDeletion: types.BoolValue(database.MultipleAdminsRequiredForDeletion),
		Name:                              types.StringValue(database.Name),
		Plan:                              types.StringValue(database.Plan),
		ProductionBranchWebConsole:        types.BoolValue(database.ProductionBranchWebConsole),
		ProductionBranchesCount:           types.Float64Value(database.ProductionBranchesCount),
		Ready:                             types.BoolValue(database.Ready),
		Region:                            regionFromClient(&database.Region, diags),
		RequireApprovalForDeploy:          types.BoolValue(database.RequireApprovalForDeploy),
		RestrictBranchRegion:              types.BoolValue(database.RestrictBranchRegion),
		SchemaLastUpdatedAt:               types.StringPointerValue(database.SchemaLastUpdatedAt),
		Sharded:                           types.BoolValue(database.Sharded),
		State:                             types.StringValue(database.State),
		UpdatedAt:                         types.StringValue(database.UpdatedAt),
		Url:                               types.StringValue(database.Url),
	}
	return &out
}

// branchDataSourceSchemaAttribute returns the schema attributes describing a
// single branch; computedName controls whether the identifying attributes are
// required or computed.
func branchDataSourceSchemaAttribute(computedName bool) map[string]schema.Attribute {
	return map[string]schema.Attribute{
		"organization": schema.StringAttribute{
			Description: "The organization this branch belongs to.",
			Required:    !computedName, Computed: computedName,
		},
		"database": schema.StringAttribute{
			Description: "The database this branch belongs to.",
			Required:    !computedName, Computed: computedName,
		},
		"name": schema.StringAttribute{
			Description: "The name of the branch.",
			Required:    !computedName, Computed: computedName,
		},

		"access_host_url": schema.StringAttribute{
			Description: "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`.",
			Computed: true,
		},
		"cluster_rate_name": schema.StringAttribute{
			Description: "The SKU representing the branch's cluster size.",
			Computed:    true,
		},
		"created_at": schema.StringAttribute{
			Description: "When the branch was created.",
			Computed:    true,
		},
		"html_url": schema.StringAttribute{
			Description: "Planetscale app URL for the branch.",
			Computed:    true,
		},
		"id": schema.StringAttribute{
			Description: "The ID of the branch.",
			Computed:    true,
		},
		"initial_restore_id": schema.StringAttribute{
			Description: "The ID of the backup from which the branch was restored.",
			Computed:    true,
		},
		"mysql_address": schema.StringAttribute{
			Description: "The MySQL address for the branch.",
			Computed:    true,
		},
		"mysql_edge_address": schema.StringAttribute{
			Description: "The address of the MySQL provider for the branch.",
			Computed:    true,
		},
		"parent_branch": schema.StringAttribute{
			Description: "The name of the parent branch from which the branch was created.",
			Computed:    true,
		},
		"production": schema.BoolAttribute{
			Description: "Whether or not the branch is a production branch.",
			Computed:    true,
		},
		"ready": schema.BoolAttribute{
			Description: "Whether or not the branch is ready to serve queries.",
			Computed:    true,
		},
		"restore_checklist_completed_at": schema.StringAttribute{
			Description: "When a user last marked a backup restore checklist as completed.",
			Computed:    true,
		},
		"schema_last_updated_at": schema.StringAttribute{
			Description: "When the schema for the branch was last updated.",
			Computed:    true,
		},
		"shard_count": schema.Float64Attribute{
			Description: "The number of shards in the branch.",
			Computed:    true,
		},
		"sharded": schema.BoolAttribute{
			Description: "Whether or not the branch is sharded.",
			Computed:    true,
		},
		"updated_at": schema.StringAttribute{
			Description: "When the branch was last updated.",
			Computed:    true,
		},

		"actor": schema.SingleNestedAttribute{
			Description: "The actor who created this branch.",
			Computed:    true,
			Attributes:  actorDataSourceSchemaAttribute,
		},
		"region": schema.SingleNestedAttribute{
			Description: "The region in which this branch lives.",
			Computed:    true,
			Attributes:  regionDataSourceSchemaAttribute,
		},
		"restored_from_branch": schema.SingleNestedAttribute{
			Description: "",
			Computed:    true,
			Attributes:  restoredFromBranchDataSourceSchemaAttribute,
		},
	}
}

// branchDataSourceModel mirrors a PlanetScale branch API object in Terraform
// state.
type branchDataSourceModel struct {
	Organization                types.String                       `tfsdk:"organization"`
	Database                    types.String                       `tfsdk:"database"`
	Name                        types.String                       `tfsdk:"name"`
	AccessHostUrl               types.String                       `tfsdk:"access_host_url"`
	Actor                       *actorDataSourceModel              `tfsdk:"actor"`
	ClusterRateName             types.String                       `tfsdk:"cluster_rate_name"`
	CreatedAt                   types.String                       `tfsdk:"created_at"`
	HtmlUrl                     types.String                       `tfsdk:"html_url"`
	Id                          types.String                       `tfsdk:"id"`
	InitialRestoreId            types.String                       `tfsdk:"initial_restore_id"`
	MysqlAddress                types.String                       `tfsdk:"mysql_address"`
	MysqlEdgeAddress            types.String                       `tfsdk:"mysql_edge_address"`
	ParentBranch                types.String                       `tfsdk:"parent_branch"`
	Production                  types.Bool                         `tfsdk:"production"`
	Ready                       types.Bool                         `tfsdk:"ready"`
	Region                      *regionDataSourceModel             `tfsdk:"region"`
	RestoreChecklistCompletedAt types.String                       `tfsdk:"restore_checklist_completed_at"`
	RestoredFromBranch          *restoredFromBranchDataSourceModel `tfsdk:"restored_from_branch"`
	SchemaLastUpdatedAt         types.String                       `tfsdk:"schema_last_updated_at"`
	ShardCount                  types.Float64                      `tfsdk:"shard_count"`
	Sharded                     types.Bool                         `tfsdk:"sharded"`
	UpdatedAt                   types.String                       `tfsdk:"updated_at"`
}

// branchFromClient converts an API branch into its Terraform model; it
// returns nil when given nil.
func branchFromClient(branch *planetscale.Branch, organization, database string, diags diag.Diagnostics) *branchDataSourceModel {
	if branch == nil {
		return nil
	}
	out := branchDataSourceModel{
		Organization:                types.StringValue(organization),
		Database:                    types.StringValue(database),
		Name:                        types.StringValue(branch.Name),
		AccessHostUrl:               types.StringPointerValue(branch.AccessHostUrl),
		Actor:                       actorFromClient(branch.Actor),
		ClusterRateName:             types.StringValue(branch.ClusterRateName),
		CreatedAt:                   types.StringValue(branch.CreatedAt),
		HtmlUrl:                     types.StringValue(branch.HtmlUrl),
		Id:                          types.StringValue(branch.Id),
		InitialRestoreId:            types.StringPointerValue(branch.InitialRestoreId),
		MysqlAddress:                types.StringValue(branch.MysqlAddress),
		MysqlEdgeAddress:            types.StringValue(branch.MysqlEdgeAddress),
		ParentBranch:                types.StringPointerValue(branch.ParentBranch),
		Production:                  types.BoolValue(branch.Production),
		Ready:                       types.BoolValue(branch.Ready),
		Region:                      regionFromClient(branch.Region, diags),
		RestoreChecklistCompletedAt: types.StringPointerValue(branch.RestoreChecklistCompletedAt),
		RestoredFromBranch:          restoredFromBranchFromClient(branch.RestoredFromBranch),
		SchemaLastUpdatedAt:         types.StringValue(branch.SchemaLastUpdatedAt),
		ShardCount:                  types.Float64PointerValue(branch.ShardCount),
		Sharded:                     types.BoolValue(branch.Sharded),
		UpdatedAt:                   types.StringValue(branch.UpdatedAt),
	}
	return &out
}

// actorDataSourceSchemaAttribute describes the actor block shared by several
// data sources.
var actorDataSourceSchemaAttribute = map[string]schema.Attribute{
	"avatar_url": schema.StringAttribute{
		Computed: true, Description: "The URL of the actor's avatar",
	},
	"display_name": schema.StringAttribute{
		Computed: true, Description: "The name of the actor",
	},
	"id": schema.StringAttribute{
		Computed: true, Description: "The ID of the actor",
	},
}

// actorDataSourceModel identifies the user or service that performed an action.
type actorDataSourceModel struct {
	AvatarUrl   types.String `tfsdk:"avatar_url"`
	DisplayName types.String `tfsdk:"display_name"`
	Id          types.String `tfsdk:"id"`
}

// actorFromClient converts an API actor into its Terraform model; nil in,
// nil out.
func actorFromClient(actor *planetscale.Actor) *actorDataSourceModel {
	if actor == nil {
		return nil
	}
	out := actorDataSourceModel{
		AvatarUrl:   types.StringValue(actor.AvatarUrl),
		DisplayName: types.StringValue(actor.DisplayName),
		Id:          types.StringValue(actor.Id),
	}
	return &out
}

// regionDataSourceSchemaAttribute describes the region block shared by several
// data sources.
var regionDataSourceSchemaAttribute =
map[string]schema.Attribute{ + "display_name": schema.StringAttribute{ + Description: "Name of the region.", + Computed: true, + }, + "enabled": schema.BoolAttribute{ + Description: "Whether or not the region is currently active.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID of the region.", + Computed: true, + }, + "location": schema.StringAttribute{ + Description: "Location of the region.", + Computed: true, + }, + "provider": schema.StringAttribute{ + Description: "Provider for the region (ex. AWS).", + Computed: true, + }, + "public_ip_addresses": schema.ListAttribute{ + Description: "Public IP addresses for the region.", + Computed: true, ElementType: types.StringType, + }, + "slug": schema.StringAttribute{ + Description: "The slug of the region.", + Computed: true, + }, +} + +type regionDataSourceModel struct { + DisplayName types.String `tfsdk:"display_name"` + Enabled types.Bool `tfsdk:"enabled"` + Id types.String `tfsdk:"id"` + Location types.String `tfsdk:"location"` + Provider types.String `tfsdk:"provider"` + PublicIpAddresses types.List `tfsdk:"public_ip_addresses"` + Slug types.String `tfsdk:"slug"` +} + +func regionFromClient(region *planetscale.Region, diags diag.Diagnostics) *regionDataSourceModel { + if region == nil { + return nil + } + return &regionDataSourceModel{ + DisplayName: types.StringValue(region.DisplayName), + Enabled: types.BoolValue(region.Enabled), + Id: types.StringValue(region.Id), + Location: types.StringValue(region.Location), + Provider: types.StringValue(region.Provider), + PublicIpAddresses: stringsToListValue(region.PublicIpAddresses, diags), + Slug: types.StringValue(region.Slug), + } +} + +var readOnlyRegionsDataSourceSchemaAttribute = map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization for which the read-only regions are available.", + Required: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the database for which 
the read-only regions are available.", + Required: true, + }, + "regions": schema.ListNestedAttribute{ + Description: "The list of read-only regions available for the database.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "actor": schema.SingleNestedAttribute{ + Description: "The actor that created the read-only region.", + Computed: true, + Attributes: actorDataSourceSchemaAttribute, + }, + "created_at": schema.StringAttribute{ + Description: "When the read-only region was created.", + Computed: true, + }, + "display_name": schema.StringAttribute{ + Description: "The name of the read-only region.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID of the read-only region.", + Computed: true, + }, + "ready": schema.BoolAttribute{ + Description: "Whether or not the read-only region is ready to serve queries.", + Computed: true, + }, + "ready_at": schema.StringAttribute{ + Description: "When the read-only region was ready to serve queries.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the read-only region was last updated.", + Computed: true, + }, + "region": schema.SingleNestedAttribute{ + Description: "The details of the read-only region.", + Computed: true, + Attributes: regionDataSourceSchemaAttribute, + }, + }, + }, + }, +} + +type readOnlyRegionDataSourceModel struct { + Actor actorDataSourceModel `tfsdk:"actor"` + CreatedAt types.String `tfsdk:"created_at"` + DisplayName types.String `tfsdk:"display_name"` + Id types.String `tfsdk:"id"` + Ready types.Bool `tfsdk:"ready"` + ReadyAt types.String `tfsdk:"ready_at"` + Region regionDataSourceModel `tfsdk:"region"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func readOnlyRegionFromClient(readOnlyRegion *planetscale.ReadOnlyRegion, diags diag.Diagnostics) *readOnlyRegionDataSourceModel { + if readOnlyRegion == nil { + return nil + } + + return &readOnlyRegionDataSourceModel{ + 
Actor: *actorFromClient(&readOnlyRegion.Actor), + Region: *regionFromClient(&readOnlyRegion.Region, diags), + CreatedAt: types.StringValue(readOnlyRegion.CreatedAt), + DisplayName: types.StringValue(readOnlyRegion.DisplayName), + Id: types.StringValue(readOnlyRegion.Id), + Ready: types.BoolValue(readOnlyRegion.Ready), + ReadyAt: types.StringValue(readOnlyRegion.ReadyAt), + UpdatedAt: types.StringValue(readOnlyRegion.UpdatedAt), + } +} + +var restoredFromBranchDataSourceSchemaAttribute = map[string]schema.Attribute{ + "created_at": schema.StringAttribute{ + Description: "When the resource was created.", + Computed: true, + }, + "deleted_at": schema.StringAttribute{ + Description: "When the resource was deleted, if deleted.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID for the resource.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name for the resource.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the resource was last updated.", + Computed: true, + }, +} + +type restoredFromBranchDataSourceModel struct { + CreatedAt types.String `tfsdk:"created_at"` + DeletedAt types.String `tfsdk:"deleted_at"` + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func restoredFromBranchFromClient(rfb *planetscale.RestoredFromBranch) *restoredFromBranchDataSourceModel { + if rfb == nil { + return nil + } + return &restoredFromBranchDataSourceModel{ + CreatedAt: types.StringValue(rfb.CreatedAt), + DeletedAt: types.StringValue(rfb.DeletedAt), + Id: types.StringValue(rfb.Id), + Name: types.StringValue(rfb.Name), + UpdatedAt: types.StringValue(rfb.UpdatedAt), + } +} + +var tableSchemaDataSourceSchemaAttribute = map[string]schema.Attribute{ + "html": schema.StringAttribute{ + Description: "Syntax highlighted HTML for the table's schema.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Name of 
the table.", + Computed: true, + }, + "raw": schema.StringAttribute{ + Description: "The table's schema.", + Computed: true, + }, +} + +type tableSchemaDataSourceModel struct { + Html types.String `tfsdk:"html"` + Name types.String `tfsdk:"name"` + Raw types.String `tfsdk:"raw"` +} + +func tableSchemaFromClient(ts *planetscale.TableSchema) *tableSchemaDataSourceModel { + if ts == nil { + return nil + } + return &tableSchemaDataSourceModel{ + Html: types.StringValue(ts.Html), + Name: types.StringValue(ts.Name), + Raw: types.StringValue(ts.Raw), + } +} + +var lintErrorDataSourceSchemaAttribute = map[string]schema.Attribute{ + "auto_increment_column_names": schema.ListAttribute{ + Description: "A list of invalid auto-incremented columns.", + Computed: true, ElementType: types.StringType, + }, + "charset_name": schema.StringAttribute{ + Description: "The charset of the schema.", + Computed: true, + }, + "check_constraint_name": schema.StringAttribute{ + Description: "The name of the invalid check constraint.", + Computed: true, + }, + "column_name": schema.StringAttribute{ + Description: "The column in a table relevant to the error.", + Computed: true, + }, + "docs_url": schema.StringAttribute{ + Description: "A link to the documentation related to the error.", + Computed: true, + }, + "engine_name": schema.StringAttribute{ + Description: "The engine of the schema.", + Computed: true, + }, + "enum_value": schema.StringAttribute{ + Description: "The name of the invalid enum value.", + Computed: true, + }, + "error_description": schema.StringAttribute{ + Description: "A description for the error that occurred.", + Computed: true, + }, + "foreign_key_column_names": schema.ListAttribute{ + Description: "A list of invalid foreign key columns in a table.", + Computed: true, ElementType: types.StringType, + }, + "json_path": schema.StringAttribute{ + Description: "The path for an invalid JSON column.", + Computed: true, + }, + "keyspace_name": schema.StringAttribute{ + 
Description: "The keyspace of the schema with the error.", + Computed: true, + }, + "lint_error": schema.StringAttribute{ + Description: "Code representing.", + Computed: true, + }, + "partition_name": schema.StringAttribute{ + Description: "The name of the invalid partition in the schema.", + Computed: true, + }, + "partitioning_type": schema.StringAttribute{ + Description: "The name of the invalid partitioning type.", + Computed: true, + }, + "subject_type": schema.StringAttribute{ + Description: "The subject for the errors.", + Computed: true, + }, + "table_name": schema.StringAttribute{ + Description: "The table with the error.", + Computed: true, + }, + "vindex_name": schema.StringAttribute{ + Description: "The name of the vindex for the schema.", + Computed: true, + }, +} + +type lintErrorDataSourceModel struct { + AutoIncrementColumnNames types.List `tfsdk:"auto_increment_column_names"` + CharsetName types.String `tfsdk:"charset_name"` + CheckConstraintName types.String `tfsdk:"check_constraint_name"` + ColumnName types.String `tfsdk:"column_name"` + DocsUrl types.String `tfsdk:"docs_url"` + EngineName types.String `tfsdk:"engine_name"` + EnumValue types.String `tfsdk:"enum_value"` + ErrorDescription types.String `tfsdk:"error_description"` + ForeignKeyColumnNames types.List `tfsdk:"foreign_key_column_names"` + JsonPath types.String `tfsdk:"json_path"` + KeyspaceName types.String `tfsdk:"keyspace_name"` + LintError types.String `tfsdk:"lint_error"` + PartitionName types.String `tfsdk:"partition_name"` + PartitioningType types.String `tfsdk:"partitioning_type"` + SubjectType types.String `tfsdk:"subject_type"` + TableName types.String `tfsdk:"table_name"` + VindexName types.String `tfsdk:"vindex_name"` +} + +func lintErrorFromClient(le *planetscale.LintError, diags diag.Diagnostics) *lintErrorDataSourceModel { + if le == nil { + return nil + } + return &lintErrorDataSourceModel{ + AutoIncrementColumnNames: stringsToListValue(le.AutoIncrementColumnNames, 
diags), + CharsetName: types.StringValue(le.CharsetName), + CheckConstraintName: types.StringValue(le.CheckConstraintName), + ColumnName: types.StringValue(le.ColumnName), + DocsUrl: types.StringValue(le.DocsUrl), + EngineName: types.StringValue(le.EngineName), + EnumValue: types.StringValue(le.EnumValue), + ErrorDescription: types.StringValue(le.ErrorDescription), + ForeignKeyColumnNames: stringsToListValue(le.ForeignKeyColumnNames, diags), + JsonPath: types.StringValue(le.JsonPath), + KeyspaceName: types.StringValue(le.KeyspaceName), + LintError: types.StringValue(le.LintError), + PartitionName: types.StringValue(le.PartitionName), + PartitioningType: types.StringValue(le.PartitioningType), + SubjectType: types.StringValue(le.SubjectType), + TableName: types.StringValue(le.TableName), + VindexName: types.StringValue(le.VindexName), + } +} + +var oauthApplicationAttribute = map[string]schema.Attribute{ + "avatar": schema.StringAttribute{ + Description: "The image source for the OAuth application's avatar.", + Computed: true, + }, + "client_id": schema.StringAttribute{ + Description: "The OAuth application's unique client id.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the OAuth application was created.", + Computed: true, + }, + "domain": schema.StringAttribute{ + Description: "The domain of the OAuth application. 
Used for verification of a valid redirect uri.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID of the OAuth application.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the OAuth application.", + Computed: true, + }, + "redirect_uri": schema.StringAttribute{ + Description: "The redirect URI of the OAuth application.", + Computed: true, + }, + "scopes": schema.ListAttribute{ + Description: "The scopes that the OAuth application requires on a user's account.", + Computed: true, ElementType: types.StringType, + }, + "tokens": schema.Float64Attribute{ + Description: "The number of tokens issued by the OAuth application.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the OAuth application was last updated.", + Computed: true, + }, +} + +type oauthApplicationDataSourceModel struct { + Avatar types.String `tfsdk:"avatar"` + ClientId types.String `tfsdk:"client_id"` + CreatedAt types.String `tfsdk:"created_at"` + Domain types.String `tfsdk:"domain"` + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + RedirectUri types.String `tfsdk:"redirect_uri"` + Scopes types.List `tfsdk:"scopes"` + Tokens types.Float64 `tfsdk:"tokens"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func oauthApplicationFromClient(oa *planetscale.OauthApplication, diags diag.Diagnostics) *oauthApplicationDataSourceModel { + if oa == nil { + return nil + } + return &oauthApplicationDataSourceModel{ + Avatar: types.StringPointerValue(oa.Avatar), + ClientId: types.StringValue(oa.ClientId), + CreatedAt: types.StringValue(oa.CreatedAt), + Domain: types.StringValue(oa.Domain), + Id: types.StringValue(oa.Id), + Name: types.StringValue(oa.Name), + RedirectUri: types.StringValue(oa.RedirectUri), + Scopes: stringsToListValue(oa.Scopes, diags), + Tokens: types.Float64Value(oa.Tokens), + UpdatedAt: types.StringValue(oa.UpdatedAt), + } +} + +var backupPolicyDataSourceSchemaAttribute = 
map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The ID of the backup policy.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the backup policy was created.", + Computed: true, + }, + "frequency_unit": schema.StringAttribute{ + Description: "The unit for the frequency of the backup policy.", + Computed: true, + }, + "frequency_value": schema.Float64Attribute{ + Description: "A number value for the frequency of the backup policy.", + Computed: true, + }, + "last_ran_at": schema.StringAttribute{ + Description: "When the backup was last run.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the backup policy.", + Computed: true, + }, + "next_run_at": schema.StringAttribute{ + Description: "When the backup will next run.", + Computed: true, + }, + "retention_unit": schema.StringAttribute{ + Description: "The unit for the retention period of the backup policy.", + Computed: true, + }, + "retention_value": schema.Float64Attribute{ + Description: "A number value for the retention period of the backup policy.", + Computed: true, + }, + "schedule_day": schema.StringAttribute{ + Description: "Day of the week that the backup is scheduled.", + Computed: true, + }, + "schedule_week": schema.StringAttribute{ + Description: "Week of the month that the backup is scheduled.", + Computed: true, + }, + "target": schema.StringAttribute{ + Description: "Whether the backup policy is for a production or development database, or for a database branch.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the backup policy was last updated.", + Computed: true, + }, +} + +type backupPolicyDataSourceModel struct { + CreatedAt types.String `tfsdk:"created_at"` + FrequencyUnit types.String `tfsdk:"frequency_unit"` + FrequencyValue types.Float64 `tfsdk:"frequency_value"` + Id types.String `tfsdk:"id"` + LastRanAt types.String `tfsdk:"last_ran_at"` + Name 
types.String `tfsdk:"name"` + NextRunAt types.String `tfsdk:"next_run_at"` + RetentionUnit types.String `tfsdk:"retention_unit"` + RetentionValue types.Float64 `tfsdk:"retention_value"` + ScheduleDay types.String `tfsdk:"schedule_day"` + ScheduleWeek types.String `tfsdk:"schedule_week"` + Target types.String `tfsdk:"target"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func backupPolicyFromClient(backupPolicy *planetscale.BackupPolicy) *backupPolicyDataSourceModel { + if backupPolicy == nil { + return nil + } + return &backupPolicyDataSourceModel{ + CreatedAt: types.StringValue(backupPolicy.CreatedAt), + FrequencyUnit: types.StringValue(backupPolicy.FrequencyUnit), + FrequencyValue: types.Float64Value(backupPolicy.FrequencyValue), + Id: types.StringValue(backupPolicy.Id), + LastRanAt: types.StringValue(backupPolicy.LastRanAt), + Name: types.StringValue(backupPolicy.Name), + NextRunAt: types.StringValue(backupPolicy.NextRunAt), + RetentionUnit: types.StringValue(backupPolicy.RetentionUnit), + RetentionValue: types.Float64Value(backupPolicy.RetentionValue), + ScheduleDay: types.StringValue(backupPolicy.ScheduleDay), + ScheduleWeek: types.StringValue(backupPolicy.ScheduleWeek), + Target: types.StringValue(backupPolicy.Target), + UpdatedAt: types.StringValue(backupPolicy.UpdatedAt), + } +} + +var branchForPasswordDataSourceSchemaAttribute = map[string]schema.Attribute{ + "access_host_url": schema.StringAttribute{Computed: true}, + "id": schema.StringAttribute{Computed: true}, + "mysql_edge_address": schema.StringAttribute{Computed: true}, + "name": schema.StringAttribute{Computed: true}, + "production": schema.BoolAttribute{Computed: true}, +} + +type branchForPasswordDataSourceModel struct { + AccessHostUrl types.String `tfsdk:"access_host_url"` + Id types.String `tfsdk:"id"` + MysqlEdgeAddress types.String `tfsdk:"mysql_edge_address"` + Name types.String `tfsdk:"name"` + Production types.Bool `tfsdk:"production"` +} + +func 
branchForPasswordFromClient(branchForPassword *planetscale.BranchForPassword) *branchForPasswordDataSourceModel { + if branchForPassword == nil { + return nil + } + return &branchForPasswordDataSourceModel{ + AccessHostUrl: types.StringValue(branchForPassword.AccessHostUrl), + Id: types.StringValue(branchForPassword.Id), + MysqlEdgeAddress: types.StringValue(branchForPassword.MysqlEdgeAddress), + Name: types.StringValue(branchForPassword.Name), + Production: types.BoolValue(branchForPassword.Production), + } +} + +func passwordDataSourceSchemaAttribute(computedName bool) map[string]schema.Attribute { + return map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization this database branch password belongs to.", + Required: !computedName, Computed: computedName, + }, + "database": schema.StringAttribute{ + Description: "The database this branch password belongs to.", + Required: !computedName, Computed: computedName, + }, + "branch": schema.StringAttribute{ + Description: "The branch this password belongs to.", + Required: !computedName, Computed: computedName, + }, + "id": schema.StringAttribute{ + Description: "The ID for the password.", + Required: !computedName, Computed: computedName, + }, + "read_only_region_id": schema.StringAttribute{ + Description: "If the password is for a read-only region, the ID of the region.", + Optional: !computedName, Computed: computedName, + }, + "access_host_url": schema.StringAttribute{ + Description: "The host URL for the password.", + Computed: true, + }, + "actor": schema.SingleNestedAttribute{ + Description: "The actor that created this branch.", + Computed: true, + Attributes: actorDataSourceSchemaAttribute, + }, + "created_at": schema.StringAttribute{ + Description: "When the password was created.", + Computed: true, + }, + "database_branch": schema.SingleNestedAttribute{ + Description: "The branch this password is allowed to access.", + Computed: true, + Attributes: 
branchForPasswordDataSourceSchemaAttribute, + }, + "deleted_at": schema.StringAttribute{ + Description: "When the password was deleted.", + Computed: true, + }, + "expires_at": schema.StringAttribute{ + Description: "When the password will expire.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The display name for the password.", + Computed: true, + }, + "region": schema.SingleNestedAttribute{ + Description: "The region in which this password can be used.", + Computed: true, + Attributes: regionDataSourceSchemaAttribute, + }, + "renewable": schema.BoolAttribute{ + Description: "Whether or not the password can be renewed.", + Computed: true, + }, + "role": schema.StringAttribute{ + Description: "The role for the password.", + Computed: true, + }, + "ttl_seconds": schema.Float64Attribute{ + Description: "Time to live (in seconds) for the password. The password will be invalid and unrenewable when TTL has passed.", + Computed: true, + }, + "username": schema.StringAttribute{ + Description: "The username for the password.", + Computed: true, + }, + + // manually removed from spec because currently buggy + // "integrations": schema.ListAttribute{Computed: true, ElementType: types.StringType}, + } +} + +type passwordDataSourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + Branch types.String `tfsdk:"branch"` + ReadOnlyRegionId types.String `tfsdk:"read_only_region_id"` + Id types.String `tfsdk:"id"` + AccessHostUrl types.String `tfsdk:"access_host_url"` + Actor *actorDataSourceModel `tfsdk:"actor"` + CreatedAt types.String `tfsdk:"created_at"` + DatabaseBranch *branchForPasswordDataSourceModel `tfsdk:"database_branch"` + DeletedAt types.String `tfsdk:"deleted_at"` + ExpiresAt types.String `tfsdk:"expires_at"` + Name types.String `tfsdk:"name"` + Region *regionDataSourceModel `tfsdk:"region"` + Renewable types.Bool `tfsdk:"renewable"` + Role types.String `tfsdk:"role"` + TtlSeconds 
types.Float64 `tfsdk:"ttl_seconds"` + Username types.String `tfsdk:"username"` + + // manually removed from spec because currently buggy + // Integrations types.List `tfsdk:"integrations"` +} + +func passwordFromClient(password *planetscale.Password, organization, database, branch string, readOnlyRegionID *string, diags diag.Diagnostics) *passwordDataSourceModel { + if password == nil { + return nil + } + return &passwordDataSourceModel{ + Organization: types.StringValue(organization), + Database: types.StringValue(database), + Branch: types.StringValue(branch), + ReadOnlyRegionId: types.StringPointerValue(readOnlyRegionID), + AccessHostUrl: types.StringValue(password.AccessHostUrl), + Actor: actorFromClient(password.Actor), + CreatedAt: types.StringValue(password.CreatedAt), + DatabaseBranch: branchForPasswordFromClient(&password.DatabaseBranch), + DeletedAt: types.StringPointerValue(password.DeletedAt), + ExpiresAt: types.StringPointerValue(password.ExpiresAt), + Id: types.StringValue(password.Id), + Name: types.StringValue(password.Name), + Region: regionFromClient(password.Region, diags), + Renewable: types.BoolValue(password.Renewable), + Role: types.StringValue(password.Role), + TtlSeconds: types.Float64Value(password.TtlSeconds), + Username: types.StringPointerValue(password.Username), + // manually removed from spec because currently buggy + // Integrations: stringsToListValue(password.Integrations, diags), + } +} + +func backupDataSourceSchemaAttribute(computedID bool) map[string]schema.Attribute { + return map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization this backup belongs to.", + Required: !computedID, Computed: computedID, + }, + "database": schema.StringAttribute{ + Description: "The database this backup belongs to.", + Required: !computedID, Computed: computedID, + }, + "branch": schema.StringAttribute{ + Description: "The branch this backup belongs to.", + Required: !computedID, Computed: 
computedID, + }, + "id": schema.StringAttribute{ + Description: "The ID of the backup.", + Required: !computedID, Computed: computedID, + }, + "name": schema.StringAttribute{ + Description: "The name of the backup.", + Computed: true, + }, + "actor": schema.SingleNestedAttribute{ + Description: "The actor that created the backup.", + Computed: true, Attributes: actorDataSourceSchemaAttribute, + }, + "backup_policy": schema.SingleNestedAttribute{ + Description: "The backup policy being followed.", + Computed: true, + Attributes: backupPolicyDataSourceSchemaAttribute, + }, + "created_at": schema.StringAttribute{ + Description: "When the backup was created.", + Computed: true, + }, + "estimated_storage_cost": schema.StringAttribute{ + Description: "The estimated storage cost of the backup.", + Computed: true, + }, + "required": schema.BoolAttribute{ + Description: "Whether or not the backup policy is required.", + Computed: true, + }, + "restored_branches": schema.ListAttribute{ + Description: "Branches that have been restored with this backup.", + Computed: true, ElementType: types.StringType, + }, + "size": schema.Float64Attribute{ + Description: "The size of the backup.", + Computed: true, + }, + "state": schema.StringAttribute{ + Description: "The current state of the backup.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the backup was last updated.", + Computed: true, + }, + } +} + +type backupDataSourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + Branch types.String `tfsdk:"branch"` + Name types.String `tfsdk:"name"` + Id types.String `tfsdk:"id"` + Actor *actorDataSourceModel `tfsdk:"actor"` + BackupPolicy *backupPolicyDataSourceModel `tfsdk:"backup_policy"` + CreatedAt types.String `tfsdk:"created_at"` + EstimatedStorageCost types.String `tfsdk:"estimated_storage_cost"` + Required types.Bool `tfsdk:"required"` + RestoredBranches types.List 
`tfsdk:"restored_branches"` + Size types.Float64 `tfsdk:"size"` + State types.String `tfsdk:"state"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +func backupFromClient(backup *planetscale.Backup, organization, database, branch string, diags diag.Diagnostics) *backupDataSourceModel { + if backup == nil { + return nil + } + restoredBranches := types.ListNull(types.StringType) + if backup.RestoredBranches != nil { + restoredBranches = stringsToListValue(*backup.RestoredBranches, diags) + } + return &backupDataSourceModel{ + Organization: types.StringValue(organization), + Database: types.StringValue(database), + Branch: types.StringValue(branch), + Name: types.StringValue(backup.Name), + Actor: actorFromClient(&backup.Actor), + BackupPolicy: backupPolicyFromClient(&backup.BackupPolicy), + CreatedAt: types.StringValue(backup.CreatedAt), + EstimatedStorageCost: types.StringValue(backup.EstimatedStorageCost), + Id: types.StringValue(backup.Id), + Required: types.BoolValue(backup.Required), + RestoredBranches: restoredBranches, + Size: types.Float64Value(backup.Size), + State: types.StringValue(backup.State), + UpdatedAt: types.StringValue(backup.UpdatedAt), + } +} + +func stringsToListValue(in []string, diags diag.Diagnostics) types.List { + out := make([]attr.Value, 0, len(in)) + for _, el := range in { + out = append(out, types.StringValue(el)) + } + lv, diag := types.ListValue(types.StringType, out) + if diag.HasError() { + diags.Append(diag...) 
+ } + return lv +} diff --git a/internal/provider/models_resource.go b/internal/provider/models_resource.go new file mode 100644 index 0000000..7eb983a --- /dev/null +++ b/internal/provider/models_resource.go @@ -0,0 +1,217 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +var actorResourceSchemaAttribute = map[string]schema.Attribute{ + "avatar_url": schema.StringAttribute{ + Computed: true, Description: "The URL of the actor's avatar", + }, + "display_name": schema.StringAttribute{ + Computed: true, Description: "The name of the actor", + }, + "id": schema.StringAttribute{ + Computed: true, Description: "The ID of the actor", + }, +} + +var actorResourceAttrTypes = map[string]attr.Type{ + "avatar_url": types.StringType, + "display_name": types.StringType, + "id": types.StringType, +} + +var regionResourceSchemaAttribute = map[string]schema.Attribute{ + "display_name": schema.StringAttribute{ + Description: "Name of the region.", + Computed: true, + }, + "enabled": schema.BoolAttribute{ + Description: "Whether or not the region is currently active.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID of the region.", + Computed: true, + }, + "location": schema.StringAttribute{ + Description: "Location of the region.", + Computed: true, + }, + "provider": schema.StringAttribute{ + Description: "Provider for the region (ex. 
AWS).", + Computed: true, + }, + "public_ip_addresses": schema.ListAttribute{ + Description: "Public IP addresses for the region.", + Computed: true, ElementType: types.StringType, + }, + "slug": schema.StringAttribute{ + Description: "The slug of the region.", + Computed: true, + }, +} + +var regionResourceAttrTypes = map[string]attr.Type{ + "display_name": types.StringType, + "enabled": types.BoolType, + "id": types.StringType, + "location": types.StringType, + "provider": types.StringType, + "public_ip_addresses": types.ListType{ElemType: types.StringType}, + "slug": types.StringType, +} + +var restoredFromBranchSchemaAttribute = map[string]schema.Attribute{ + "created_at": schema.StringAttribute{ + Description: "When the resource was created.", + Computed: true, + }, + "deleted_at": schema.StringAttribute{ + Description: "When the resource was deleted, if deleted.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "The ID for the resource.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name for the resource.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the resource was last updated.", + Computed: true, + }, +} + +var restoredFromBranchResourceAttrTypes = map[string]attr.Type{ + "created_at": types.StringType, + "deleted_at": types.StringType, + "id": types.StringType, + "name": types.StringType, + "updated_at": types.StringType, +} + +type restoredFromBranchResource struct { + CreatedAt types.String `tfsdk:"created_at"` + DeletedAt types.String `tfsdk:"deleted_at"` + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +var importDataSourceResourceAttrTypes = map[string]attr.Type{ + "database": basetypes.StringType{}, + "hostname": basetypes.StringType{}, + "port": basetypes.StringType{}, +} + +var importResourceAttrTypes = map[string]attr.Type{ + "data_source": basetypes.ObjectType{AttrTypes: 
importDataSourceResourceAttrTypes}, + "finished_at": basetypes.StringType{}, + "import_check_errors": basetypes.StringType{}, + "started_at": basetypes.StringType{}, + "state": basetypes.StringType{}, +} + +var backupPolicyResourceAttribute = map[string]schema.Attribute{ + "retention_unit": schema.StringAttribute{ + Description: "The unit for the retention period of the backup policy.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "retention_value": schema.Float64Attribute{ + Description: "A number value for the retention period of the backup policy.", + Required: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.RequiresReplace(), + }, + }, + // read-only + "id": schema.StringAttribute{ + Description: "The ID of the backup policy.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the backup policy was created.", + Computed: true, + }, + "frequency_unit": schema.StringAttribute{ + Description: "The unit for the frequency of the backup policy.", + Computed: true, + }, + "frequency_value": schema.Float64Attribute{ + Description: "A number value for the frequency of the backup policy.", + Computed: true, + }, + "last_ran_at": schema.StringAttribute{ + Description: "When the backup was last run.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the backup policy.", + Computed: true, + }, + "next_run_at": schema.StringAttribute{ + Description: "When the backup will next run.", + Computed: true, + }, + "schedule_day": schema.StringAttribute{ + Description: "Day of the week that the backup is scheduled.", + Computed: true, + }, + "schedule_week": schema.StringAttribute{ + Description: "Week of the month that the backup is scheduled.", + Computed: true, + }, + "target": schema.StringAttribute{ + Description: "Whether the backup policy is for a production or development database, or for a database branch.", + Computed: 
true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the backup policy was last updated.", + Computed: true, + }, +} + +var backupPolicyResourceAttrTypes = map[string]attr.Type{ + "created_at": basetypes.StringType{}, + "frequency_unit": basetypes.StringType{}, + "frequency_value": basetypes.Float64Type{}, + "id": basetypes.StringType{}, + "last_ran_at": basetypes.StringType{}, + "name": basetypes.StringType{}, + "next_run_at": basetypes.StringType{}, + "retention_unit": basetypes.StringType{}, + "retention_value": basetypes.Float64Type{}, + "schedule_day": basetypes.StringType{}, + "schedule_week": basetypes.StringType{}, + "target": basetypes.StringType{}, + "updated_at": basetypes.StringType{}, +} + +var databaseBranchResourceAttribute = map[string]schema.Attribute{ + "access_host_url": schema.StringAttribute{Computed: true}, + "id": schema.StringAttribute{Computed: true}, + "mysql_edge_address": schema.StringAttribute{Computed: true}, + "name": schema.StringAttribute{Computed: true}, + "production": schema.BoolAttribute{Computed: true}, +} + +var databaseBranchResourceAttrTypes = map[string]attr.Type{ + "access_host_url": basetypes.StringType{}, + "id": basetypes.StringType{}, + "mysql_edge_address": basetypes.StringType{}, + "name": basetypes.StringType{}, + "production": basetypes.BoolType{}, +} diff --git a/internal/provider/oauth_applications_data_source.go b/internal/provider/oauth_applications_data_source.go new file mode 100644 index 0000000..16118e8 --- /dev/null +++ b/internal/provider/oauth_applications_data_source.go @@ -0,0 +1,98 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &oauthApplicationsDataSource{} + _ datasource.DataSourceWithConfigure = 
&oauthApplicationsDataSource{} +) + +func newOAuthApplicationsDataSource() datasource.DataSource { + return &oauthApplicationsDataSource{} +} + +type oauthApplicationsDataSource struct { + client *planetscale.Client +} + +type oauthApplicationsDataSourceModel struct { + Organization string `tfsdk:"organization"` + Applications []oauthApplicationDataSourceModel `tfsdk:"applications"` +} + +func (d *oauthApplicationsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_oauth_applications" +} + +func (d *oauthApplicationsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale OAuth applications. (requires feature flag)", + MarkdownDescription: "A list of PlanetScale OAuth applications. (requires feature flag)", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "applications": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: oauthApplicationAttribute, + }, + }, + }, + } +} + +func (d *oauthApplicationsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *oauthApplicationsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *oauthApplicationsDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + res, err := d.client.ListOauthApplications(ctx, data.Organization, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to list oauth applications", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to list oauth applications", "no data") + return + } + + state := oauthApplicationsDataSourceModel{ + Organization: data.Organization, + Applications: make([]oauthApplicationDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Applications = append(state.Applications, *oauthApplicationFromClient(&item, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/organization_data_source.go b/internal/provider/organization_data_source.go new file mode 100644 index 0000000..d04eb13 --- /dev/null +++ b/internal/provider/organization_data_source.go @@ -0,0 +1,75 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &organizationDataSource{} + _ datasource.DataSourceWithConfigure = &organizationDataSource{} +) + +func newOrganizationDataSource() datasource.DataSource { + return &organizationDataSource{} +} + +type organizationDataSource struct { + client *planetscale.Client +} + +func (d *organizationDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_organization" +} + +func (d *organizationDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = 
schema.Schema{ + Description: "A PlanetScale organization.", + MarkdownDescription: "A PlanetScale organization.", + Attributes: organizationDataSourceSchemaAttribute(false), + } +} + +func (d *organizationDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *organizationDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *organizationDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res, err := d.client.GetOrganization(ctx, data.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Unable to read organization", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Received a nil organization", "") + return + } + data = organizationFromClient(&res.Organization) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/organization_data_source_test.go b/internal/provider/organization_data_source_test.go new file mode 100644 index 0000000..3cbf417 --- /dev/null +++ b/internal/provider/organization_data_source_test.go @@ -0,0 +1,46 @@ +package provider + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAccOrganizationDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccOrganizationDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.planetscale_organization.test", "name", "planetscale-terraform-testing"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "admin_only_production_access"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "billing_email"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "can_create_databases"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "created_at"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "database_count"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "features.insights"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "free_databases_remaining"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "has_past_due_invoices"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "id"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "idp_managed_roles"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "plan"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", 
"single_tenancy"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "sleeping_database_count"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "sso"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "sso_directory"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "updated_at"), + resource.TestCheckResourceAttrSet("data.planetscale_organization.test", "valid_billing_info"), + ), + }, + }, + }) +} + +const testAccOrganizationDataSourceConfig = ` +data "planetscale_organization" "test" { + name = "planetscale-terraform-testing" +} +` diff --git a/internal/provider/organization_regions_data_source.go b/internal/provider/organization_regions_data_source.go new file mode 100644 index 0000000..d04a7d6 --- /dev/null +++ b/internal/provider/organization_regions_data_source.go @@ -0,0 +1,100 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &organizationRegionsDataSource{} + _ datasource.DataSourceWithConfigure = &organizationRegionsDataSource{} +) + +func newOrganizationRegionsDataSource() datasource.DataSource { + return &organizationRegionsDataSource{} +} + +type organizationRegionsDataSource struct { + client *planetscale.Client +} + +type organizationRegionsDataSourceModel struct { + Organization string `tfsdk:"organization"` + Regions []regionDataSourceModel `tfsdk:"regions"` +} + +func (d *organizationRegionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_organization_regions" +} + +func (d *organizationRegionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp 
*datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale regions for the organization.", + MarkdownDescription: "A list of PlanetScale regions for the organization.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "regions": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: regionDataSourceSchemaAttribute, + }, + }, + }, + } +} + +func (d *organizationRegionsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *organizationRegionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *organizationRegionsDataSourceModel + + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + orgName := data.Organization + + res, err := d.client.ListRegionsForOrganization(ctx, orgName, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read organization regions", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read organization regions", "no data") + return + } + state := organizationRegionsDataSourceModel{ + Organization: data.Organization, + Regions: make([]regionDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Regions = append(state.Regions, *regionFromClient(&item, resp.Diagnostics)) + if resp.Diagnostics.HasError() { + return + } + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/organization_regions_data_source_test.go b/internal/provider/organization_regions_data_source_test.go new file mode 100644 index 0000000..c73ecf5 --- /dev/null +++ b/internal/provider/organization_regions_data_source_test.go @@ -0,0 +1,53 @@ +package provider + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +var regions = []string{ + "aws-us-east-2", + "us-east", + "us-west", + "eu-west", + "ap-south", + "ap-southeast", + "ap-northeast", + "eu-central", + "aws-ap-southeast-2", + "aws-sa-east-1", + "gcp-us-central1", + "aws-eu-west-2", + "gcp-us-east4", + "gcp-northamerica-northeast1", + "gcp-asia-northeast3", +} + +func TestAccOrganizationRegionsDataSource(t *testing.T) { + orgName := "planetscale-terraform-testing" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccOrganizationRegionsDataSourceConfig(orgName), + Check: resource.ComposeAggregateTestCheckFunc( + 
resource.TestCheckResourceAttrWith("data.planetscale_organization_regions.test", "regions.#", checkIntegerMin(1)), + resource.TestCheckResourceAttrWith("data.planetscale_organization_regions.test", "regions.0.slug", checkOneOf(regions...)), + ), + }, + }, + }) +} + +func testAccOrganizationRegionsDataSourceConfig(org string) string { + return fmt.Sprintf(` + data "planetscale_organization_regions" "test" { + organization = %[1]q + }`, + org, + ) +} diff --git a/internal/provider/organizations_data_source.go b/internal/provider/organizations_data_source.go new file mode 100644 index 0000000..b976d1f --- /dev/null +++ b/internal/provider/organizations_data_source.go @@ -0,0 +1,80 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &organizationsDataSource{} + _ datasource.DataSourceWithConfigure = &organizationsDataSource{} +) + +func newOrganizationsDataSource() datasource.DataSource { + return &organizationsDataSource{} +} + +type organizationsDataSource struct { + client *planetscale.Client +} + +type organizationsDataSourceModel struct { + Organizations []organizationDataSourceModel `tfsdk:"organizations"` +} + +func (d *organizationsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_organizations" +} + +func (d *organizationsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale organizations.", + MarkdownDescription: "A list of PlanetScale organizations.", + Attributes: map[string]schema.Attribute{ + "organizations": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: organizationDataSourceSchemaAttribute(true), + }, + }, + }, + } +} + +func (d *organizationsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *organizationsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + res, err := d.client.ListOrganizations(ctx, nil, nil) + if err != nil { + resp.Diagnostics.AddError("Unable to read organizations", err.Error()) + return + } + state := organizationsDataSourceModel{ + Organizations: make([]organizationDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Organizations = append(state.Organizations, *organizationFromClient(&item)) + } + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/organizations_data_source_test.go b/internal/provider/organizations_data_source_test.go new file mode 100644 index 0000000..b37a705 --- /dev/null +++ b/internal/provider/organizations_data_source_test.go @@ -0,0 +1,47 @@ +package provider + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAccOrganizationsDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testAccOrganizationsDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("data.planetscale_organizations.test", "organizations.#", "1"), + resource.TestCheckResourceAttr("data.planetscale_organizations.test", "organizations.0.name", "planetscale-terraform-testing"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.admin_only_production_access"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.billing_email"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.can_create_databases"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.created_at"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.database_count"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.features.insights"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.free_databases_remaining"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.has_past_due_invoices"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", 
"organizations.0.id"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.idp_managed_roles"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.plan"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.single_tenancy"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.sleeping_database_count"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.sso"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.sso_directory"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.updated_at"), + resource.TestCheckResourceAttrSet("data.planetscale_organizations.test", "organizations.0.valid_billing_info"), + ), + }, + }, + }) +} + +const testAccOrganizationsDataSourceConfig = ` +data "planetscale_organizations" "test" { + +} +` diff --git a/internal/provider/password_data_source.go b/internal/provider/password_data_source.go new file mode 100644 index 0000000..1120cc8 --- /dev/null +++ b/internal/provider/password_data_source.go @@ -0,0 +1,90 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &passwordDataSource{} + _ datasource.DataSourceWithConfigure = &passwordDataSource{} +) + +func newPasswordDataSource() datasource.DataSource { + return &passwordDataSource{} +} + +type passwordDataSource struct { + client *planetscale.Client +} + +func (d *passwordDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_password" +} + +func (d 
*passwordDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale database password.", + MarkdownDescription: "A PlanetScale database password.", + Attributes: passwordDataSourceSchemaAttribute(false), + } +} + +func (d *passwordDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *passwordDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *passwordDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res, err := d.client.GetPassword( + ctx, + data.Organization.ValueString(), + data.Database.ValueString(), + data.Branch.ValueString(), + data.Id.ValueString(), + data.ReadOnlyRegionId.ValueStringPointer(), + ) + if err != nil { + resp.Diagnostics.AddError("Unable to read database password", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database password", "no data") + return + } + state := passwordFromClient( + &res.Password, + data.Organization.ValueString(), + data.Database.ValueString(), + data.Branch.ValueString(), + data.ReadOnlyRegionId.ValueStringPointer(), + resp.Diagnostics, + ) + if resp.Diagnostics.HasError() { + return + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/password_resource.go b/internal/provider/password_resource.go new file mode 100644 index 0000000..850eaa6 --- /dev/null +++ b/internal/provider/password_resource.go @@ -0,0 +1,488 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package provider + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ resource.Resource = &passwordResource{} +var _ resource.ResourceWithImportState = &passwordResource{} + +func newPasswordResource() resource.Resource { + return &passwordResource{} +} + +// passwordResource defines the resource implementation. 
+type passwordResource struct { + client *planetscale.Client +} + +type passwordResourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + Branch types.String `tfsdk:"branch"` + Id types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + AccessHostUrl types.String `tfsdk:"access_host_url"` + Actor types.Object `tfsdk:"actor"` + CreatedAt types.String `tfsdk:"created_at"` + DatabaseBranch types.Object `tfsdk:"database_branch"` + DeletedAt types.String `tfsdk:"deleted_at"` + ExpiresAt types.String `tfsdk:"expires_at"` + Region types.Object `tfsdk:"region"` + Renewable types.Bool `tfsdk:"renewable"` + Role types.String `tfsdk:"role"` + TtlSeconds types.Float64 `tfsdk:"ttl_seconds"` + Username types.String `tfsdk:"username"` + + PlainText types.String `tfsdk:"plaintext"` + + // manually removed from spec because currently buggy + // Integrations types.List `tfsdk:"integrations"` +} + +func passwordResourceFromClient(ctx context.Context, password *planetscale.Password, organization, database, branch, plainText types.String, diags diag.Diagnostics) *passwordResourceModel { + if password == nil { + return nil + } + actor, diags := types.ObjectValueFrom(ctx, actorResourceAttrTypes, password.Actor) + diags.Append(diags...) + databaseBranch, diags := types.ObjectValueFrom(ctx, databaseBranchResourceAttrTypes, password.DatabaseBranch) + diags.Append(diags...) + region, diags := types.ObjectValueFrom(ctx, regionResourceAttrTypes, password.Region) + diags.Append(diags...) 
+ return &passwordResourceModel{ + Organization: organization, + Database: database, + Branch: branch, + + Name: types.StringValue(password.Name), + AccessHostUrl: types.StringValue(password.AccessHostUrl), + Actor: actor, + CreatedAt: types.StringValue(password.CreatedAt), + DatabaseBranch: databaseBranch, + DeletedAt: types.StringPointerValue(password.DeletedAt), + ExpiresAt: types.StringPointerValue(password.ExpiresAt), + Id: types.StringValue(password.Id), + Region: region, + Renewable: types.BoolValue(password.Renewable), + Role: types.StringValue(password.Role), + TtlSeconds: types.Float64Value(password.TtlSeconds), + Username: types.StringPointerValue(password.Username), + + PlainText: plainText, + + // manually removed from spec because currently buggy + // Integrations: stringsToListValue(password.Integrations, diags), + } +} + +func passwordWithPlaintextResourceFromClient(ctx context.Context, password *planetscale.PasswordWithPlaintext, organization, database, branch types.String, diags diag.Diagnostics) *passwordResourceModel { + if password == nil { + return nil + } + actor, diags := types.ObjectValueFrom(ctx, actorResourceAttrTypes, password.Actor) + diags.Append(diags...) + databaseBranch, diags := types.ObjectValueFrom(ctx, databaseBranchResourceAttrTypes, password.DatabaseBranch) + diags.Append(diags...) + region, diags := types.ObjectValueFrom(ctx, regionResourceAttrTypes, password.Region) + diags.Append(diags...) 
+ return &passwordResourceModel{ + Organization: organization, + Database: database, + Branch: branch, + + Name: types.StringValue(password.Name), + AccessHostUrl: types.StringValue(password.AccessHostUrl), + Actor: actor, + CreatedAt: types.StringValue(password.CreatedAt), + DatabaseBranch: databaseBranch, + DeletedAt: types.StringPointerValue(password.DeletedAt), + ExpiresAt: types.StringPointerValue(password.ExpiresAt), + Id: types.StringValue(password.Id), + Region: region, + Renewable: types.BoolValue(password.Renewable), + Role: types.StringValue(password.Role), + TtlSeconds: types.Float64Value(password.TtlSeconds), + Username: types.StringPointerValue(password.Username), + + PlainText: types.StringValue(password.PlainText), + + // manually removed from spec because currently buggy + // Integrations: stringsToListValue(password.Integrations, diags), + } +} + +func (r *passwordResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_password" +} + +func (r *passwordResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale database password.", + MarkdownDescription: "A PlanetScale database password.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{ + Description: "The organization this database branch password belongs to.", + Required: true, PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "database": schema.StringAttribute{ + Description: "The datanase this branch password belongs to.", + Required: true, PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "branch": schema.StringAttribute{ + Description: "The branch this password belongs to.", + Required: true, PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + 
"role": schema.StringAttribute{ + Description: "The role for the password.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIfConfigured(), + }, + }, + "ttl_seconds": schema.Float64Attribute{ + Description: "Time to live (in seconds) for the password. The password will be invalid and unrenewable when TTL has passed.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.RequiresReplaceIfConfigured(), + }, + }, + // updatable + "name": schema.StringAttribute{ + Description: "The display name for the password.", + Optional: true, + }, + + // read-only + "id": schema.StringAttribute{ + Description: "The ID for the password.", + Computed: true, + }, + "actor": schema.SingleNestedAttribute{ + Description: "The actor that created this branch.", + Computed: true, Attributes: actorResourceSchemaAttribute, + }, + "database_branch": schema.SingleNestedAttribute{ + Description: "The branch this password is allowed to access.", + Computed: true, Attributes: databaseBranchResourceAttribute, + }, + "region": schema.SingleNestedAttribute{ + Description: "The region in which this password can be used.", + Computed: true, Attributes: regionResourceSchemaAttribute, + }, + "access_host_url": schema.StringAttribute{ + Description: "The host URL for the password.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the password was created.", + Computed: true, + }, + "deleted_at": schema.StringAttribute{ + Description: "When the password was deleted.", + Computed: true, + }, + "expires_at": schema.StringAttribute{ + Description: "When the password will expire.", + Computed: true, + }, + "renewable": schema.BoolAttribute{ + Description: "Whether or not the password can be renewed.", + Computed: true, + }, + "username": schema.StringAttribute{ + Description: "The username for the password.", + Computed: true, + }, + + // read-only, sensitive + 
"plaintext": schema.StringAttribute{ + Description: "The plaintext password, only available if the password was created by this provider.", + Sensitive: true, Computed: true}, + + // manually removed from spec because currently buggy + // "integrations": schema.ListAttribute{Computed: true, ElementType: types.StringType}, + }, + } +} + +func (r *passwordResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + r.client = client +} + +func (r *passwordResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var data *passwordResourceModel + tflog.Info(ctx, "getting current password resource from plan") + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + branch := data.Branch + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + + name := data.Name + role := data.Role + ttl := data.TtlSeconds + + createReq := planetscale.CreatePasswordReq{ + Name: name.ValueStringPointer(), + Role: role.ValueStringPointer(), + Ttl: ttl.ValueFloat64Pointer(), + } + res, err := r.client.CreatePassword(ctx, org.ValueString(), database.ValueString(), branch.ValueString(), createReq) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create password, got error: %s", err)) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to create passwords", "no data") + return + } + data = passwordWithPlaintextResourceFromClient( + ctx, + &res.PasswordWithPlaintext, + data.Organization, + data.Database, + data.Branch, + resp.Diagnostics, + ) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *passwordResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var data *passwordResourceModel + + tflog.Info(ctx, "getting current password resource from state") + // Read Terraform prior state data into the model + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + branch := data.Branch + id := data.Id + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if id.IsNull() || id.IsUnknown() || id.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("id"), "id is required", "an ID must be provided and cannot be empty") + return + } + + res, err := r.client.GetPassword(ctx, + org.ValueString(), + database.ValueString(), + branch.ValueString(), + id.ValueString(), + nil, // not sure why this would need a region id + ) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read password, got error: %s", err)) + return + } + data = passwordResourceFromClient( + ctx, + &res.Password, + data.Organization, + data.Database, + data.Branch, + data.PlainText, + resp.Diagnostics, + ) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *passwordResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var ( + old *passwordResourceModel + data *passwordResourceModel + ) + resp.Diagnostics.Append(req.State.Get(ctx, &old)...) + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + org := data.Organization + database := data.Database + branch := data.Branch + id := data.Id + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if id.IsNull() || id.IsUnknown() || id.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("id"), "id is required", "an ID must be provided and cannot be empty") + return + } + + changedUpdatableSettings := false + name := stringIfDifferent(old.Name, data.Name, &changedUpdatableSettings) + + var state *passwordResourceModel + if changedUpdatableSettings && name != nil { + updateReq := planetscale.UpdatePasswordReq{ + Name: *name, + } + res, err := r.client.UpdatePassword( + ctx, + org.ValueString(), + database.ValueString(), + branch.ValueString(), + id.ValueString(), + updateReq, + ) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update password settings, got error: %s", err)) + return + } + state = passwordResourceFromClient( + ctx, + &res.Password, + data.Organization, + data.Database, + data.Branch, + data.PlainText, + resp.Diagnostics, + ) + if resp.Diagnostics.HasError() { + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+} + +func (r *passwordResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data *passwordResourceModel + + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + org := data.Organization + database := data.Database + branch := data.Branch + id := data.Id + + if org.IsNull() || org.IsUnknown() || org.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("organization"), "organization is required", "an organization must be provided and cannot be empty") + return + } + if database.IsNull() || database.IsUnknown() || database.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("database"), "database is required", "a database must be provided and cannot be empty") + return + } + if branch.IsNull() || branch.IsUnknown() || branch.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("branch"), "branch is required", "a branch must be provided and cannot be empty") + return + } + if id.IsNull() || id.IsUnknown() || id.ValueString() == "" { + resp.Diagnostics.AddAttributeError(path.Root("id"), "id is required", "an ID must be provided and cannot be empty") + return + } + + res, err := r.client.DeletePassword(ctx, org.ValueString(), database.ValueString(), branch.ValueString(), id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete password, got error: %s", err)) + return + } + _ = res +} + +func (r *passwordResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + if len(idParts) != 4 || idParts[0] == "" || idParts[1] == "" || idParts[2] == "" || idParts[3] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: organization,database,name,id. 
Got: %q", req.ID), + ) + return + } + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("organization"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("database"), idParts[1])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("branch"), idParts[2])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[3])...) +} diff --git a/internal/provider/passwords_data_source.go b/internal/provider/passwords_data_source.go new file mode 100644 index 0000000..386cb6a --- /dev/null +++ b/internal/provider/passwords_data_source.go @@ -0,0 +1,121 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" +) + +var ( + _ datasource.DataSource = &passwordsDataSource{} + _ datasource.DataSourceWithConfigure = &passwordsDataSource{} +) + +func newPasswordsDataSource() datasource.DataSource { + return &passwordsDataSource{} +} + +type passwordsDataSource struct { + client *planetscale.Client +} + +type passwordsDataSourceModel struct { + Organization types.String `tfsdk:"organization"` + Database types.String `tfsdk:"database"` + Branch types.String `tfsdk:"branch"` + ReadOnlyRegionId types.String `tfsdk:"read_only_region_id"` + Passwords []passwordDataSourceModel `tfsdk:"passwords"` +} + +func (d *passwordsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_passwords" +} + +func (d *passwordsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A list of PlanetScale database passwords.", + MarkdownDescription: "A list of 
PlanetScale database passwords.", + Attributes: map[string]schema.Attribute{ + "organization": schema.StringAttribute{Required: true}, + "database": schema.StringAttribute{Required: true}, + "branch": schema.StringAttribute{Required: true}, + "read_only_region_id": schema.StringAttribute{Optional: true}, + "passwords": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: passwordDataSourceSchemaAttribute(true), + }, + }, + }, + } +} + +func (d *passwordsDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *passwordsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *passwordsDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + res, err := d.client.ListPasswords( + ctx, + data.Organization.ValueString(), + data.Database.ValueString(), + data.Branch.ValueString(), + data.ReadOnlyRegionId.ValueStringPointer(), + nil, + nil, + ) + if err != nil { + resp.Diagnostics.AddError("Unable to read database passwords", err.Error()) + return + } + if res == nil { + resp.Diagnostics.AddError("Unable to read database passwords", "no data") + return + } + state := passwordsDataSourceModel{ + Organization: data.Organization, + Database: data.Database, + Branch: data.Branch, + ReadOnlyRegionId: data.ReadOnlyRegionId, + Passwords: make([]passwordDataSourceModel, 0, len(res.Data)), + } + for _, item := range res.Data { + item := item + state.Passwords = append(state.Passwords, *passwordFromClient( + &item, + data.Organization.ValueString(), + data.Database.ValueString(), + data.Branch.ValueString(), + data.ReadOnlyRegionId.ValueStringPointer(), + resp.Diagnostics, + )) + if resp.Diagnostics.HasError() { + return + } + } + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go new file mode 100644 index 0000000..de2833b --- /dev/null +++ b/internal/provider/provider.go @@ -0,0 +1,213 @@ +package provider + +import ( + "context" + "net/http" + "net/http/httputil" + "net/url" + "os" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/pkg/errors" + "github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale" + "golang.org/x/oauth2" +) + +var _ provider.Provider = &PlanetScaleProvider{} + +type PlanetScaleProvider struct { + version string +} + +type PlanetScaleProviderModel struct { + Endpoint types.String `tfsdk:"endpoint"` + + ServiceTokenName types.String `tfsdk:"service_token_name"` +} + +func (p *PlanetScaleProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "planetscale" + resp.Version = p.version +} + +func (p *PlanetScaleProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "endpoint": schema.StringAttribute{ + MarkdownDescription: "Example provider attribute", + Optional: true, + }, + "service_token_name": schema.StringAttribute{ + MarkdownDescription: "Name of the service token to use", + Optional: true, + }, + }, + } +} + +func (p *PlanetScaleProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + 
var data PlanetScaleProviderModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + var ( + initrt = debugRoundTripper(func(req, res []byte) { + tflog.Debug(ctx, "roundtripper", map[string]interface{}{ + "req": string(req), + "res": string(res), + }) + }, http.DefaultTransport) + rt http.RoundTripper + baseURL *url.URL + ) + if !data.Endpoint.IsNull() { + u, err := url.Parse(data.Endpoint.ValueString()) + if err != nil { + resp.Diagnostics.AddAttributeError(path.Root("endpoint"), "invalid URL", err.Error()) + return + } + baseURL = u + } + var ( + accessToken = os.Getenv("PLANETSCALE_ACCESS_TOKEN") + serviceTokenName = os.Getenv("PLANETSCALE_SERVICE_TOKEN_NAME") + serviceTokenValue = os.Getenv("PLANETSCALE_SERVICE_TOKEN") + ) + if !data.ServiceTokenName.IsNull() { + serviceTokenName = data.ServiceTokenName.ValueString() + } + switch { + case accessToken != "" && serviceTokenName == "" && serviceTokenValue == "": + tok := &oauth2.Token{AccessToken: accessToken} + rt = &oauth2.Transport{Base: initrt, Source: oauth2.StaticTokenSource(tok)} + case accessToken == "" && serviceTokenName != "" && serviceTokenValue != "": + rt = roundTripperFunc(func(r *http.Request) (*http.Response, error) { + r.Header.Set("Authorization", serviceTokenName+":"+serviceTokenValue) + return initrt.RoundTrip(r) + }) + case accessToken == "" && serviceTokenName == "" && serviceTokenValue == "": + resp.Diagnostics.AddError("Missing PlanetScale credentials.", + "You must set either of:\n"+ + "- `PLANETSCALE_ACCESS_TOKEN`\n"+ + "- `PLANETSCALE_SERVICE_TOKEN_NAME` and `PLANETSCALE_SERVICE_TOKEN`") + case accessToken == "" && serviceTokenName != "" && serviceTokenValue == "", + accessToken == "" && serviceTokenName == "" && serviceTokenValue != "": + resp.Diagnostics.AddError("Incomplete PlanetScale service token credentials.", + "Both of `PLANETSCALE_SERVICE_TOKEN_NAME` and `PLANETSCALE_SERVICE_TOKEN` must be set.") + default: + 
resp.Diagnostics.AddError("Ambiguous PlanetScale credentials.", "You must set only either of an access token or a service token, but not both:\n"+ + "- `PLANETSCALE_ACCESS_TOKEN`\n"+ + "- `PLANETSCALE_SERVICE_TOKEN_NAME` and `PLANETSCALE_SERVICE_TOKEN`") + } + if resp.Diagnostics.HasError() { + return + } + + client := planetscale.NewClient(&http.Client{Transport: rt}, baseURL) + + resp.DataSourceData = client + resp.ResourceData = client +} + +func (p *PlanetScaleProvider) Resources(ctx context.Context) []func() resource.Resource { + return []func() resource.Resource{ + newDatabaseResource, + newBranchResource, + newBackupResource, + newPasswordResource, + } +} + +func (p *PlanetScaleProvider) DataSources(ctx context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + newOrganizationsDataSource, + newOrganizationDataSource, + newOrganizationRegionsDataSource, + newDatabasesDataSource, + newDatabaseDataSource, + newDatabaseRegionsDataSource, + newDatabaseReadOnlyRegionsDataSource, + newBranchesDataSource, + newBranchDataSource, + newBranchSchemaDataSource, + newBranchSchemaLintDataSource, + newBackupDataSource, + newBackupsDataSource, + newPasswordDataSource, + newPasswordsDataSource, + newOAuthApplicationsDataSource, + newUserDataSource, + } +} + +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &PlanetScaleProvider{ + version: version, + } + } +} + +func debugRoundTripper(log func(req, res []byte), tpt http.RoundTripper) http.RoundTripper { + return roundTripperFunc(func(r *http.Request) (*http.Response, error) { + debugReq, err := httputil.DumpRequestOut(r, true) + if err != nil { + return nil, errors.Wrap(err, "dumping request output") + } + res, err := tpt.RoundTrip(r) + if res == nil { + return res, err + } + debugRes, err := httputil.DumpResponse(res, true) + if err != nil { + return nil, errors.Wrap(err, "dumping response output") + } + log(debugReq, debugRes) + return res, err 
+ }) +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (fn roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return fn(req) +} + +func boolIfDifferent(oldBool, newBool types.Bool, wasChanged *bool) *bool { + if oldBool.Equal(newBool) { + return nil + } + *wasChanged = true + return boolValueIfKnown(newBool) +} + +func stringIfDifferent(oldString, newString types.String, wasChanged *bool) *string { + if oldString == newString { + return nil + } + *wasChanged = true + return stringValueIfKnown(newString) +} + +func boolValueIfKnown(v basetypes.BoolValue) *bool { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueBoolPointer() +} + +func stringValueIfKnown(v basetypes.StringValue) *string { + if v.IsUnknown() || v.IsNull() { + return nil + } + return v.ValueStringPointer() +} diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go new file mode 100644 index 0000000..9884e38 --- /dev/null +++ b/internal/provider/provider_test.go @@ -0,0 +1,55 @@ +package provider + +import ( + "fmt" + "os" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +var testAccProtoV6ProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ + "planetscale": providerserver.NewProtocol6WithError(New("test")()), +} + +func testAccPreCheck(t *testing.T) { + var ( + accessToken = os.Getenv("PLANETSCALE_ACCESS_TOKEN") + serviceTokenName = os.Getenv("PLANETSCALE_SERVICE_TOKEN_NAME") + serviceTokenValue = os.Getenv("PLANETSCALE_SERVICE_TOKEN") + ) + switch { + case accessToken != "": + case serviceTokenName != "" && serviceTokenValue != "": + default: + t.Fatalf("must have either PLANETSCALE_ACCESS_TOKEN or both of (PLANETSCALE_SERVICE_TOKEN_NAME, PLANETSCALE_SERVICE_TOKEN)") + } +} + +func checkIntegerMin(min 
int) resource.CheckResourceAttrWithFunc {
+	return func(value string) error {
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		if v < min {
+			return fmt.Errorf("value %d is less than %d", v, min)
+		}
+		return nil
+	}
+}
+
+// checkOneOf returns a check that passes when the attribute value equals one of
+// the given candidate strings.
+func checkOneOf(values ...string) resource.CheckResourceAttrWithFunc {
+	return func(value string) error {
+		for _, valid := range values {
+			if value == valid {
+				return nil
+			}
+		}
+		return fmt.Errorf("value %q is not one of %s", value, strings.Join(values, ", "))
+	}
+}
diff --git a/internal/provider/user_data_source.go b/internal/provider/user_data_source.go
new file mode 100644
index 0000000..e05041d
--- /dev/null
+++ b/internal/provider/user_data_source.go
@@ -0,0 +1,157 @@
+package provider
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/planetscale/terraform-provider-planetscale/internal/client/planetscale"
+)
+
+var (
+	_ datasource.DataSource              = &userDataSource{}
+	_ datasource.DataSourceWithConfigure = &userDataSource{}
+)
+
+func newUserDataSource() datasource.DataSource {
+	return &userDataSource{}
+}
+
+type userDataSource struct {
+	client *planetscale.Client
+}
+
+type userDataSourceModel struct {
+	Id                      *string `tfsdk:"id"`
+	Name                    *string `tfsdk:"name"`
+	AvatarUrl               *string `tfsdk:"avatar_url"`
+	CreatedAt               *string `tfsdk:"created_at"`
+	DefaultOrganizationId   *string `tfsdk:"default_organization_id"`
+	DirectoryManaged        *bool   `tfsdk:"directory_managed"`
+	DisplayName             *string `tfsdk:"display_name"`
+	Email                   *string `tfsdk:"email"`
+	EmailVerified           *bool   `tfsdk:"email_verified"`
+	Managed                 *bool   `tfsdk:"managed"`
+	Sso                     *bool   `tfsdk:"sso"`
+	TwoFactorAuthConfigured *bool   `tfsdk:"two_factor_auth_configured"`
+	UpdatedAt               *string `tfsdk:"updated_at"`
+}
+
+func (d *userDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName
= req.ProviderTypeName + "_user" +} + +func (d *userDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "A PlanetScale user.", + MarkdownDescription: "A PlanetScale user.", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "The ID of the user.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "The name of the user.", + Computed: true, + }, + "avatar_url": schema.StringAttribute{ + Description: "The URL source of the user's avatar.", + Computed: true, + }, + "created_at": schema.StringAttribute{ + Description: "When the user was created.", + Computed: true, + }, + "default_organization_id": schema.StringAttribute{ + Description: "The default organization for the user.", + Computed: true, + }, + "directory_managed": schema.BoolAttribute{ + Description: "Whether or not the user is managed by a WorkOS directory.", + Computed: true, + }, + "display_name": schema.StringAttribute{ + Description: "The display name of the user.", + Computed: true, + }, + "email": schema.StringAttribute{ + Description: "The email of the user.", + Computed: true, + }, + "email_verified": schema.BoolAttribute{ + Description: "Whether or not the user is verified by email.", + Computed: true, + }, + "managed": schema.BoolAttribute{ + Description: "Whether or not the user is managed by an authentication provider.", + Computed: true, + }, + "sso": schema.BoolAttribute{ + Description: "Whether or not the user is managed by WorkOS.", + Computed: true, + }, + "two_factor_auth_configured": schema.BoolAttribute{ + Description: "Whether or not the user has configured two factor authentication.", + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Description: "When the user was last updated.", + Computed: true, + }, + }, + } +} + +func (d *userDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp 
*datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*planetscale.Client) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *planetscale.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.client = client +} + +func (d *userDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *userDataSourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + res200, err := d.client.GetCurrentUser(ctx) + if err != nil { + resp.Diagnostics.AddError("Unable to read user", err.Error()) + return + } + if res200 == nil { + resp.Diagnostics.AddError("Unable to read user", "no data") + return + } + state := userDataSourceModel{ + AvatarUrl: res200.AvatarUrl, + CreatedAt: res200.CreatedAt, + DefaultOrganizationId: res200.DefaultOrganizationId, + DirectoryManaged: res200.DirectoryManaged, + DisplayName: res200.DisplayName, + Email: res200.Email, + EmailVerified: res200.EmailVerified, + Id: res200.Id, + Managed: res200.Managed, + Name: res200.Name, + Sso: res200.Sso, + TwoFactorAuthConfigured: res200.TwoFactorAuthConfigured, + UpdatedAt: res200.UpdatedAt, + } + + diags := resp.State.Set(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..cf11dab --- /dev/null +++ b/main.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "context" + "flag" + "log" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/planetscale/terraform-provider-planetscale/internal/provider" +) + +// Run "go generate" to format example terraform files and generate the docs for the registry/website + +// If you do not have terraform installed, you can remove the formatting command, but its suggested to +// ensure the documentation is formatted properly. +//go:generate terraform fmt -recursive ./examples/ + +//go:generate script/generate + +// Run the docs generation tool, check its repository for more information on how it works and how docs +// can be customized. +//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs + +var ( + // these will be set by the goreleaser configuration + // to appropriate values for the compiled binary. + version string = "dev" + + // goreleaser can pass other information to the main package, such as the specific commit + // https://goreleaser.com/cookbooks/using-main.version/ +) + +func main() { + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.Parse() + + opts := providerserver.ServeOpts{ + Address: "registry.terraform.io/planetscale/planetscale", + Debug: debug, + } + + err := providerserver.Serve(context.Background(), provider.New(version), opts) + + if err != nil { + log.Fatal(err.Error()) + } +} diff --git a/openapi-spec.json b/openapi-spec.json new file mode 100644 index 0000000..4107a58 --- /dev/null +++ b/openapi-spec.json @@ -0,0 +1,5504 @@ +{ + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "schemes": [ + "https" + ], + "swagger": "2.0", + "info": { + "description": "\n

PlanetScale API

\n© 2023 PlanetScale, Inc.", + "title": "PlanetScale API", + "version": "v1", + "x-copyright": "© 2023 PlanetScale, Inc." + }, + "host": "api.planetscale.com", + "basePath": "/v1", + "paths": { + "/organizations": { + "get": { + "description": "When using a service token, returns the list of organizations the service token has access to. When using an OAuth token, returns the list of organizations the user has access to.\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_organizations` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "List organizations", + "operationId": "list-organizations", + "parameters": [ + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Gets the organizations for the current user", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/organization" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{name}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_organization`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | 
`read_organizations` |\n| Organization | `read_organization` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Get an organization", + "operationId": "get-an-organization", + "parameters": [ + { + "type": "string", + "description": "The name of the organization", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns an organization", + "schema": { + "$ref": "#/definitions/organization" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "patch": { + "description": "\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_organization` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "Update an organization", + "operationId": "update-an-organization", + "parameters": [ + { + "type": "string", + "description": "The name of the organization", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "billing_email": { + "description": "The billing email for the organization", + "type": "string" + }, + "idp_managed_roles": { + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale", + "type": "boolean" + }, + "require_admin_for_production_access": { + "description": "Whether or not only admins can access production", + "type": "boolean" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the updated organization", + "schema": { + "$ref": "#/definitions/organization" + } + }, + "401": { + "description": 
"Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{name}/regions": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_organization`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_organizations` |\n| Organization | `read_organization` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Organizations" + ], + "summary": "List regions for an organization", + "operationId": "list-regions-for-an-organization", + "parameters": [ + { + "type": "string", + "description": "The name of the organization", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns the organization's regions", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/region" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n 
`read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "List databases", + "operationId": "list-databases", + "parameters": [ + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Retrieves the databases for an organization", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/database" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `create_databases`\n\n**OAuth 
Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `create_databases` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "Create a database", + "operationId": "create-a-database", + "parameters": [ + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "cluster_size": { + "description": "The database cluster size. This is required for Scaler Pro databases. Options: PS_10, PS_20, PS_40, PS_80, PS_160, PS_320, PS_400, PS_640, PS_700, PS_900, PS_1280, PS_1400, PS_1800, PS_2100, PS_2560, PS_2700, PS_2800.", + "type": "string" + }, + "name": { + "description": "Name of the database", + "type": "string" + }, + "plan": { + "description": "The database billing plan. Options: 'hobby', 'scaler', or 'scaler_pro'.", + "type": "string" + }, + "region": { + "description": "The region the database will be deployed in. 
If left blank, defaults to the organization's default region.", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created database", + "schema": { + "$ref": "#/definitions/database" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "List branches", + "operationId": "list-branches", + "parameters": [ + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns database branches", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": 
"array", + "items": { + "$ref": "#/definitions/branch" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `create_branch`, `restore_production_branch_backup`, `restore_backup`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_branches`, `restore_production_branch_backups`, `restore_backups` |\n| Database | `write_branches`, `restore_production_branch_backups`, `restore_backups` |\n| Branch | `restore_backups` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Create a branch", + "operationId": "create-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "required": [ + "name", + "parent_branch" + ], + "properties": { + "backup_id": { + "description": "If provided, restores the backup's schema and data to the new branch. 
Must have `restore_production_branch_backup(s)` or `restore_backup(s)` access to do this.", + "type": "string" + }, + "name": { + "description": "The name of the branch", + "type": "string" + }, + "parent_branch": { + "description": "Parent branch", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created branch", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/backups": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_backups` |\n| Database | `read_backups` |\n| Branch | `read_backups` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Backups" + ], + "summary": "List backups", + "operationId": "list-backups", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned 
results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns database branch backups", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/backup" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_backups` |\n| Database | `write_backups` |\n| Branch | `write_backups` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Backups" + ], + "summary": "Create a backup", + "operationId": "create-a-backup", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "description": "Name for the backup", + "type": "string" + }, + "retention_unit": { + "description": "Unit for the retention period of the backup", + "type": "string", + "enum": [ + "hour", + "day", + "week", + "month", + "year" + ] + }, + "retention_value": { + "description": "Value between `1`` and `1000`` for the retention period of the backup 
(i.e retention_value `6`` and retention_unit `hour` means 6 hours)", + "type": "number" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created database branch backup", + "schema": { + "$ref": "#/definitions/backup" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_backups` |\n| Database | `read_backups` |\n| Branch | `read_backups` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Backups" + ], + "summary": "Get a backup", + "operationId": "get-a-backup", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID for the backup", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns a database branch backup", + "schema": { + "$ref": "#/definitions/backup" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": 
"Internal Server Error" + } + } + }, + "delete": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_backups`, `delete_production_branch_backups` |\n| Database | `delete_backups`, `delete_production_branch_backups` |\n| Branch | `delete_backups` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Backups" + ], + "summary": "Delete a backup", + "operationId": "delete-a-backup", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of the backup", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Delete a backup" + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/passwords": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | 
:---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "List passwords", + "operationId": "list-passwords", + "parameters": [ + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "A read-only region of the database branch. If present, the password results will be filtered to only those in the region", + "name": "read_only_region_id", + "in": "query" + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Gets the passwords for the database branch", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/password" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of 
the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, `create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "Create a password", + "operationId": "create-a-password", + "parameters": [ + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "description": "Optional name of the password", + "type": "string" + }, + "role": { + "description": "The database role of the password (i.e. admin)", + "type": "string", + "enum": [ + "reader", + "writer", + "admin", + "readwriter" + ] + }, + "ttl": { + "description": "Time to live (in seconds) for the password. 
The password will be invalid when TTL has passed", + "type": "number" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Creates a password", + "schema": { + "$ref": "#/definitions/password_with_plaintext" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "422": { + "description": "Unprocessable Content" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "Get a password", + "operationId": "get-a-password", + "parameters": [ + { + "type": "string", + "description": "The ID of the password", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "A read-only 
region of the database branch. If present, the password results will be filtered to only those in the region", + "name": "read_only_region_id", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Gets the password", + "schema": { + "$ref": "#/definitions/password" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "delete": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_production_branch_password`, `delete_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "Delete a password", + "operationId": "delete-a-password", + "parameters": [ + { + "type": "string", + "description": "The ID of the password", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Deletes the password" + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not 
Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "patch": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, `create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "Update a password", + "operationId": "update-a-password", + "parameters": [ + { + "type": "string", + "description": "The ID of the password", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "The name for the password", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Updates a password", + "schema": { + "$ref": "#/definitions/password" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + 
"/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, `create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branch passwords" + ], + "summary": "Renew a password", + "operationId": "renew-a-password", + "parameters": [ + { + "type": "string", + "description": "The ID of the password", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the branch the password belongs to", + "name": "branch", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the password belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the password belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "read_only_region_id": { + "description": "A read-only region of the database branch. 
If present, the password results will be filtered to only those in the region", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Updates a password", + "schema": { + "$ref": "#/definitions/password_with_plaintext" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Get a branch", + "operationId": "get-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns information about a branch", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + 
"delete": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_branches`, `delete_production_branches` |\n| Database | `delete_branches`, `delete_production_branches` |\n| Branch | `delete_branch` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Delete a branch", + "operationId": "delete-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Delete a branch" + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/demote": { + "post": { + "description": "Demotes a branch from production to development\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `demote_branches` |\n| Database | `demote_branches` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Demote a branch", + "operationId": "demote-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the 
branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns a development branch", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/promote": { + "post": { + "description": "Promotes a branch from development to production\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `promote_branches` |\n| Database | `promote_branches` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Promote a branch", + "operationId": "promote-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns a production branch", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + 
"description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations": { + "post": { + "description": "\n", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Enable safe migrations for a branch", + "operationId": "enable-safe-migrations-for-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the branch with safe migrations enabled", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "delete": { + "description": "\n", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Disable safe migrations for a branch", + "operationId": "disable-safe-migrations-for-a-branch", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": 
true + } + ], + "responses": { + "200": { + "description": "Returns the branch with safe migrations disabled", + "schema": { + "$ref": "#/definitions/branch" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/schema": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Get a branch schema", + "operationId": "get-a-branch-schema", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "If provided, the schema for this keyspace is returned", + "name": "keyspace", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Gets the schema for the branch", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/table_schema" + } + } + }, + "additionalProperties": false + } + }, + "401": { + 
"description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/schema/lint": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Database branches" + ], + "summary": "Lint a branch schema", + "operationId": "lint-a-branch-schema", + "parameters": [ + { + "type": "string", + "description": "The name of the branch", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the branch belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the branch belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns a list of schema errors for a branch", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/lint_error" + } + } + }, + "additionalProperties": false + } + }, + 
"401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-queue": { + "get": { + "description": "The deploy queue returns the current list of deploy requests in the order they will be deployed.\n", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Get the deploy queue", + "operationId": "get-the-deploy-queue", + "parameters": [ + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deploy queue for a database", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/queued_deploy_request" + } + } + }, + "additionalProperties": false + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests": { + "get": { + "description": "List deploy requests for a database\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "List deploy requests", + "operationId": "list-deploy-requests", + "parameters": [ + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + 
"in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + }, + { + "type": "string", + "description": "Filter by state of the deploy request (open, closed, deployed)", + "name": "state", + "in": "query" + }, + { + "type": "string", + "description": "Filter by the name of the branch the deploy request is created from", + "name": "branch", + "in": "query" + }, + { + "type": "string", + "description": "Filter by the name of the branch the deploy request will be merged into", + "name": "into_branch", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns a list of deploy requests for a database", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/deploy_request" + } + } + }, + "additionalProperties": false + } + } + } + }, + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_requests`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_deploy_requests` |\n| Database | `write_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Create a deploy request", + "operationId": "create-a-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The name of the deploy request's database", + "name": 
"database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "branch": { + "description": "The name of the branch the deploy request is created from", + "type": "string" + }, + "into_branch": { + "description": "The name of the branch the deploy request will be merged into", + "type": "string" + }, + "notes": { + "description": "Notes about the deploy request", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created deploy request", + "schema": { + "$ref": "#/definitions/deploy_request_with_deployment" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Get a deploy request", + "operationId": "get-a-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns 
information about a deploy request", + "schema": { + "$ref": "#/definitions/deploy_request_with_deployment" + } + } + } + }, + "patch": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_deploy_requests` |\n| Database | `write_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Close a deploy request", + "operationId": "close-a-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "state": { + "description": "The deploy request will be updated to this state", + "type": "string", + "enum": [ + "closed" + ] + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the updated deploy request", + "schema": { + "$ref": "#/definitions/deploy_request_with_deployment" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | 
:---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Complete a gated deploy request", + "operationId": "complete-a-gated-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose deployment has been completed", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply": { + "put": { + "description": "Enables or disables the auto-apply setting for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Update auto-apply for deploy request", + "operationId": "update-auto-apply-for-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + 
"type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "enable": { + "description": "Whether or not to enable auto-apply for the deploy request", + "type": "boolean" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose auto-apply setting was updated", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Cancel a queued deploy request", + "operationId": "cancel-a-queued-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose deployment was canceled", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Complete an errored deploy", + "operationId": "complete-an-errored-deploy", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the completed deploy request", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Queue a deploy request", + "operationId": "queue-a-deploy-request", + "parameters": [ + { + 
"type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the deploy request belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the deploy request belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deployed deploy request", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deployment": { + "get": { + "description": "Get the deployment for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Get a deployment", + "operationId": "get-a-deployment", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deployment for a deploy request", + "schema": { + "$ref": "#/definitions/deployment" + } + } + } + } + }, + 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}/operations": { + "get": { + "description": "List deploy operations for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "List deploy operations", + "operationId": "list-deploy-operations", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the deploy request belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the deploy request belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns a list of deploy operations for the deploy request", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/deploy_operation" + } + } + }, + "additionalProperties": false + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert": { + "post": { + "description": "\n### Authorization\nA service token or OAuth token 
must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Complete a revert", + "operationId": "complete-a-revert", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deploy request that was reverted", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "List deploy request reviews", + "operationId": "list-deploy-request-reviews", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the deploy 
request belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the deploy request belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns an array of deploy request reviews", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/deploy_review" + } + } + }, + "additionalProperties": false + } + } + } + }, + "post": { + "description": "Review a deploy request by either approving or commenting on the deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `approve_deploy_request`, `review_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `approve_deploy_requests` |\n| Database | `approve_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Review a deploy request", + "operationId": "review-a-deploy-request", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the database the deploy request belongs to", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the deploy request belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "body": { + "description": "Deploy request review comments", + "type": "string" + }, + "state": { + "description": "Whether the review is a comment or approval. 
Service tokens must have corresponding access (either `approve_deploy_request` or `review_deploy_request`)", + "type": "string", + "enum": [ + "commented", + "approved" + ] + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created deploy request review", + "schema": { + "$ref": "#/definitions/deploy_review" + } + } + } + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert": { + "post": { + "description": "Skips the revert period for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Deploy requests" + ], + "summary": "Skip revert period", + "operationId": "skip-revert-period", + "parameters": [ + { + "type": "string", + "description": "The number of the deploy request", + "name": "number", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's database", + "name": "database", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the deploy request's organization", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose deploy revert was skipped", + "schema": { + "$ref": "#/definitions/deploy_request" + } + } + } + } + }, + "/organizations/{organization}/databases/{name}": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token 
Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |\n| Database | `read_database` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "Get a database", + "operationId": "get-a-database", + "parameters": [ + { + "type": "string", + "description": "The name of the database", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns a database", + "schema": { + "$ref": "#/definitions/database" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "delete": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_database`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_databases` |\n| Database | `delete_database` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "Delete a database", + "operationId": "delete-a-database", + "parameters": [ + { + "type": "string", + 
"description": "The name of the database", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Deletes a database" + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "patch": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_database`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_databases` |\n| Database | `write_database` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "Update database settings", + "operationId": "update-database-settings", + "parameters": [ + { + "type": "string", + "description": "The name of the database", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "allow_data_branching": { + "description": "Whether or not data branching is allowed on the database", + "type": "boolean" + }, + "automatic_migrations": { + "description": "Whether or not to copy migration data to new branches and in deploy requests.", + "type": "boolean" + }, + "default_branch": { + "description": "The default branch of the database", + "type": "string" + }, + "insights_raw_queries": { + "description": "Whether or not full queries should be collected from the database", + "type": "boolean" + }, + 
"migration_framework": { + "description": "A migration framework to use on the database", + "type": "string" + }, + "migration_table_name": { + "description": "Name of table to use as migration table for the database", + "type": "string" + }, + "production_branch_web_console": { + "description": "Whether or not the web console can be used on the production branch of the database", + "type": "boolean" + }, + "require_approval_for_deploy": { + "description": "Whether or not deploy requests must be approved by a database administrator other than the request creator", + "type": "boolean" + }, + "restrict_branch_region": { + "description": "Whether or not to limit branch creation to the AWS us-east-1 region.", + "type": "boolean" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the updated database", + "schema": { + "$ref": "#/definitions/database" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{name}/read-only-regions": { + "get": { + "description": "List read-only regions for the database's default branch\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes 
|\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "List read-only regions", + "operationId": "list-read-only-regions", + "parameters": [ + { + "type": "string", + "description": "The name of the database", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "List of the database's read-only regions", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/read_only_region" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/databases/{name}/regions": { + "get": { + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, 
`create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |\n| Database | `read_database` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Databases" + ], + "summary": "List database regions", + "operationId": "list-database-regions", + "parameters": [ + { + "type": "string", + "description": "The name of the database", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the database belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns the available regions for a database", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/region" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/oauth-applications": { + "get": { + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_applications`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth applications" + ], + "summary": "List OAuth applications", + 
"operationId": "list-oauth-applications", + "parameters": [ + { + "type": "string", + "description": "The name of the organization the OAuth applications belong to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns a list of the organization's oauth applications", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/oauth_application" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/oauth-applications/{application_id}": { + "get": { + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_applications`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth applications" + ], + "summary": "Get an OAuth application", + "operationId": "get-an-oauth-application", + "parameters": [ + { + "type": "string", + "description": "The ID of the OAuth application", + "name": "application_id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the OAuth application belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns information abuot an OAuth application", + "schema": { + "$ref": 
"#/definitions/oauth_application" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/oauth-applications/{application_id}/tokens": { + "get": { + "description": "List OAuth tokens created by an OAuth application\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_tokens`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth applications" + ], + "summary": "List OAuth tokens", + "operationId": "list-oauth-tokens", + "parameters": [ + { + "type": "string", + "description": "The ID of the OAuth application", + "name": "application_id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the OAuth application belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "type": "number", + "default": 1, + "description": "If provided, specifies the page offset of returned results", + "name": "page", + "in": "query" + }, + { + "type": "number", + "default": 25, + "description": "If provided, specifies the number of returned results", + "name": "per_page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "Returns a list of OAuth tokens issued on behalf of the OAuth application", + "schema": { + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/oauth_token" + } + } + }, + "additionalProperties": false + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + 
"/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}": { + "get": { + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_tokens`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth applications" + ], + "summary": "Get an OAuth token", + "operationId": "get-an-oauth-token", + "parameters": [ + { + "type": "string", + "description": "The ID of the OAuth application token", + "name": "token_id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of the OAuth application", + "name": "application_id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the OAuth application belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "Returns an OAuth token that was issued on behalf of the OAuth application", + "schema": { + "$ref": "#/definitions/oauth_token_with_details" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + }, + "delete": { + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_oauth_tokens`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth applications" + ], + "summary": "Delete an OAuth token", + "operationId": "delete-an-oauth-token", + "parameters": [ + { + "type": "string", + "description": "The ID of the OAuth application token", + "name": "token_id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The ID of the OAuth application", + "name": "application_id", + "in": "path", + 
"required": true + }, + { + "type": "string", + "description": "The name of the organization the OAuth application belongs to", + "name": "organization", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "Deletes an OAuth application's OAuth token" + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/organizations/{organization}/oauth-applications/{id}/token": { + "post": { + "description": "Create an OAuth token from an authorization grant code, or refresh an OAuth token from a refresh token\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_oauth_tokens`\n\n", + "consumes": [ + "application/json" + ], + "tags": [ + "OAuth tokens" + ], + "summary": "Create or renew an OAuth token", + "operationId": "create-or-renew-an-oauth-token", + "parameters": [ + { + "type": "string", + "description": "The ID of the OAuth application", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The name of the organization the OAuth application belongs to", + "name": "organization", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "required": [ + "client_id", + "client_secret", + "grant_type" + ], + "properties": { + "client_id": { + "description": "The OAuth application's client ID", + "type": "string" + }, + "client_secret": { + "description": "The OAuth applicatin's client secret", + "type": "string" + }, + "code": { + "description": "The OAuth grant code provided to your OAuth application's redirect URI. 
Required when grant_type is authorization_code", + "type": "string" + }, + "grant_type": { + "description": "Whether an OAuth grant code or a refresh token is being exchanged for an OAuth token", + "type": "string", + "enum": [ + "authorization_code", + "refresh_token" + ] + }, + "redirect_uri": { + "description": "The OAuth application's redirect URI. Required when grant_type is authorization_code", + "type": "string" + }, + "refresh_token": { + "description": "The refresh token from the original OAuth token grant. Required when grant_type is refresh_token", + "type": "string" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the created OAuth token", + "schema": { + "$ref": "#/definitions/created_oauth_token" + } + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "422": { + "description": "Unprocessable Entity" + }, + "500": { + "description": "Internal Server Error" + } + } + } + }, + "/user": { + "get": { + "description": "Get the user associated with this service token\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_user` |", + "consumes": [ + "application/json" + ], + "tags": [ + "Users" + ], + "summary": "Get current user", + "operationId": "get-current-user", + "responses": { + "200": { + "description": "Returns the current user that is associated with this service token", + "schema": { + "$ref": "#/definitions/user" + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Not Found" + }, + "500": { + "description": "Internal Server Error" + } + } + } + } + }, + "definitions": { + "actor": { + "type": "object", + "required": [ + "id", + "display_name", + "avatar_url" + ], + "properties": { + "avatar_url": { + "description": 
"The URL of the actor's avatar", + "type": "string" + }, + "display_name": { + "description": "The name of the actor", + "type": "string" + }, + "id": { + "description": "The ID of the actor", + "type": "string" + } + }, + "additionalProperties": false + }, + "backup": { + "type": "object", + "required": [ + "id", + "actor", + "name", + "state", + "size", + "estimated_storage_cost", + "created_at", + "updated_at", + "required", + "backup_policy", + "schema_snapshot" + ], + "properties": { + "actor": { + "$ref": "#/definitions/actor" + }, + "backup_policy": { + "$ref": "#/definitions/backup_policy" + }, + "created_at": { + "description": "When the backup was created", + "type": "string" + }, + "estimated_storage_cost": { + "description": "The estimated storage cost of the backup", + "type": "string" + }, + "id": { + "description": "The ID of the backup", + "type": "string" + }, + "name": { + "description": "The name of the backup", + "type": "string" + }, + "required": { + "description": "Whether or not the backup policy is required", + "type": "boolean" + }, + "restored_branches": { + "description": "Branches that have been restored with this backup", + "type": "array", + "items": { + "type": "string" + } + }, + "schema_snapshot": { + "$ref": "#/definitions/schema_snapshot" + }, + "size": { + "description": "The size of the backup", + "type": "number" + }, + "state": { + "description": "The current state of the backup", + "type": "string", + "enum": [ + "pending", + "running", + "success", + "failed", + "canceled", + "ignored" + ] + }, + "updated_at": { + "description": "When the backup was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "backup_policy": { + "type": "object", + "required": [ + "id", + "name", + "target", + "retention_value", + "retention_unit", + "frequency_value", + "frequency_unit", + "schedule_day", + "schedule_week", + "created_at", + "updated_at", + "last_ran_at", + "next_run_at" + ], + "properties": { + 
"created_at": { + "description": "When the backup policy was created", + "type": "string" + }, + "frequency_unit": { + "description": "The unit for the frequency of the backup policy", + "type": "string" + }, + "frequency_value": { + "description": "A number value for the frequency of the backup policy", + "type": "number" + }, + "id": { + "description": "The ID of the backup policy", + "type": "string" + }, + "last_ran_at": { + "description": "When the backup was last run", + "type": "string" + }, + "name": { + "description": "The name of the backup policy", + "type": "string" + }, + "next_run_at": { + "description": "When the backup will next run", + "type": "string" + }, + "retention_unit": { + "description": "The unit for the retention period of the backup policy", + "type": "string" + }, + "retention_value": { + "description": "A number value for the retention period of the backup policy", + "type": "number" + }, + "schedule_day": { + "description": "Day of the week that the backup is scheduled", + "type": "string", + "enum": [ + "0..6" + ] + }, + "schedule_week": { + "description": "Week of the month that the backup is scheduled", + "type": "string", + "enum": [ + "0..3" + ] + }, + "target": { + "description": "Whether the backup policy is for a production or development database, or for a database branch", + "type": "string", + "enum": [ + "production", + "development", + "branch" + ] + }, + "updated_at": { + "description": "When the backup policy was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "branch": { + "type": "object", + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ], + "properties": { + "access_host_url": { + "description": "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`", + "type": "string" + }, + "actor": { + "$ref": "#/definitions/actor" + }, + "cluster_rate_name": { + "description": "The SKU representing the branch's cluster size", + "type": "string" + }, + "created_at": { + "description": "When the branch was created", + "type": "string" + }, + "html_url": { + "description": "Planetscale app URL for the branch", + "type": "string" + }, + "id": { + "description": "The ID of the branch", + "type": "string" + }, + "initial_restore_id": { + "description": "The ID of the backup from which the branch was restored", + "type": "string" + }, + "mysql_address": { + "description": "The MySQL address for the branch", + "type": "string" + }, + "mysql_edge_address": { + "description": "The address of the MySQL provider for the branch", + "type": "string" + }, + "name": { + "description": "The name of the branch", + "type": "string" + }, + "parent_branch": { + "description": "The name of the parent branch from which the branch was created", + "type": "string" + }, + "production": { + "description": "Whether or not the branch is a production branch", + "type": "boolean" + }, + "ready": { + "description": "Whether or not the branch is ready to serve queries", + "type": "boolean" + }, + "region": { + "$ref": "#/definitions/region" + }, + "restore_checklist_completed_at": { + "description": "When a user last marked a backup restore checklist as completed", + "type": "string" + }, + "restored_from_branch": { + "$ref": "#/definitions/restored_from_branch" + }, + "schema_last_updated_at": { + "description": "When the schema for the branch was last updated", + "type": "string" + }, + "shard_count": { + "description": "The number of shards in the branch", + "type": "number" + }, + "sharded": { + "description": "Whether or not the branch is sharded", + "type": "boolean" + }, + "updated_at": { + "description": "When the branch was last updated", + "type": "string" + } + }, + "additionalProperties": false 
+ }, + "branch_for_password": { + "type": "object", + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ], + "properties": { + "access_host_url": { + "description": "The host URL for the password", + "type": "string" + }, + "id": { + "description": "The ID for the database branch", + "type": "string" + }, + "mysql_edge_address": { + "description": "The address of the MySQL provider for the branch", + "type": "string" + }, + "name": { + "description": "The name for the database branch", + "type": "string" + }, + "production": { + "description": "Whether or not the database branch is a production database branch", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "created_oauth_token": { + "type": "object", + "properties": { + "actor_display_name": { + "description": "The name of the actor on whose behalf the token was issued", + "type": "string" + }, + "actor_id": { + "description": "The ID of the actor on whose behalf the token was issued", + "type": "string" + }, + "display_name": { + "description": "The display name of the OAuth token", + "type": "string" + }, + "name": { + "description": "The name of the OAuth token", + "type": "string" + }, + "plain_text_refresh_token": { + "description": "The refresh token used to refresh this OAuth token", + "type": "string" + }, + "service_token_accesses": { + "description": "The accesses issued to this OAuth token", + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "description": "The plain text OAuth token", + "type": "string" + } + }, + "additionalProperties": false + }, + "data_import": { + "type": "object", + "required": [ + "state", + "import_check_errors", + "started_at", + "finished_at", + "data_source" + ], + "properties": { + "data_source": { + "$ref": "#/definitions/data_source" + }, + "finished_at": { + "type": "string" + }, + "import_check_errors": { + "type": "string" + }, + "started_at": { + "type": "string" + }, + "state": 
{ + "type": "string" + } + }, + "additionalProperties": false + }, + "data_source": { + "type": "object", + "required": [ + "hostname", + "port", + "database" + ], + "properties": { + "database": { + "type": "string" + }, + "hostname": { + "type": "string" + }, + "port": { + "type": "string" + } + }, + "additionalProperties": false + }, + "database": { + "type": "object", + "required": [ + "id", + "type", + "url", + "branches_url", + "branches_count", + "development_branches_count", + "production_branches_count", + "issues_count", + "multiple_admins_required_for_deletion", + "ready", + "at_development_branch_limit", + "at_backup_restore_branches_limit", + "region", + "html_url", + "name", + "state", + "sharded", + "default_branch_shard_count", + "default_branch_read_only_regions_count", + "default_branch_table_count", + "default_branch", + "require_approval_for_deploy", + "allow_data_branching", + "restrict_branch_region", + "insights_raw_queries", + "plan", + "production_branch_web_console", + "created_at", + "updated_at" + ], + "properties": { + "allow_data_branching": { + "description": "Whether seeding branches with data is enabled for all branches", + "type": "boolean" + }, + "at_backup_restore_branches_limit": { + "description": "If the database has reached its backup restored branch limit", + "type": "boolean" + }, + "at_development_branch_limit": { + "description": "If the database has reached its development branch limit", + "type": "boolean" + }, + "automatic_migrations": { + "description": "Whether to automatically manage Rails migrations during deploy requests", + "type": "boolean" + }, + "branches_count": { + "description": "The total number of database branches", + "type": "number" + }, + "branches_url": { + "description": "The URL to retrieve this database's branches via the API", + "type": "string" + }, + "created_at": { + "type": "string" + }, + "data_import": { + "$ref": "#/definitions/data_import" + }, + "default_branch": { + "description": "The 
default branch for the database", + "type": "string" + }, + "default_branch_read_only_regions_count": { + "description": "Number of read only regions in the default branch", + "type": "number" + }, + "default_branch_shard_count": { + "description": "Number of shards in the default branch", + "type": "number" + }, + "default_branch_table_count": { + "description": "Number of tables in the default branch schema", + "type": "number" + }, + "development_branches_count": { + "description": "The total number of database development branches", + "type": "number" + }, + "html_url": { + "description": "The URL to see this database's branches in the web UI", + "type": "string" + }, + "id": { + "description": "The ID of the database", + "type": "string" + }, + "insights_raw_queries": { + "description": "Whether raw SQL queries are collected", + "type": "boolean" + }, + "issues_count": { + "description": "The total number of ongoing issues within a database", + "type": "number" + }, + "migration_framework": { + "description": "Framework used for applying migrations", + "type": "string" + }, + "migration_table_name": { + "description": "Table name to use for copying schema migration data", + "type": "string" + }, + "multiple_admins_required_for_deletion": { + "description": "If the database requires multiple admins for deletion", + "type": "boolean" + }, + "name": { + "description": "Name of the database", + "type": "string" + }, + "plan": { + "description": "The database plan", + "type": "string" + }, + "production_branch_web_console": { + "description": "Whether web console is enabled for production branches", + "type": "boolean" + }, + "production_branches_count": { + "description": "The total number of database production branches", + "type": "number" + }, + "ready": { + "description": "If the database is ready to be used", + "type": "boolean" + }, + "region": { + "$ref": "#/definitions/region" + }, + "require_approval_for_deploy": { + "description": "Whether an approval is 
required to deploy schema changes to this database", + "type": "boolean" + }, + "restrict_branch_region": { + "description": "Whether to restrict branch creation to one region", + "type": "boolean" + }, + "schema_last_updated_at": { + "description": "When the default branch schema was last changed.", + "type": "string" + }, + "sharded": { + "description": "If the database is sharded", + "type": "boolean" + }, + "state": { + "description": "State of the database", + "type": "string" + }, + "type": { + "description": "The object type", + "type": "string" + }, + "updated_at": { + "type": "string" + }, + "url": { + "description": "The URL to the database API endpoint", + "type": "string" + } + }, + "additionalProperties": false + }, + "deploy_operation": { + "type": "object", + "required": [ + "id", + "state", + "keyspace_name", + "table_name", + "operation_name", + "eta_seconds", + "progress_percentage", + "deploy_error_docs_url", + "ddl_statement", + "syntax_highlighted_ddl", + "created_at", + "updated_at", + "can_drop_data", + "table_recently_used", + "table_recently_used_at", + "deploy_errors" + ], + "properties": { + "can_drop_data": { + "description": "Whether or not the deploy operation is capable of dropping data", + "type": "boolean" + }, + "created_at": { + "description": "When the deploy operation was created", + "type": "string" + }, + "ddl_statement": { + "description": "The DDL statement for the deploy operation", + "type": "string" + }, + "deploy_error_docs_url": { + "description": "A link to documentation explaining the deploy error, if present", + "type": "string" + }, + "deploy_errors": { + "description": "Deploy errors for the deploy operation", + "type": "array", + "items": { + "type": "string" + } + }, + "eta_seconds": { + "description": "The estimated seconds until completion for the deploy operation", + "type": "number" + }, + "id": { + "description": "The ID for the deploy operation", + "type": "string" + }, + "keyspace_name": { + "description": 
"The keyspace modified by the deploy operation", + "type": "string" + }, + "operation_name": { + "description": "The operation name of the deploy operation", + "type": "string" + }, + "progress_percentage": { + "description": "The percent completion for the deploy operation", + "type": "number" + }, + "state": { + "description": "The state of the deploy operation", + "type": "string", + "enum": [ + "pending", + "queued", + "in_progress", + "complete", + "cancelled", + "error" + ] + }, + "syntax_highlighted_ddl": { + "description": "A syntax-highlighted DDL statement for the deploy operation", + "type": "string" + }, + "table_name": { + "description": "The name of the table modifed by the deploy operation", + "type": "string" + }, + "table_recently_used": { + "description": "Whether or not the table modified by the deploy operation was recently used", + "type": "boolean" + }, + "table_recently_used_at": { + "description": "When the table modified by the deploy operation was last used", + "type": "string" + }, + "updated_at": { + "description": "When the deploy operation was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "deploy_request": { + "type": "object", + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ], + "properties": { + "actor": { + "$ref": "#/definitions/actor" + }, + "approved": { + "description": "Whether or not the deploy request is approved", + "type": "boolean" + }, + "branch": { + "description": "The name of the branch the deploy request was created from", + "type": "string" + }, + "branch_deleted": { + "description": "Whether or not the deploy request branch was deleted", + "type": "boolean" + }, + 
"branch_deleted_at": { + "description": "When the deploy request branch was deleted", + "type": "string" + }, + "branch_deleted_by": { + "$ref": "#/definitions/actor" + }, + "closed_at": { + "description": "When the deploy request was closed", + "type": "string" + }, + "closed_by": { + "$ref": "#/definitions/actor" + }, + "created_at": { + "description": "When the deploy request was created", + "type": "string" + }, + "deployed_at": { + "description": "When the deploy request was deployed", + "type": "string" + }, + "deployment_state": { + "description": "The deployment state of the deploy request", + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ] + }, + "html_body": { + "description": "The HTML body of the deploy request", + "type": "string" + }, + "html_url": { + "description": "The PlanetScale app address for the deploy request", + "type": "string" + }, + "id": { + "description": "The ID of the deploy request", + "type": "string" + }, + "into_branch": { + "description": "The name of the branch the deploy request will be merged into", + "type": "string" + }, + "into_branch_shard_count": { + "description": "The number of shards the branch the deploy request will be merged into has", + "type": "number" + }, + "into_branch_sharded": { + "description": "Whether or not the branch the deploy request will be merged into is sharded", + "type": "boolean" + }, + "notes": { + "description": "Notes on the deploy request", + "type": "string" + }, + "number": { + "description": "The number of the deploy request", + "type": "number" + }, + "state": { + "description": "Whether the deploy request is open or closed", + "type": "string", + 
"enum": [ + "open", + "closed" + ] + }, + "updated_at": { + "description": "When the deploy request was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "deploy_request_with_deployment": { + "type": "object", + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at", + "deployment" + ], + "properties": { + "actor": { + "$ref": "#/definitions/actor" + }, + "approved": { + "description": "Whether or not the deploy request is approved", + "type": "boolean" + }, + "branch": { + "description": "The name of the branch the deploy request was created from", + "type": "string" + }, + "branch_deleted": { + "description": "Whether or not the deploy request branch was deleted", + "type": "boolean" + }, + "branch_deleted_at": { + "description": "When the deploy request branch was deleted", + "type": "string" + }, + "branch_deleted_by": { + "$ref": "#/definitions/actor" + }, + "closed_at": { + "description": "When the deploy request was closed", + "type": "string" + }, + "closed_by": { + "$ref": "#/definitions/actor" + }, + "created_at": { + "description": "When the deploy request was created", + "type": "string" + }, + "deployed_at": { + "description": "When the deploy request was deployed", + "type": "string" + }, + "deployment": { + "$ref": "#/definitions/deployment" + }, + "deployment_state": { + "description": "The deployment state of the deploy request", + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + 
"complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ] + }, + "html_body": { + "description": "The HTML body of the deploy request", + "type": "string" + }, + "html_url": { + "description": "The PlanetScale app address for the deploy request", + "type": "string" + }, + "id": { + "description": "The ID of the deploy request", + "type": "string" + }, + "into_branch": { + "description": "The name of the branch the deploy request will be merged into", + "type": "string" + }, + "into_branch_shard_count": { + "description": "The number of shards the branch the deploy request will be merged into has", + "type": "number" + }, + "into_branch_sharded": { + "description": "Whether or not the branch the deploy request will be merged into is sharded", + "type": "boolean" + }, + "notes": { + "description": "Notes on the deploy request", + "type": "string" + }, + "number": { + "description": "The number of the deploy request", + "type": "number" + }, + "state": { + "description": "Whether the deploy request is open or closed", + "type": "string", + "enum": [ + "open", + "closed" + ] + }, + "updated_at": { + "description": "When the deploy request was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "deploy_review": { + "type": "object", + "required": [ + "id", + "actor", + "body", + "html_body", + "state", + "created_at", + "updated_at" + ], + "properties": { + "actor": { + "$ref": "#/definitions/actor" + }, + "body": { + "description": "The text body of the review", + "type": "string" + }, + "created_at": { + "description": "When the review was created", + "type": "string" + }, + "html_body": { + "description": "The HTML body of the review", + "type": "string" + }, + "id": { + "description": "The id of the review", + "type": "string" + }, + "state": { + "description": "Whether the review is a comment or approval", + "type": "string", + "enum": [ + "commented", + "approved" + 
] + }, + "updated_at": { + "description": "When the review was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "deployment": { + "type": "object", + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ], + "properties": { + "auto_cutover": { + "description": "Whether or not to automatically cutover once deployment is finished", + "type": "boolean" + }, + "created_at": { + "description": "When the deployment was created", + "type": "string" + }, + "cutover_at": { + "description": "When the cutover for the deployment was initiated", + "type": "string" + }, + "cutover_expiring": { + "description": "Whether or not the deployment cutover will expire soon and be cancelled", + "type": "boolean" + }, + "deploy_check_errors": { + "description": "Deploy check errors for the deployment", + "type": "string" + }, + "finished_at": { + "description": "When the deployment was finished", + "type": "string" + }, + "id": { + "description": "The ID for a deployment", + "type": "string" + }, + "queued_at": { + "description": "When the deployment was queued", + "type": "string" + }, + "ready_to_cutover_at": { + "description": "When the deployment was ready for cutover", + "type": "string" + }, + "started_at": { + "description": "When the deployment was started", + "type": "string" + }, + "state": { + "description": "The state the deployment is in", + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ] + }, + "submitted_at": { + "description": "When the deployment was submitted", + "type": "string" + }, + "updated_at": { + "description": "When the 
deployment was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "features": { + "type": "object", + "properties": { + "insights": { + "type": "boolean" + }, + "single_tenancy": { + "type": "boolean" + }, + "sso": { + "type": "boolean" + } + }, + "additionalProperties": false + }, + "flags": { + "type": "object", + "properties": { + "example_flag": { + "type": "string", + "enum": [ + "full", + "on" + ] + } + }, + "additionalProperties": false + }, + "lint_error": { + "type": "object", + "required": [ + "lint_error", + "subject_type", + "keyspace_name", + "table_name", + "error_description", + "docs_url", + "column_name", + "foreign_key_column_names", + "auto_increment_column_names", + "charset_name", + "engine_name", + "vindex_name", + "json_path", + "check_constraint_name", + "enum_value", + "partitioning_type", + "partition_name" + ], + "properties": { + "auto_increment_column_names": { + "description": "A list of invalid auto-incremented columns", + "type": "array", + "items": { + "type": "string" + } + }, + "charset_name": { + "description": "The charset of the schema", + "type": "string" + }, + "check_constraint_name": { + "description": "The name of the invalid check constraint", + "type": "string" + }, + "column_name": { + "description": "The column in a table relevant to the error", + "type": "string" + }, + "docs_url": { + "description": "A link to the documentation related to the error", + "type": "string" + }, + "engine_name": { + "description": "The engine of the schema", + "type": "string" + }, + "enum_value": { + "description": "The name of the invalid enum value", + "type": "string" + }, + "error_description": { + "description": "A description for the error that occurred", + "type": "string" + }, + "foreign_key_column_names": { + "description": "A list of invalid foreign key columns in a table", + "type": "array", + "items": { + "type": "string" + } + }, + "json_path": { + "description": "The path for an invalid JSON 
column", + "type": "string" + }, + "keyspace_name": { + "description": "The keyspace of the schema with the error", + "type": "string" + }, + "lint_error": { + "description": "Code representing", + "type": "string" + }, + "partition_name": { + "description": "The name of the invalid partition in the schema", + "type": "string" + }, + "partitioning_type": { + "description": "The name of the invalid partitioning type", + "type": "string" + }, + "subject_type": { + "description": "The subject for the errors", + "type": "string", + "enum": [ + "table", + "vschema", + "routing_rules" + ] + }, + "table_name": { + "description": "The table with the error", + "type": "string" + }, + "vindex_name": { + "description": "The name of the vindex for the schema", + "type": "string" + } + }, + "additionalProperties": false + }, + "oauth_accesses_by_resource": { + "type": "object", + "required": [ + "user", + "organization", + "database", + "branch" + ], + "properties": { + "branch": { + "$ref": "#/definitions/oauth_branch_accesses" + }, + "database": { + "$ref": "#/definitions/oauth_database_accesses" + }, + "organization": { + "$ref": "#/definitions/oauth_organization_accesses" + }, + "user": { + "$ref": "#/definitions/oauth_user_accesses" + } + }, + "additionalProperties": false + }, + "oauth_application": { + "type": "object", + "required": [ + "id", + "name", + "redirect_uri", + "domain", + "created_at", + "updated_at", + "scopes", + "client_id", + "tokens" + ], + "properties": { + "avatar": { + "description": "The image source for the OAuth application's avatar", + "type": "string" + }, + "client_id": { + "description": "The OAuth application's unique client id", + "type": "string" + }, + "created_at": { + "description": "When the OAuth application was created", + "type": "string" + }, + "domain": { + "description": "The domain of the OAuth application. 
Used for verification of a valid redirect uri", + "type": "string" + }, + "id": { + "description": "The ID of the OAuth application", + "type": "string" + }, + "name": { + "description": "The name of the OAuth application", + "type": "string" + }, + "redirect_uri": { + "description": "The redirect URI of the OAuth application", + "type": "string" + }, + "scopes": { + "description": "The scopes that the OAuth application requires on a user's account", + "type": "array", + "items": { + "type": "string" + } + }, + "tokens": { + "description": "The number of tokens issued by the OAuth application", + "type": "number" + }, + "updated_at": { + "description": "When the OAuth application was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "oauth_branch_accesses": { + "type": "object", + "required": [ + "branches", + "accesses" + ], + "properties": { + "accesses": { + "description": "Accesses the token has on the resources", + "type": "array", + "items": { + "type": "string" + } + }, + "branches": { + "description": "Branches the token has access to", + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "oauth_database_accesses": { + "type": "object", + "required": [ + "databases", + "accesses" + ], + "properties": { + "accesses": { + "description": "Accesses the token has on the resources", + "type": "array", + "items": { + "type": "string" + } + }, + "databases": { + "description": "Databases the token has access to", + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "oauth_organization_accesses": { + "type": "object", + "required": [ + "organizations", + "accesses" + ], + "properties": { + "accesses": { + "description": "Accesses the token has on the resources", + "type": "array", + "items": { + "type": "string" + } + }, + "organizations": { + "description": "Organizations the token has access to", + "type": "array", + "items": { + 
"type": "string" + } + } + }, + "additionalProperties": false + }, + "oauth_token": { + "type": "object", + "required": [ + "id", + "display_name", + "avatar_url", + "created_at", + "updated_at", + "expires_at", + "last_used_at", + "name", + "actor_id", + "actor_display_name", + "actor_type" + ], + "properties": { + "actor_display_name": { + "description": "The name of the actor on whose behalf the service token was created", + "type": "string" + }, + "actor_id": { + "description": "The ID of the actor on whose behalf the service token was created", + "type": "string" + }, + "actor_type": { + "description": "The type of the actor on whose behalf the service token was created", + "type": "string" + }, + "avatar_url": { + "description": "The image source for the avatar of the service token", + "type": "string" + }, + "created_at": { + "description": "When the service token was created", + "type": "string" + }, + "display_name": { + "description": "The display name of the service token", + "type": "string" + }, + "expires_at": { + "description": "When the service token will expire", + "type": "string" + }, + "id": { + "description": "The ID of the service token", + "type": "string" + }, + "last_used_at": { + "description": "When the service token was last used", + "type": "string" + }, + "name": { + "description": "The name of the service token", + "type": "string" + }, + "updated_at": { + "description": "When the service token was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "oauth_token_with_details": { + "type": "object", + "required": [ + "id", + "display_name", + "avatar_url", + "created_at", + "updated_at", + "expires_at", + "last_used_at", + "name", + "actor_id", + "actor_display_name", + "actor_type", + "oauth_accesses_by_resource" + ], + "properties": { + "actor_display_name": { + "description": "The name of the actor on whose behalf the service token was created", + "type": "string" + }, + "actor_id": { + "description": 
"The ID of the actor on whose behalf the service token was created", + "type": "string" + }, + "actor_type": { + "description": "The type of the actor on whose behalf the service token was created", + "type": "string" + }, + "avatar_url": { + "description": "The image source for the avatar of the service token", + "type": "string" + }, + "created_at": { + "description": "When the service token was created", + "type": "string" + }, + "display_name": { + "description": "The display name of the service token", + "type": "string" + }, + "expires_at": { + "description": "When the service token will expire", + "type": "string" + }, + "id": { + "description": "The ID of the service token", + "type": "string" + }, + "last_used_at": { + "description": "When the service token was last used", + "type": "string" + }, + "name": { + "description": "The name of the service token", + "type": "string" + }, + "oauth_accesses_by_resource": { + "$ref": "#/definitions/oauth_accesses_by_resource" + }, + "updated_at": { + "description": "When the service token was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "oauth_user_accesses": { + "type": "object", + "required": [ + "users", + "accesses" + ], + "properties": { + "accesses": { + "description": "Accesses the token has on the resources", + "type": "array", + "items": { + "type": "string" + } + }, + "users": { + "description": "Users the token has access to", + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false + }, + "organization": { + "type": "object", + "required": [ + "id", + "name", + "created_at", + "updated_at", + "plan", + "valid_billing_info", + "sso", + "sso_directory", + "single_tenancy", + "has_past_due_invoices", + "can_create_databases", + "free_databases_remaining", + "database_count", + "sleeping_database_count", + "admin_only_production_access", + "idp_managed_roles" + ], + "properties": { + "admin_only_production_access": { + "description": 
"Whether or not only administrators can access production branches in the organization", + "type": "boolean" + }, + "billing_email": { + "description": "The billing email of the organization", + "type": "string" + }, + "can_create_databases": { + "description": "Whether or not more databases can be created in the organization", + "type": "boolean" + }, + "created_at": { + "description": "When the organization was created", + "type": "string" + }, + "database_count": { + "description": "The number of databases in the organization", + "type": "number" + }, + "features": { + "$ref": "#/definitions/features" + }, + "flags": { + "$ref": "#/definitions/flags" + }, + "free_databases_remaining": { + "description": "The number of remaining free databases that can be created in the organization", + "type": "number" + }, + "has_past_due_invoices": { + "description": "Whether or not the organization has past due billing invoices", + "type": "boolean" + }, + "id": { + "description": "The ID for the organization", + "type": "string" + }, + "idp_managed_roles": { + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale", + "type": "boolean" + }, + "name": { + "description": "The name of the organization", + "type": "string" + }, + "plan": { + "description": "The billing plan of the organization", + "type": "string" + }, + "single_tenancy": { + "description": "Whether or not the organization has single tenancy enabled", + "type": "boolean" + }, + "sleeping_database_count": { + "description": "The number of sleeping databases in the organization", + "type": "number" + }, + "sso": { + "description": "Whether or not SSO is enabled on the organization", + "type": "boolean" + }, + "sso_directory": { + "description": "Whether or not the organization uses a WorkOS directory", + "type": "boolean" + }, + "sso_portal_url": { + "description": "The URL of the organization's SSO portal", + "type": "string" + }, + "updated_at": { + "description": 
"When the organization was last updated", + "type": "string" + }, + "valid_billing_info": { + "description": "Whether or not the organization's billing information is valid", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "password": { + "type": "object", + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch" + ], + "properties": { + "access_host_url": { + "description": "The host URL for the password", + "type": "string" + }, + "actor": { + "$ref": "#/definitions/actor" + }, + "created_at": { + "description": "When the password was created", + "type": "string" + }, + "database_branch": { + "$ref": "#/definitions/branch_for_password" + }, + "deleted_at": { + "description": "When the password was deleted", + "type": "string" + }, + "expires_at": { + "description": "When the password will expire", + "type": "string" + }, + "id": { + "description": "The ID for the password", + "type": "string" + }, + "name": { + "description": "The display name for the password", + "type": "string" + }, + "region": { + "$ref": "#/definitions/region" + }, + "renewable": { + "description": "Whether or not the password can be renewed", + "type": "boolean" + }, + "role": { + "description": "The role for the password", + "type": "string" + }, + "ttl_seconds": { + "description": "Time to live (in seconds) for the password. 
The password will be invalid when TTL has passed", + "type": "number" + }, + "username": { + "description": "The username for the password", + "type": "string" + } + }, + "additionalProperties": false + }, + "password_with_plaintext": { + "type": "object", + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch", + "plain_text" + ], + "properties": { + "access_host_url": { + "description": "The host URL for the password", + "type": "string" + }, + "actor": { + "$ref": "#/definitions/actor" + }, + "created_at": { + "description": "When the password was created", + "type": "string" + }, + "database_branch": { + "$ref": "#/definitions/branch_for_password" + }, + "deleted_at": { + "description": "When the password was deleted", + "type": "string" + }, + "expires_at": { + "description": "When the password will expire", + "type": "string" + }, + "id": { + "description": "The ID for the password", + "type": "string" + }, + "name": { + "description": "The display name for the password", + "type": "string" + }, + "plain_text": { + "description": "The plain text password", + "type": "string" + }, + "region": { + "$ref": "#/definitions/region" + }, + "renewable": { + "description": "Whether or not the password can be renewed", + "type": "boolean" + }, + "role": { + "description": "The role for the password", + "type": "string" + }, + "ttl_seconds": { + "description": "Time to live (in seconds) for the password. 
The password will be invalid when TTL has passed", + "type": "number" + }, + "username": { + "description": "The username for the password", + "type": "string" + } + }, + "additionalProperties": false + }, + "queued_deploy_request": { + "type": "object", + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ], + "properties": { + "auto_cutover": { + "description": "Whether or not to automatically cutover once deployment is finished", + "type": "boolean" + }, + "created_at": { + "description": "When the deployment was created", + "type": "string" + }, + "cutover_at": { + "description": "When the cutover for the deployment was initiated", + "type": "string" + }, + "cutover_expiring": { + "description": "Whether or not the deployment cutover will expire soon and be cancelled", + "type": "boolean" + }, + "deploy_check_errors": { + "description": "Deploy check errors for the deployment", + "type": "string" + }, + "finished_at": { + "description": "When the deployment was finished", + "type": "string" + }, + "id": { + "description": "The ID for a deployment", + "type": "string" + }, + "queued_at": { + "description": "When the deployment was queued", + "type": "string" + }, + "ready_to_cutover_at": { + "description": "When the deployment was ready for cutover", + "type": "string" + }, + "started_at": { + "description": "When the deployment was started", + "type": "string" + }, + "state": { + "description": "The state the deployment is in", + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ] + }, + "submitted_at": { + "description": "When the deployment was 
submitted", + "type": "string" + }, + "updated_at": { + "description": "When the deployment was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "read_only_region": { + "type": "object", + "required": [ + "id", + "created_at", + "display_name", + "ready_at", + "updated_at", + "ready", + "actor", + "region" + ], + "properties": { + "actor": { + "$ref": "#/definitions/actor" + }, + "created_at": { + "description": "When the read-only region was created", + "type": "string" + }, + "display_name": { + "description": "The name of the read-only region", + "type": "string" + }, + "id": { + "description": "The ID of the read-only region", + "type": "string" + }, + "ready": { + "description": "Whether or not the read-only region is ready to serve queries", + "type": "boolean" + }, + "ready_at": { + "description": "When the read-only region was ready to serve queries", + "type": "string" + }, + "region": { + "$ref": "#/definitions/region" + }, + "updated_at": { + "description": "When the read-only region was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "region": { + "type": "object", + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ], + "properties": { + "display_name": { + "description": "Name of the region", + "type": "string" + }, + "enabled": { + "description": "Whether or not the region is currently active", + "type": "boolean" + }, + "id": { + "description": "The ID of the region", + "type": "string" + }, + "location": { + "description": "Location of the region", + "type": "string" + }, + "provider": { + "description": "Provider for the region (ex. 
AWS)", + "type": "string" + }, + "public_ip_addresses": { + "description": "Public IP addresses for the region", + "type": "array", + "items": { + "type": "string" + } + }, + "slug": { + "description": "The slug of the region", + "type": "string" + } + }, + "additionalProperties": false + }, + "restored_from_branch": { + "type": "object", + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ], + "properties": { + "created_at": { + "description": "When the resource was created", + "type": "string" + }, + "deleted_at": { + "description": "When the resource was deleted, if deleted", + "type": "string" + }, + "id": { + "description": "The ID for the resource", + "type": "string" + }, + "name": { + "description": "The name for the resource", + "type": "string" + }, + "updated_at": { + "description": "When the resource was last updated", + "type": "string" + } + }, + "additionalProperties": false + }, + "schema_snapshot": { + "type": "object", + "required": [ + "id", + "url", + "name", + "created_at", + "updated_at" + ], + "properties": { + "created_at": { + "description": "When the schema snapshot was created", + "type": "string" + }, + "id": { + "description": "The ID of the schema snapshot", + "type": "string" + }, + "name": { + "description": "The name of the schema snapshot", + "type": "string" + }, + "updated_at": { + "description": "When the schema snapshot was last updated", + "type": "string" + }, + "url": { + "description": "The URL to the schema snapshot in the PlanetScale app", + "type": "string" + } + }, + "additionalProperties": false + }, + "table_schema": { + "type": "object", + "required": [ + "name", + "html", + "raw" + ], + "properties": { + "html": { + "description": "Syntax highlighted HTML for the table's schema", + "type": "string" + }, + "name": { + "description": "Name of the table", + "type": "string" + }, + "raw": { + "description": "The table's schema", + "type": "string" + } + }, + "additionalProperties": false + 
}, + "user": { + "type": "object", + "properties": { + "avatar_url": { + "description": "The URL source of the user's avatar", + "type": "string" + }, + "created_at": { + "description": "When the user was created", + "type": "string" + }, + "default_organization_id": { + "description": "The default organization for the user", + "type": "string" + }, + "directory_managed": { + "description": "Whether or not the user is managed by a WorkOS directory", + "type": "boolean" + }, + "display_name": { + "description": "The display name of the user", + "type": "string" + }, + "email": { + "description": "The email of the user", + "type": "string" + }, + "email_verified": { + "description": "Whether or not the user is verified by email", + "type": "boolean" + }, + "id": { + "description": "The ID of the user", + "type": "string" + }, + "managed": { + "description": "Whether or not the user is managed by an authentication provider", + "type": "boolean" + }, + "name": { + "description": "The name of the user", + "type": "string" + }, + "sso": { + "description": "Whether or not the user is managed by WorkOS", + "type": "boolean" + }, + "two_factor_auth_configured": { + "description": "Whether or not the user has configured two factor authentication", + "type": "boolean" + }, + "updated_at": { + "description": "When the user was last updated", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "securityDefinitions": { + "ApiKeyHeader": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "security": [ + { + "ApiKeyHeader": [ + "Authorization" + ] + } + ], + "tags": [ + { + "description": " API endpoints for managing database branch backups.\n", + "name": "Backups" + }, + { + "description": " API endpoints for managing databases within an organization.\n", + "name": "Databases" + }, + { + "description": " API endpoints for managing database branch passwords.\n", + "name": "Database branch passwords" + }, + { + "description": " API 
endpoints for fetching OAuth applications.\n", + "name": "OAuth applications" + }, + { + "description": " API endpoints for managing OAuth tokens.\n", + "name": "OAuth tokens" + }, + { + "description": " API endpoints for managing organizations.\n", + "name": "Organizations" + }, + { + "description": " API endpoints for fetching user information.\n", + "name": "Users" + }, + { + "description": " API endpoints for managing database deploy requests.\n", + "name": "Deploy requests" + } + ], + "x-readme": { + "explorer-enabled": false + } +} diff --git a/openapi/extract-ref-cfg.json b/openapi/extract-ref-cfg.json new file mode 100644 index 0000000..5e26cbf --- /dev/null +++ b/openapi/extract-ref-cfg.json @@ -0,0 +1,209 @@ +{ + "extractions": [ + {"path": "/organizations", "method": "get", "responses": 200, "prop": "data.features","become_ref": "features"}, + {"path": "/organizations", "method": "get", "responses": 200, "prop": "data.flags", "become_ref": "flags"}, + {"path": "/organizations", "method": "get", "responses": 200, "prop": "data", "become_ref": "organization"}, + + {"path": "/organizations/{name}", "method": "get", "responses": 200, "prop": "features", "become_ref": "features"}, + {"path": "/organizations/{name}", "method": "get", "responses": 200, "prop": "flags", "become_ref": "flags"}, + {"path": "/organizations/{name}", "method": "get", "responses": 200, "prop": "", "become_ref": "organization"}, + + {"path": "/organizations/{name}", "method": "patch", "responses": 200, "prop": "features", "become_ref": "features"}, + {"path": "/organizations/{name}", "method": "patch", "responses": 200, "prop": "flags", "become_ref": "flags"}, + {"path": "/organizations/{name}", "method": "patch", "responses": 200, "prop": "", "become_ref": "organization"}, + + {"path": "/organizations/{name}/regions", "method": "get", "responses": 200, "prop": "data", "become_ref": "region"}, + + {"path": "/organizations/{organization}/databases", "method": "get", "responses": 200, 
"prop": "data.region", "become_ref": "region"}, + {"path": "/organizations/{organization}/databases", "method": "get", "responses": 200, "prop": "data.data_import.data_source", "become_ref": "data_source"}, + {"path": "/organizations/{organization}/databases", "method": "get", "responses": 200, "prop": "data.data_import", "become_ref": "data_import"}, + {"path": "/organizations/{organization}/databases", "method": "get", "responses": 200, "prop": "data", "become_ref": "database"}, + + {"path": "/organizations/{organization}/databases", "method": "post", "responses": 201, "prop": "region", "become_ref": "region"}, + {"path": "/organizations/{organization}/databases", "method": "post", "responses": 201, "prop": "data_import.data_source", "become_ref": "data_source"}, + {"path": "/organizations/{organization}/databases", "method": "post", "responses": 201, "prop": "data_import", "become_ref": "data_import"}, + {"path": "/organizations/{organization}/databases", "method": "post", "responses": 201, "prop": "", "become_ref": "database"}, + + {"path": "/organizations/{organization}/databases/{name}", "method": "get", "responses": 200, "prop": "region", "become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{name}", "method": "get", "responses": 200, "prop": "data_import.data_source", "become_ref": "data_source"}, + {"path": "/organizations/{organization}/databases/{name}", "method": "get", "responses": 200, "prop": "data_import", "become_ref": "data_import"}, + {"path": "/organizations/{organization}/databases/{name}", "method": "get", "responses": 200, "prop": "", "become_ref": "database"}, + + {"path": "/organizations/{organization}/databases/{name}", "method": "patch", "responses": 200, "prop": "region", "become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{name}", "method": "patch", "responses": 200, "prop": "data_import.data_source", "become_ref": "data_source"}, + {"path": 
"/organizations/{organization}/databases/{name}", "method": "patch", "responses": 200, "prop": "data_import", "become_ref": "data_import"}, + {"path": "/organizations/{organization}/databases/{name}", "method": "patch", "responses": 200, "prop": "", "become_ref": "database"}, + + {"path": "/organizations/{organization}/databases/{name}/read-only-regions", "method": "get", "responses": 200, "prop": "data.actor", "become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{name}/read-only-regions", "method": "get", "responses": 200, "prop": "data.region", "become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{name}/read-only-regions", "method": "get", "responses": 200, "prop": "data", "become_ref": "read_only_region"}, + + {"path": "/organizations/{organization}/databases/{name}/regions", "method": "get", "responses": 200, "prop": "data", "become_ref": "region"}, + + {"path": "/organizations/{organization}/databases/{database}/branches","method": "get","responses": 200,"prop": "data.region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "get","responses": 200,"prop": "data.restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "get","responses": 200,"prop": "data.actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "get","responses": 200,"prop": "data","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches","method": "post","responses": 201,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "post","responses": 201,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "post","responses": 201,"prop": 
"actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches","method": "post","responses": 201,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}","method": "get","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}","method": "get","responses": 200,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}","method": "get","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}","method": "get","responses": 200,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/demote","method": "post","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/demote","method": "post","responses": 200,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/demote","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/demote","method": "post","responses": 200,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/promote","method": "post","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/promote","method": "post","responses": 200,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/promote","method": "post","responses": 200,"prop": 
"actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/promote","method": "post","responses": 200,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "post","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "post","responses": 200,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "post","responses": 200,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "delete","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "delete","responses": 200,"prop": "restored_from_branch","become_ref": "restored_from_branch"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "delete","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations","method": "delete","responses": 200,"prop": "","become_ref": "branch"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/schema","method": "get","responses": 200,"prop": "data","become_ref": "table_schema"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{name}/schema/lint","method": "get","responses": 200,"prop": "data","become_ref": "lint_error"}, + + {"path": 
"/organizations/{organization}/databases/{database}/deploy-queue","method": "get","responses": 200,"prop": "data","become_ref": "queued_deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "get","responses": 200,"prop": "data.actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "get","responses": 200,"prop": "data.branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "get","responses": 200,"prop": "data.closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "get","responses": 200,"prop": "data","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "post","responses": 201,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "post","responses": 201,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "post","responses": 201,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "post","responses": 201,"prop": "deployment","become_ref": "deployment"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests","method": "post","responses": 201,"prop": "","become_ref": "deploy_request_with_deployment"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "get","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "get","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "get","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "get","responses": 200,"prop": "deployment","become_ref": "deployment"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "get","responses": 200,"prop": "","become_ref": "deploy_request_with_deployment"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "patch","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "patch","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "patch","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "patch","responses": 200,"prop": "deployment","become_ref": "deployment"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}","method": "patch","responses": 200,"prop": "","become_ref": "deploy_request_with_deployment"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply","method": "put","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply","method": "put","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply","method": "put","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply","method": "put","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deployment","method": "get","responses": 200,"prop": "","become_ref": "deployment"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/operations","method": "get","responses": 200,"prop": "data","become_ref": "deploy_operation"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": 
"/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews","method": "get","responses": 200,"prop": "data.actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews","method": "get","responses": 200,"prop": "data","become_ref": "deploy_review"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews","method": "post","responses": 201,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews","method": "post","responses": 201,"prop": "","become_ref": "deploy_review"}, + + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert","method": "post","responses": 200,"prop": "branch_deleted_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert","method": "post","responses": 200,"prop": "closed_by","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert","method": "post","responses": 200,"prop": "","become_ref": "deploy_request"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}", "method": "get", "responses": 200, "prop": "actor", "become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}", "method": "get", "responses": 200, "prop": "backup_policy", "become_ref": "backup_policy"}, + {"path": 
"/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}", "method": "get", "responses": 200, "prop": "schema_snapshot", "become_ref": "schema_snapshot"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}", "method": "get", "responses": 200, "prop": "", "become_ref": "backup"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "get","responses": 200,"prop": "data.actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "get","responses": 200,"prop": "data.backup_policy","become_ref": "backup_policy"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "get","responses": 200,"prop": "data.schema_snapshot","become_ref": "schema_snapshot"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "get","responses": 200,"prop": "data","become_ref": "backup"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "post","responses": 201,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "post","responses": 201,"prop": "backup_policy","become_ref": "backup_policy"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "post","responses": 201,"prop": "schema_snapshot","become_ref": "schema_snapshot"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/backups","method": "post","responses": 201,"prop": "","become_ref": "backup"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "get","responses": 200,"prop": "data.actor","become_ref": "actor"}, + {"path": 
"/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "get","responses": 200,"prop": "data.database_branch","become_ref": "branch_for_password"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "get","responses": 200,"prop": "data.region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "get","responses": 200,"prop": "data","become_ref": "password"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "post","responses": 201,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "post","responses": 201,"prop": "database_branch","become_ref": "branch_for_password"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "post","responses": 201,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords","method": "post","responses": 201,"prop": "","become_ref": "password_with_plaintext"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "get","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "get","responses": 200,"prop": "database_branch","become_ref": "branch_for_password"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "get","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "get","responses": 200,"prop": "","become_ref": "password"}, + + {"path": 
"/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "patch","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "patch","responses": 200,"prop": "database_branch","become_ref": "branch_for_password"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "patch","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}","method": "patch","responses": 200,"prop": "","become_ref": "password"}, + + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew","method": "post","responses": 200,"prop": "actor","become_ref": "actor"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew","method": "post","responses": 200,"prop": "database_branch","become_ref": "branch_for_password"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew","method": "post","responses": 200,"prop": "region","become_ref": "region"}, + {"path": "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew","method": "post","responses": 200,"prop": "","become_ref": "password_with_plaintext"}, + + {"path": "/organizations/{organization}/oauth-applications", "method": "get", "responses": 200, "prop": "data", "become_ref": "oauth_application"}, + + {"path": "/organizations/{organization}/oauth-applications/{application_id}", "method": "get", "responses": 200, "prop": "", "become_ref": "oauth_application"}, + + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens", "method": "get", "responses": 200, "prop": "data", "become_ref": "oauth_token"}, + + {"path": 
"/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "oauth_accesses_by_resource.branch", "become_ref": "oauth_branch_accesses"}, + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "oauth_accesses_by_resource.database", "become_ref": "oauth_database_accesses"}, + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "oauth_accesses_by_resource.organization", "become_ref": "oauth_organization_accesses"}, + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "oauth_accesses_by_resource.user", "become_ref": "oauth_user_accesses"}, + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "oauth_accesses_by_resource", "become_ref": "oauth_accesses_by_resource"}, + {"path": "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}", "method": "get", "responses": 200, "prop": "", "become_ref": "oauth_token_with_details"}, + + {"path": "/organizations/{organization}/oauth-applications/{id}/token", "method": "post", "responses": 200, "prop": "", "become_ref": "created_oauth_token"}, + + {"path": "/user", "method": "get", "responses": 200, "prop": "", "become_ref": "user"} + ] +} \ No newline at end of file diff --git a/openapi/openapi-spec.json b/openapi/openapi-spec.json new file mode 100644 index 0000000..faac24c --- /dev/null +++ b/openapi/openapi-spec.json @@ -0,0 +1,11011 @@ +{ + "swagger": "2.0", + "info": { + "title": "PlanetScale API", + "description": "\n

PlanetScale API

\n© 2023 PlanetScale, Inc.", + "version": "v1", + "x-copyright": "© 2023 PlanetScale, Inc." + }, + "basePath": "/v1", + "consumes": [ + "application/json" + ], + "paths": { + "/organizations": { + "get": { + "tags": [ + "Organizations" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-organizations", + "summary": "List organizations", + "parameters": [ + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Gets the organizations for the current user", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the organization" + }, + "name": { + "type": "string", + "description": "The name of the organization" + }, + "billing_email": { + "type": "string", + "description": "The billing email of the organization" + }, + "created_at": { + "type": "string", + "description": "When the organization was created" + }, + "updated_at": { + "type": "string", + "description": "When the organization was last updated" + }, + "plan": { + "type": "string", + "description": "The billing plan of the organization" + }, + "valid_billing_info": { + "type": "boolean", + "description": "Whether or not the organization's billing information is valid" + }, + "sso": { + "type": "boolean", + "description": "Whether or not SSO is enabled on the organization" + }, + "sso_directory": { + "type": "boolean", + "description": "Whether or not the 
organization uses a WorkOS directory" + }, + "single_tenancy": { + "type": "boolean", + "description": "Whether or not the organization has single tenancy enabled" + }, + "has_past_due_invoices": { + "type": "boolean", + "description": "Whether or not the organization has past due billing invoices" + }, + "can_create_databases": { + "type": "boolean", + "description": "Whether or not more databases can be created in the organization" + }, + "free_databases_remaining": { + "type": "number", + "description": "The number of remaining free databases that can be created in the organization" + }, + "database_count": { + "type": "number", + "description": "The number of databases in the organization" + }, + "sleeping_database_count": { + "type": "number", + "description": "The number of sleeping databases in the organization" + }, + "admin_only_production_access": { + "type": "boolean", + "description": "Whether or not only administrators can access production branches in the organization" + }, + "sso_portal_url": { + "type": "string", + "description": "The URL of the organization's SSO portal" + }, + "flags": { + "type": "object", + "properties": { + "example_flag": { + "type": "string", + "enum": [ + "full", + "on" + ] + } + }, + "additionalProperties": false + }, + "features": { + "type": "object", + "properties": { + "insights": { + "type": "boolean" + }, + "sso": { + "type": "boolean" + }, + "single_tenancy": { + "type": "boolean" + } + }, + "additionalProperties": false + }, + "idp_managed_roles": { + "type": "boolean", + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "plan", + "valid_billing_info", + "sso", + "sso_directory", + "single_tenancy", + "has_past_due_invoices", + "can_create_databases", + "free_databases_remaining", + "database_count", + "sleeping_database_count", + 
"admin_only_production_access", + "idp_managed_roles" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "When using a service token, returns the list of organizations the service token has access to. When using an OAuth token, returns the list of organizations the user has access to.\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_organizations` |" + } + }, + "/organizations/{name}": { + "get": { + "tags": [ + "Organizations" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-an-organization", + "summary": "Get an organization", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns an organization", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the organization" + }, + "name": { + "type": "string", + "description": "The name of the organization" + }, + "billing_email": { + "type": "string", + "description": "The billing email of the organization" + }, + "created_at": { + "type": "string", + "description": "When the organization was created" + }, + "updated_at": { + "type": "string", + "description": "When the organization was last updated" + }, + "plan": { + "type": "string", + "description": "The billing plan of the organization" + }, + "valid_billing_info": { + "type": "boolean", + "description": "Whether or not the organization's billing information is valid" + }, + "sso": { + "type": "boolean", + "description": "Whether or 
not SSO is enabled on the organization" + }, + "sso_directory": { + "type": "boolean", + "description": "Whether or not the organization uses a WorkOS directory" + }, + "single_tenancy": { + "type": "boolean", + "description": "Whether or not the organization has single tenancy enabled" + }, + "has_past_due_invoices": { + "type": "boolean", + "description": "Whether or not the organization has past due billing invoices" + }, + "can_create_databases": { + "type": "boolean", + "description": "Whether or not more databases can be created in the organization" + }, + "free_databases_remaining": { + "type": "number", + "description": "The number of remaining free databases that can be created in the organization" + }, + "database_count": { + "type": "number", + "description": "The number of databases in the organization" + }, + "sleeping_database_count": { + "type": "number", + "description": "The number of sleeping databases in the organization" + }, + "admin_only_production_access": { + "type": "boolean", + "description": "Whether or not only administrators can access production branches in the organization" + }, + "sso_portal_url": { + "type": "string", + "description": "The URL of the organization's SSO portal" + }, + "flags": { + "type": "object", + "properties": { + "example_flag": { + "type": "string", + "enum": [ + "full", + "on" + ] + } + }, + "additionalProperties": false + }, + "features": { + "type": "object", + "properties": { + "insights": { + "type": "boolean" + }, + "sso": { + "type": "boolean" + }, + "single_tenancy": { + "type": "boolean" + } + }, + "additionalProperties": false + }, + "idp_managed_roles": { + "type": "boolean", + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "plan", + "valid_billing_info", + "sso", + "sso_directory", + "single_tenancy", + "has_past_due_invoices", + 
"can_create_databases", + "free_databases_remaining", + "database_count", + "sleeping_database_count", + "admin_only_production_access", + "idp_managed_roles" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_organization`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_organizations` |\n| Organization | `read_organization` |" + }, + "patch": { + "tags": [ + "Organizations" + ], + "consumes": [ + "application/json" + ], + "operationId": "update-an-organization", + "summary": "Update an organization", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "billing_email": { + "type": "string", + "description": "The billing email for the organization" + }, + "require_admin_for_production_access": { + "type": "boolean", + "description": "Whether or not only admins can access production" + }, + "idp_managed_roles": { + "type": "boolean", + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the updated organization", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the organization" + }, + "name": { + "type": "string", + "description": "The name of the organization" + }, + "billing_email": { + "type": "string", + "description": "The billing email of the organization" + }, + 
"created_at": { + "type": "string", + "description": "When the organization was created" + }, + "updated_at": { + "type": "string", + "description": "When the organization was last updated" + }, + "plan": { + "type": "string", + "description": "The billing plan of the organization" + }, + "valid_billing_info": { + "type": "boolean", + "description": "Whether or not the organization's billing information is valid" + }, + "sso": { + "type": "boolean", + "description": "Whether or not SSO is enabled on the organization" + }, + "sso_directory": { + "type": "boolean", + "description": "Whether or not the organization uses a WorkOS directory" + }, + "single_tenancy": { + "type": "boolean", + "description": "Whether or not the organization has single tenancy enabled" + }, + "has_past_due_invoices": { + "type": "boolean", + "description": "Whether or not the organization has past due billing invoices" + }, + "can_create_databases": { + "type": "boolean", + "description": "Whether or not more databases can be created in the organization" + }, + "free_databases_remaining": { + "type": "number", + "description": "The number of remaining free databases that can be created in the organization" + }, + "database_count": { + "type": "number", + "description": "The number of databases in the organization" + }, + "sleeping_database_count": { + "type": "number", + "description": "The number of sleeping databases in the organization" + }, + "admin_only_production_access": { + "type": "boolean", + "description": "Whether or not only administrators can access production branches in the organization" + }, + "sso_portal_url": { + "type": "string", + "description": "The URL of the organization's SSO portal" + }, + "flags": { + "type": "object", + "properties": { + "example_flag": { + "type": "string", + "enum": [ + "full", + "on" + ] + } + }, + "additionalProperties": false + }, + "features": { + "type": "object", + "properties": { + "insights": { + "type": "boolean" + }, + "sso": { + 
"type": "boolean" + }, + "single_tenancy": { + "type": "boolean" + } + }, + "additionalProperties": false + }, + "idp_managed_roles": { + "type": "boolean", + "description": "Whether or not the IdP provider is be responsible for managing roles in PlanetScale" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "plan", + "valid_billing_info", + "sso", + "sso_directory", + "single_tenancy", + "has_past_due_invoices", + "can_create_databases", + "free_databases_remaining", + "database_count", + "sleeping_database_count", + "admin_only_production_access", + "idp_managed_roles" + ] + } + } + }, + "description": "\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_organization` |" + } + }, + "/organizations/{name}/regions": { + "get": { + "tags": [ + "Organizations" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-regions-for-an-organization", + "summary": "List regions for an organization", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the organization's regions", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": 
"object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_organization`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | `read_organizations` |\n| Organization | `read_organization` |" + } + }, + "/organizations/{organization}/databases": { + "get": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-databases", + "summary": "List databases", + "parameters": [ + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } 
+ ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Retrieves the databases for an organization", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the database" + }, + "type": { + "type": "string", + "description": "The object type" + }, + "url": { + "type": "string", + "description": "The URL to the database API endpoint" + }, + "branches_url": { + "type": "string", + "description": "The URL to retrieve this database's branches via the API" + }, + "branches_count": { + "type": "number", + "description": "The total number of database branches" + }, + "development_branches_count": { + "type": "number", + "description": "The total number of database development branches" + }, + "production_branches_count": { + "type": "number", + "description": "The total number of database production branches" + }, + "issues_count": { + "type": "number", + "description": "The total number of ongoing issues within a database" + }, + "multiple_admins_required_for_deletion": { + "type": "boolean", + "description": "If the database requires multiple admins for deletion" + }, + "ready": { + "type": "boolean", + "description": "If the database is ready to be used" + }, + "at_development_branch_limit": { + "type": "boolean", + "description": "If the database has reached its development branch limit" + }, + "at_backup_restore_branches_limit": { + "type": "boolean", + "description": "If the database has reached its backup restored branch limit" + }, + "data_import": { + "type": "object", + "properties": { + "state": { + "type": "string" + }, + "import_check_errors": { + "type": "string" + }, + "started_at": { + "type": "string" + }, + "finished_at": { + "type": 
"string" + }, + "data_source": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "port": { + "type": "string" + }, + "database": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "hostname", + "port", + "database" + ] + } + }, + "additionalProperties": false, + "required": [ + "state", + "import_check_errors", + "started_at", + "finished_at", + "data_source" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "html_url": { + "type": "string", + "description": "The URL to see this database's branches in the web UI" + }, + "name": { + "type": "string", + "description": "Name of the database" + }, + "state": { + "type": "string", + "description": "State of the database" + }, + "sharded": { + "type": "boolean", + "description": "If the database is sharded" + }, + "default_branch_shard_count": { + "type": "number", + "description": "Number of shards in the default branch" + }, + "default_branch_read_only_regions_count": { + "type": "number", + "description": "Number of read only regions in the default branch" + }, + "default_branch_table_count": { + "type": "number", + "description": 
"Number of tables in the default branch schema" + }, + "default_branch": { + "type": "string", + "description": "The default branch for the database" + }, + "require_approval_for_deploy": { + "type": "boolean", + "description": "Whether an approval is required to deploy schema changes to this database" + }, + "allow_data_branching": { + "type": "boolean", + "description": "Whether seeding branches with data is enabled for all branches" + }, + "automatic_migrations": { + "type": "boolean", + "description": "Whether to automatically manage Rails migrations during deploy requests" + }, + "restrict_branch_region": { + "type": "boolean", + "description": "Whether to restrict branch creation to one region" + }, + "insights_raw_queries": { + "type": "boolean", + "description": "Whether raw SQL queries are collected" + }, + "plan": { + "type": "string", + "description": "The database plan" + }, + "production_branch_web_console": { + "type": "boolean", + "description": "Whether web console is enabled for production branches" + }, + "migration_table_name": { + "type": "string", + "description": "Table name to use for copying schema migration data" + }, + "migration_framework": { + "type": "string", + "description": "Framework used for applying migrations" + }, + "created_at": { + "type": "string" + }, + "updated_at": { + "type": "string" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the default branch schema was last changed." 
+ } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "url", + "branches_url", + "branches_count", + "development_branches_count", + "production_branches_count", + "issues_count", + "multiple_admins_required_for_deletion", + "ready", + "at_development_branch_limit", + "at_backup_restore_branches_limit", + "region", + "html_url", + "name", + "state", + "sharded", + "default_branch_shard_count", + "default_branch_read_only_regions_count", + "default_branch_table_count", + "default_branch", + "require_approval_for_deploy", + "allow_data_branching", + "restrict_branch_region", + "insights_raw_queries", + "plan", + "production_branch_web_console", + "created_at", + "updated_at" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |" + }, + "post": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-a-database", + "summary": "Create a database", + "parameters": [ + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": 
"object", + "properties": { + "name": { + "type": "string", + "description": "Name of the database" + }, + "region": { + "type": "string", + "description": "The region the database will be deployed in. If left blank, defaults to the organization's default region." + }, + "plan": { + "type": "string", + "description": "The database billing plan. Options: 'hobby', 'scaler', or 'scaler_pro'." + }, + "cluster_size": { + "type": "string", + "description": "The database cluster size. This is required for Scaler Pro databases. Options: PS_10, PS_20, PS_40, PS_80, PS_160, PS_320, PS_400, PS_640, PS_700, PS_900, PS_1280, PS_1400, PS_1800, PS_2100, PS_2560, PS_2700, PS_2800." + } + }, + "additionalProperties": false, + "required": [ + "name" + ] + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "201": { + "description": "Returns the created database", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the database" + }, + "type": { + "type": "string", + "description": "The object type" + }, + "url": { + "type": "string", + "description": "The URL to the database API endpoint" + }, + "branches_url": { + "type": "string", + "description": "The URL to retrieve this database's branches via the API" + }, + "branches_count": { + "type": "number", + "description": "The total number of database branches" + }, + "development_branches_count": { + "type": "number", + "description": "The total number of database development branches" + }, + "production_branches_count": { + "type": "number", + "description": "The total number of database production branches" + }, + "issues_count": { + "type": "number", + "description": "The total number of ongoing issues within a database" + }, + "multiple_admins_required_for_deletion": { + "type": "boolean", + 
"description": "If the database requires multiple admins for deletion" + }, + "ready": { + "type": "boolean", + "description": "If the database is ready to be used" + }, + "at_development_branch_limit": { + "type": "boolean", + "description": "If the database has reached its development branch limit" + }, + "at_backup_restore_branches_limit": { + "type": "boolean", + "description": "If the database has reached its backup restored branch limit" + }, + "data_import": { + "type": "object", + "properties": { + "state": { + "type": "string" + }, + "import_check_errors": { + "type": "string" + }, + "started_at": { + "type": "string" + }, + "finished_at": { + "type": "string" + }, + "data_source": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "port": { + "type": "string" + }, + "database": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "hostname", + "port", + "database" + ] + } + }, + "additionalProperties": false, + "required": [ + "state", + "import_check_errors", + "started_at", + "finished_at", + "data_source" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "html_url": { + "type": "string", + "description": "The URL to see this database's branches in the web UI" + }, + "name": { + "type": "string", + "description": "Name of the database" + }, + "state": { + "type": "string", + "description": "State of the database" + }, + "sharded": { + "type": "boolean", + "description": "If the database is sharded" + }, + "default_branch_shard_count": { + "type": "number", + "description": "Number of shards in the default branch" + }, + "default_branch_read_only_regions_count": { + "type": "number", + "description": "Number of read only regions in the default branch" + }, + "default_branch_table_count": { + "type": "number", + "description": "Number of tables in the default branch schema" + }, + "default_branch": { + "type": "string", + "description": "The default branch for the database" + }, + "require_approval_for_deploy": { + "type": "boolean", + "description": "Whether an approval is required to deploy schema changes to this database" + }, + "allow_data_branching": { + "type": "boolean", + "description": "Whether seeding branches with data is enabled for all branches" + }, + "automatic_migrations": { + "type": "boolean", + "description": "Whether to automatically manage Rails migrations during deploy requests" + }, + "restrict_branch_region": { + "type": "boolean", + 
"description": "Whether to restrict branch creation to one region" + }, + "insights_raw_queries": { + "type": "boolean", + "description": "Whether raw SQL queries are collected" + }, + "plan": { + "type": "string", + "description": "The database plan" + }, + "production_branch_web_console": { + "type": "boolean", + "description": "Whether web console is enabled for production branches" + }, + "migration_table_name": { + "type": "string", + "description": "Table name to use for copying schema migration data" + }, + "migration_framework": { + "type": "string", + "description": "Framework used for applying migrations" + }, + "created_at": { + "type": "string" + }, + "updated_at": { + "type": "string" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the default branch schema was last changed." + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "url", + "branches_url", + "branches_count", + "development_branches_count", + "production_branches_count", + "issues_count", + "multiple_admins_required_for_deletion", + "ready", + "at_development_branch_limit", + "at_backup_restore_branches_limit", + "region", + "html_url", + "name", + "state", + "sharded", + "default_branch_shard_count", + "default_branch_read_only_regions_count", + "default_branch_table_count", + "default_branch", + "require_approval_for_deploy", + "allow_data_branching", + "restrict_branch_region", + "insights_raw_queries", + "plan", + "production_branch_web_console", + "created_at", + "updated_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `create_databases`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `create_databases` |" + } + }, + "/organizations/{organization}/databases/{database}/branches": { + "get": { + "tags": [ + "Database 
branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-branches", + "summary": "List branches", + "parameters": [ + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns database branches", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + 
"html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | 
`read_branch` |" + }, + "post": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-a-branch", + "summary": "Create a branch", + "parameters": [ + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name of the branch" + }, + "parent_branch": { + "type": "string", + "description": "Parent branch" + }, + "backup_id": { + "type": "string", + "description": "If provided, restores the backup's schema and data to the new branch. Must have `restore_production_branch_backup(s)` or `restore_backup(s)` access to do this." + } + }, + "additionalProperties": false, + "required": [ + "name", + "parent_branch" + ] + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "201": { + "description": "Returns the created branch", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + 
"html_url": { + "type": "string", + "description": "PlanetScale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `create_branch`, `restore_production_branch_backup`, `restore_backup`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_branches`, `restore_production_branch_backups`, `restore_backups` |\n| Database | `write_branches`, `restore_production_branch_backups`, `restore_backups` 
|\n| Branch | `restore_backups` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/backups": { + "get": { + "tags": [ + "Backups" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-backups", + "summary": "List backups", + "parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns database branch backups", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "name": { + "type": "string", + "description": "The name 
of the backup" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "running", + "success", + "failed", + "canceled", + "ignored" + ], + "description": "The current state of the backup" + }, + "size": { + "type": "number", + "description": "The size of the backup" + }, + "estimated_storage_cost": { + "type": "string", + "description": "The estimated storage cost of the backup" + }, + "created_at": { + "type": "string", + "description": "When the backup was created" + }, + "updated_at": { + "type": "string", + "description": "When the backup was last updated" + }, + "required": { + "type": "boolean", + "description": "Whether or not the backup policy is required" + }, + "restored_branches": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Branches that have been restored with this backup" + }, + "backup_policy": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup policy" + }, + "name": { + "type": "string", + "description": "The name of the backup policy" + }, + "target": { + "type": "string", + "enum": [ + "production", + "development", + "branch" + ], + "description": "Whether the backup policy is for a production or development database, or for a database branch" + }, + "retention_value": { + "type": "number", + "description": "A number value for the retention period of the backup policy" + }, + "retention_unit": { + "type": "string", + "description": "The unit for the retention period of the backup policy" + }, + "frequency_value": { + "type": "number", + "description": "A number value for the frequency of the backup policy" + }, + "frequency_unit": { + "type": "string", + "description": "The unit for the frequency of the backup policy" + }, + "schedule_day": { + "type": "string", + "enum": [ + "0..6" + ], + "description": "Day of the week that the backup is scheduled" + }, + "schedule_week": { + "type": "string", + "enum": [ + "0..3" + ], + "description": "Week of 
the month that the backup is scheduled" + }, + "created_at": { + "type": "string", + "description": "When the backup policy was created" + }, + "updated_at": { + "type": "string", + "description": "When the backup policy was last updated" + }, + "last_ran_at": { + "type": "string", + "description": "When the backup was last run" + }, + "next_run_at": { + "type": "string", + "description": "When the backup will next run" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "target", + "retention_value", + "retention_unit", + "frequency_value", + "frequency_unit", + "schedule_day", + "schedule_week", + "created_at", + "updated_at", + "last_ran_at", + "next_run_at" + ] + }, + "schema_snapshot": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the schema snapshot" + }, + "url": { + "type": "string", + "description": "The URL to the schema snapshot in the PlanetScale app" + }, + "name": { + "type": "string", + "description": "The name of the schema snapshot" + }, + "created_at": { + "type": "string", + "description": "When the schema snapshot was created" + }, + "updated_at": { + "type": "string", + "description": "When the schema snapshot was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "url", + "name", + "created_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "actor", + "name", + "state", + "size", + "estimated_storage_cost", + "created_at", + "updated_at", + "required", + "backup_policy", + "schema_snapshot" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_backups` |\n| 
Database | `read_backups` |\n| Branch | `read_backups` |" + }, + "post": { + "tags": [ + "Backups" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-a-backup", + "summary": "Create a backup", + "parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name for the backup" + }, + "retention_unit": { + "type": "string", + "enum": [ + "hour", + "day", + "week", + "month", + "year" + ], + "description": "Unit for the retention period of the backup" + }, + "retention_value": { + "type": "number", + "description": "Value between `1` and `1000` for the retention period of the backup (i.e. retention_value `6` and retention_unit `hour` means 6 hours)" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "201": { + "description": "Returns the created database branch backup", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + 
"additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "name": { + "type": "string", + "description": "The name of the backup" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "running", + "success", + "failed", + "canceled", + "ignored" + ], + "description": "The current state of the backup" + }, + "size": { + "type": "number", + "description": "The size of the backup" + }, + "estimated_storage_cost": { + "type": "string", + "description": "The estimated storage cost of the backup" + }, + "created_at": { + "type": "string", + "description": "When the backup was created" + }, + "updated_at": { + "type": "string", + "description": "When the backup was last updated" + }, + "required": { + "type": "boolean", + "description": "Whether or not the backup policy is required" + }, + "restored_branches": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Branches that have been restored with this backup" + }, + "backup_policy": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup policy" + }, + "name": { + "type": "string", + "description": "The name of the backup policy" + }, + "target": { + "type": "string", + "enum": [ + "production", + "development", + "branch" + ], + "description": "Whether the backup policy is for a production or development database, or for a database branch" + }, + "retention_value": { + "type": "number", + "description": "A number value for the retention period of the backup policy" + }, + "retention_unit": { + "type": "string", + "description": "The unit for the retention period of the backup policy" + }, + "frequency_value": { + "type": "number", + "description": "A number value for the frequency of the backup policy" + }, + "frequency_unit": { + "type": "string", + "description": "The unit for the frequency of the backup policy" + }, + "schedule_day": { + "type": "string", + "enum": [ + "0..6" + ], + 
"description": "Day of the week that the backup is scheduled" + }, + "schedule_week": { + "type": "string", + "enum": [ + "0..3" + ], + "description": "Week of the month that the backup is scheduled" + }, + "created_at": { + "type": "string", + "description": "When the backup policy was created" + }, + "updated_at": { + "type": "string", + "description": "When the backup policy was last updated" + }, + "last_ran_at": { + "type": "string", + "description": "When the backup was last run" + }, + "next_run_at": { + "type": "string", + "description": "When the backup will next run" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "target", + "retention_value", + "retention_unit", + "frequency_value", + "frequency_unit", + "schedule_day", + "schedule_week", + "created_at", + "updated_at", + "last_ran_at", + "next_run_at" + ] + }, + "schema_snapshot": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the schema snapshot" + }, + "url": { + "type": "string", + "description": "The URL to the schema snapshot in the PlanetScale app" + }, + "name": { + "type": "string", + "description": "The name of the schema snapshot" + }, + "created_at": { + "type": "string", + "description": "When the schema snapshot was created" + }, + "updated_at": { + "type": "string", + "description": "When the schema snapshot was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "url", + "name", + "created_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "actor", + "name", + "state", + "size", + "estimated_storage_cost", + "created_at", + "updated_at", + "required", + "backup_policy", + "schema_snapshot" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_backups`\n\n**OAuth Scopes**\n\n | 
Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_backups` |\n| Database | `write_backups` |\n| Branch | `write_backups` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/backups/{id}": { + "get": { + "tags": [ + "Backups" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-backup", + "summary": "Get a backup", + "parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID for the backup" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a database branch backup", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "name": { + "type": "string", + "description": "The name of the backup" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "running", + "success", + "failed", + "canceled", + "ignored" + ], + 
"description": "The current state of the backup" + }, + "size": { + "type": "number", + "description": "The size of the backup" + }, + "estimated_storage_cost": { + "type": "string", + "description": "The estimated storage cost of the backup" + }, + "created_at": { + "type": "string", + "description": "When the backup was created" + }, + "updated_at": { + "type": "string", + "description": "When the backup was last updated" + }, + "required": { + "type": "boolean", + "description": "Whether or not the backup policy is required" + }, + "restored_branches": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Branches that have been restored with this backup" + }, + "backup_policy": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the backup policy" + }, + "name": { + "type": "string", + "description": "The name of the backup policy" + }, + "target": { + "type": "string", + "enum": [ + "production", + "development", + "branch" + ], + "description": "Whether the backup policy is for a production or development database, or for a database branch" + }, + "retention_value": { + "type": "number", + "description": "A number value for the retention period of the backup policy" + }, + "retention_unit": { + "type": "string", + "description": "The unit for the retention period of the backup policy" + }, + "frequency_value": { + "type": "number", + "description": "A number value for the frequency of the backup policy" + }, + "frequency_unit": { + "type": "string", + "description": "The unit for the frequency of the backup policy" + }, + "schedule_day": { + "type": "string", + "enum": [ + "0..6" + ], + "description": "Day of the week that the backup is scheduled" + }, + "schedule_week": { + "type": "string", + "enum": [ + "0..3" + ], + "description": "Week of the month that the backup is scheduled" + }, + "created_at": { + "type": "string", + "description": "When the backup policy was created" + }, + 
"updated_at": { + "type": "string", + "description": "When the backup policy was last updated" + }, + "last_ran_at": { + "type": "string", + "description": "When the backup was last run" + }, + "next_run_at": { + "type": "string", + "description": "When the backup will next run" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "target", + "retention_value", + "retention_unit", + "frequency_value", + "frequency_unit", + "schedule_day", + "schedule_week", + "created_at", + "updated_at", + "last_ran_at", + "next_run_at" + ] + }, + "schema_snapshot": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the schema snapshot" + }, + "url": { + "type": "string", + "description": "The URL to the schema snapshot in the PlanetScale app" + }, + "name": { + "type": "string", + "description": "The name of the schema snapshot" + }, + "created_at": { + "type": "string", + "description": "When the schema snapshot was created" + }, + "updated_at": { + "type": "string", + "description": "When the schema snapshot was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "url", + "name", + "created_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "actor", + "name", + "state", + "size", + "estimated_storage_cost", + "created_at", + "updated_at", + "required", + "backup_policy", + "schema_snapshot" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_backups` |\n| Database | `read_backups` |\n| Branch | `read_backups` |" + }, + "delete": { + "tags": [ + "Backups" + ], + "consumes": [ + "application/json" + ], + "operationId": "delete-a-backup", + "summary": "Delete a backup", + 
"parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the backup" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "204": { + "description": "Delete a backup" + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_backups`, `delete_production_branch_backups` |\n| Database | `delete_backups`, `delete_production_branch_backups` |\n| Branch | `delete_backups` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/passwords": { + "get": { + "tags": [ + "Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-passwords", + "summary": "List passwords", + "parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + 
"required": true, + "description": "The name of the organization the password belongs to" + }, + { + "name": "read_only_region_id", + "type": "string", + "in": "query", + "description": "A read-only region of the database branch. If present, the password results will be filtered to only those in the region" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Gets the passwords for the database branch", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the password" + }, + "name": { + "type": "string", + "description": "The display name for the password" + }, + "role": { + "type": "string", + "description": "The role for the password" + }, + "created_at": { + "type": "string", + "description": "When the password was created" + }, + "deleted_at": { + "type": "string", + "description": "When the password was deleted" + }, + "expires_at": { + "type": "string", + "description": "When the password will expire" + }, + "ttl_seconds": { + "type": "number", + "description": "Time to live (in seconds) for the password. 
The password will be invalid when TTL has passed" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "username": { + "type": "string", + "description": "The username for the password" + }, + "renewable": { + "type": "boolean", + "description": "Whether or not the password can be renewed" + }, + "database_branch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the database branch" + }, + "id": { + "type": "string", + "description": "The ID for the database branch" + }, + "production": { + "type": "boolean", + "description": "Whether or not the database branch is a production database branch" + }, + 
"access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + } + }, + "additionalProperties": false, + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + }, + "post": { + "tags": [ + "Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-a-password", + "summary": "Create a password", + "parameters": [ + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the password belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Optional name of the password" 
+ }, + "role": { + "type": "string", + "enum": [ + "reader", + "writer", + "admin", + "readwriter" + ], + "description": "The database role of the password (i.e. admin)" + }, + "ttl": { + "type": "number", + "description": "Time to live (in seconds) for the password. The password will be invalid when TTL has passed" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "422": { + "description": "Unprocessable Content" + }, + "201": { + "description": "Creates a password", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the password" + }, + "name": { + "type": "string", + "description": "The display name for the password" + }, + "role": { + "type": "string", + "description": "The role for the password" + }, + "created_at": { + "type": "string", + "description": "When the password was created" + }, + "deleted_at": { + "type": "string", + "description": "When the password was deleted" + }, + "expires_at": { + "type": "string", + "description": "When the password will expire" + }, + "ttl_seconds": { + "type": "number", + "description": "Time to live (in seconds) for the password. 
The password will be invalid when TTL has passed" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "username": { + "type": "string", + "description": "The username for the password" + }, + "renewable": { + "type": "boolean", + "description": "Whether or not the password can be renewed" + }, + "database_branch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the database branch" + }, + "id": { + "type": "string", + "description": "The ID for the database branch" + }, + "production": { + "type": "boolean", + "description": "Whether or not the database branch is a production database branch" + }, + 
"access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + } + }, + "additionalProperties": false, + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ] + }, + "plain_text": { + "type": "string", + "description": "The plain text password" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch", + "plain_text" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, `create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}": { + "get": { + "tags": [ + "Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-password", + "summary": "Get a password", + "parameters": [ + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the password" + }, + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the 
password belongs to" + }, + { + "name": "read_only_region_id", + "type": "string", + "in": "query", + "description": "A read-only region of the database branch. If present, the password results will be filtered to only those in the region" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Gets the password", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the password" + }, + "name": { + "type": "string", + "description": "The display name for the password" + }, + "role": { + "type": "string", + "description": "The role for the password" + }, + "created_at": { + "type": "string", + "description": "When the password was created" + }, + "deleted_at": { + "type": "string", + "description": "When the password was deleted" + }, + "expires_at": { + "type": "string", + "description": "When the password will expire" + }, + "ttl_seconds": { + "type": "number", + "description": "Time to live (in seconds) for the password. The password will be invalid when TTL has passed" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "username": { + "type": "string", + "description": "The username for the password" + }, + "renewable": { + "type": "boolean", + "description": "Whether or not the password can be renewed" + }, + "database_branch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the database branch" + }, + "id": { + "type": "string", + "description": "The ID for the database branch" + }, + "production": { + "type": "boolean", + "description": "Whether or not the database branch is a production database branch" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + } + }, + "additionalProperties": false, + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, 
`create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + }, + "patch": { + "tags": [ + "Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "update-a-password", + "summary": "Update a password", + "parameters": [ + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the password" + }, + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the password belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the password" + } + }, + "additionalProperties": false, + "required": [ + "name" + ] + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Updates a password", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the password" + }, + "name": { + "type": "string", + "description": "The display name for the password" + }, + "role": { + "type": "string", + "description": "The role for the password" + }, + "created_at": { + "type": "string", + "description": "When the password 
was created" + }, + "deleted_at": { + "type": "string", + "description": "When the password was deleted" + }, + "expires_at": { + "type": "string", + "description": "When the password will expire" + }, + "ttl_seconds": { + "type": "number", + "description": "Time to live (in seconds) for the password. The password will be invalid when TTL has passed" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "username": { + "type": "string", + "description": "The username for the password" + }, + "renewable": { + "type": "boolean", + "description": "Whether or not the password can be renewed" + }, + "database_branch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the database branch" + }, + "id": { + "type": "string", + "description": "The ID for the database branch" + }, + "production": { + "type": "boolean", + "description": "Whether or not the database branch is a production database branch" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + } + }, + "additionalProperties": false, + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, 
`create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + }, + "delete": { + "tags": [ + "Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "delete-a-password", + "summary": "Delete a password", + "parameters": [ + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the password" + }, + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the password belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "204": { + "description": "Deletes the password" + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_production_branch_password`, `delete_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{branch}/passwords/{id}/renew": { + "post": { + "tags": [ + 
"Database branch passwords" + ], + "consumes": [ + "application/json" + ], + "operationId": "renew-a-password", + "summary": "Renew a password", + "parameters": [ + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the password" + }, + { + "name": "branch", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch the password belongs to" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the password belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the password belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "read_only_region_id": { + "type": "string", + "description": "A read-only region of the database branch. If present, the password results will be filtered to only those in the region" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Updates a password", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the password" + }, + "name": { + "type": "string", + "description": "The display name for the password" + }, + "role": { + "type": "string", + "description": "The role for the password" + }, + "created_at": { + "type": "string", + "description": "When the password was created" + }, + "deleted_at": { + "type": "string", + "description": "When the password was deleted" + }, + "expires_at": { + "type": "string", + "description": "When the password will expire" + }, + "ttl_seconds": { + "type": "number", + "description": "Time to live (in 
seconds) for the password. The password will be invalid when TTL has passed" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "username": { + "type": "string", + "description": "The username for the password" + }, + "renewable": { + "type": "boolean", + "description": "Whether or not the password can be renewed" + }, + "database_branch": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The name for the database branch" + }, + "id": { + "type": "string", + "description": "The ID for the database branch" + }, + "production": { + "type": "boolean", + "description": "Whether or not the database branch is a production 
database branch" + }, + "access_host_url": { + "type": "string", + "description": "The host URL for the password" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + } + }, + "additionalProperties": false, + "required": [ + "name", + "id", + "production", + "access_host_url", + "mysql_edge_address" + ] + }, + "plain_text": { + "type": "string", + "description": "The plain text password" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "role", + "created_at", + "ttl_seconds", + "access_host_url", + "renewable", + "database_branch", + "plain_text" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`, `create_branch_password`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `manage_passwords`, `manage_production_branch_passwords` |\n| Database | `manage_passwords`, `manage_production_branch_passwords` |\n| Branch | `manage_passwords` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}": { + "get": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-branch", + "summary": "Get a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + 
"description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns information about a branch", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The 
URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + "html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |" + }, + "delete": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "delete-a-branch", + "summary": "Delete a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + 
"in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "204": { + "description": "Delete a branch" + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_branches`, `delete_production_branches` |\n| Database | `delete_branches`, `delete_production_branches` |\n| Branch | `delete_branch` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/demote": { + "post": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "demote-a-branch", + "summary": "Demote a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a development branch", + "schema": { + "type": 
"object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": 
"object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + "html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", 
+ "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "Demotes a branch from production to development\n### Authorization\nAn OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `demote_branches` |\n| Database | `demote_branches` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/promote": { + "post": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "promote-a-branch", + "summary": "Promote a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a production branch", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + 
"description": "The access host URL for the branch. This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + 
"created_at", + "updated_at", + "deleted_at" + ] + }, + "html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "Promotes a branch from development to production\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `connect_production_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `promote_branches` |\n| Database | `promote_branches` |" + } + }, + 
"/organizations/{organization}/databases/{database}/branches/{name}/safe-migrations": { + "post": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "enable-safe-migrations-for-a-branch", + "summary": "Enable safe migrations for a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the branch with safe migrations enabled", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + 
"html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "\n" + }, + "delete": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "disable-safe-migrations-for-a-branch", + "summary": "Disable safe migrations for a branch", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch 
belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the branch with safe migrations disabled", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the branch" + }, + "name": { + "type": "string", + "description": "The name of the branch" + }, + "created_at": { + "type": "string", + "description": "When the branch was created" + }, + "updated_at": { + "type": "string", + "description": "When the branch was last updated" + }, + "restore_checklist_completed_at": { + "type": "string", + "description": "When a user last marked a backup restore checklist as completed" + }, + "access_host_url": { + "type": "string", + "description": "The access host URL for the branch. 
This is a legacy field, use `mysql_edge_address`" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the schema for the branch was last updated" + }, + "mysql_address": { + "type": "string", + "description": "The MySQL address for the branch" + }, + "mysql_edge_address": { + "type": "string", + "description": "The address of the MySQL provider for the branch" + }, + "initial_restore_id": { + "type": "string", + "description": "The ID of the backup from which the branch was restored" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the branch is ready to serve queries" + }, + "production": { + "type": "boolean", + "description": "Whether or not the branch is a production branch" + }, + "sharded": { + "type": "boolean", + "description": "Whether or not the branch is sharded" + }, + "shard_count": { + "type": "number", + "description": "The number of shards in the branch" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "restored_from_branch": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the resource" + }, + "name": { + "type": "string", + "description": "The name for the resource" + }, + "created_at": { + "type": "string", + "description": "When the resource was created" + }, + "updated_at": { + "type": "string", + "description": "When the resource was last updated" + }, + "deleted_at": { + "type": "string", + "description": "When the resource was deleted, if deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "deleted_at" + ] + }, + 
"html_url": { + "type": "string", + "description": "Planetscale app URL for the branch" + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "parent_branch": { + "type": "string", + "description": "The name of the parent branch from which the branch was created" + }, + "cluster_rate_name": { + "type": "string", + "description": "The SKU representing the branch's cluster size" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "created_at", + "updated_at", + "schema_last_updated_at", + "mysql_address", + "mysql_edge_address", + "ready", + "production", + "sharded", + "html_url", + "cluster_rate_name" + ] + } + } + }, + "description": "\n" + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/schema": { + "get": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-branch-schema", + "summary": "Get a branch schema", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + 
"description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "keyspace", + "type": "string", + "in": "query", + "description": "If provided, the schema for this keyspace is returned" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Gets the schema for the branch", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the table" + }, + "html": { + "type": "string", + "description": "Syntax highlighted HTML for the table's schema" + }, + "raw": { + "type": "string", + "description": "The table's schema" + } + }, + "additionalProperties": false, + "required": [ + "name", + "html", + "raw" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |" + } + }, + "/organizations/{organization}/databases/{database}/branches/{name}/schema/lint": { + "get": { + "tags": [ + "Database branches" + ], + "consumes": [ + "application/json" + ], + "operationId": "lint-a-branch-schema", + "summary": "Lint a branch schema", + "parameters": [ + { + "name": "name", + "type": "string", + "in": 
"path", + "required": true, + "description": "The name of the branch" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the branch belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the branch belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a list of schema errors for a branch", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "lint_error": { + "type": "string", + "description": "Code representing" + }, + "subject_type": { + "type": "string", + "enum": [ + "table", + "vschema", + "routing_rules" + ], + "description": "The subject for the errors" + }, + "keyspace_name": { + "type": "string", + "description": "The keyspace of the schema with the error" + }, + "table_name": { + "type": "string", + "description": "The table with the error" + }, + "error_description": { + "type": "string", + "description": "A description for the error that occurred" + }, + "docs_url": { + "type": "string", + "description": "A link to the documentation related to the error" + }, + "column_name": { + "type": "string", + "description": "The column in a table relevant to the error" + }, + "foreign_key_column_names": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of 
invalid foreign key columns in a table" + }, + "auto_increment_column_names": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of invalid auto-incremented columns" + }, + "charset_name": { + "type": "string", + "description": "The charset of the schema" + }, + "engine_name": { + "type": "string", + "description": "The engine of the schema" + }, + "vindex_name": { + "type": "string", + "description": "The name of the vindex for the schema" + }, + "json_path": { + "type": "string", + "description": "The path for an invalid JSON column" + }, + "check_constraint_name": { + "type": "string", + "description": "The name of the invalid check constraint" + }, + "enum_value": { + "type": "string", + "description": "The name of the invalid enum value" + }, + "partitioning_type": { + "type": "string", + "description": "The name of the invalid partitioning type" + }, + "partition_name": { + "type": "string", + "description": "The name of the invalid partition in the schema" + } + }, + "additionalProperties": false, + "required": [ + "lint_error", + "subject_type", + "keyspace_name", + "table_name", + "error_description", + "docs_url", + "column_name", + "foreign_key_column_names", + "auto_increment_column_names", + "charset_name", + "engine_name", + "vindex_name", + "json_path", + "check_constraint_name", + "enum_value", + "partitioning_type", + "partition_name" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_branch`, `delete_branch`, `create_branch`, `connect_production_branch`, `connect_branch`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |\n| Branch | `read_branch` |" + } + }, + 
"/organizations/{organization}/databases/{database}/deploy-queue": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-the-deploy-queue", + "summary": "Get the deploy queue", + "parameters": [ + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the deploy queue for a database", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for a deployment" + }, + "auto_cutover": { + "type": "boolean", + "description": "Whether or not to automatically cutover once deployment is finished" + }, + "created_at": { + "type": "string", + "description": "When the deployment was created" + }, + "cutover_at": { + "type": "string", + "description": "When the cutover for the deployment was initiated" + }, + "cutover_expiring": { + "type": "boolean", + "description": "Whether or not the deployment cutover will expire soon and be cancelled" + }, + "deploy_check_errors": { + "type": "string", + "description": "Deploy check errors for the deployment" + }, + "finished_at": { + "type": "string", + "description": "When the deployment was finished" + }, + "queued_at": { + "type": "string", + "description": "When the deployment was queued" + }, + "ready_to_cutover_at": { + "type": "string", + "description": "When the deployment was ready for cutover" + }, + "started_at": { + "type": "string", + "description": "When the deployment was started" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", 
+ "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The state the deployment is in" + }, + "submitted_at": { + "type": "string", + "description": "When the deployment was submitted" + }, + "updated_at": { + "type": "string", + "description": "When the deployment was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "The deploy queue returns the current list of deploy requests in the order they will be deployed.\n" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-deploy-requests", + "summary": "List deploy requests", + "parameters": [ + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + }, + { + "name": "state", + "type": "string", + "in": "query", + "description": "Filter by state of the deploy request (open, closed, deployed)" + }, + { + "name": "branch", + "type": "string", + "in": "query", + "description": 
"Filter by the name of the branch the deploy request is created from" + }, + { + "name": "into_branch", + "type": "string", + "in": "query", + "description": "Filter by the name of the branch the deploy request will be merged into" + } + ], + "responses": { + "200": { + "description": "Returns a list of deploy requests for a database", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the 
actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + 
"deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "List deploy requests for a database\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |" + }, + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-a-deploy-request", + "summary": "Create a deploy request", + "parameters": [ + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "branch": { + "type": "string", + "description": "The name of the branch the deploy request is created from" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "notes": { + "type": "string", + "description": "Notes about the deploy request" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + 
"description": "Returns the created deploy request", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + 
"into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + }, + "deployment": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for a deployment" + }, + "auto_cutover": { + "type": "boolean", + "description": "Whether or not to automatically cutover once deployment is finished" + }, + 
"created_at": { + "type": "string", + "description": "When the deployment was created" + }, + "cutover_at": { + "type": "string", + "description": "When the cutover for the deployment was initiated" + }, + "cutover_expiring": { + "type": "boolean", + "description": "Whether or not the deployment cutover will expire soon and be cancelled" + }, + "deploy_check_errors": { + "type": "string", + "description": "Deploy check errors for the deployment" + }, + "finished_at": { + "type": "string", + "description": "When the deployment was finished" + }, + "queued_at": { + "type": "string", + "description": "When the deployment was queued" + }, + "ready_to_cutover_at": { + "type": "string", + "description": "When the deployment was ready for cutover" + }, + "started_at": { + "type": "string", + "description": "When the deployment was started" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The state the deployment is in" + }, + "submitted_at": { + "type": "string", + "description": "When the deployment was submitted" + }, + "updated_at": { + "type": "string", + "description": "When the deployment was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + 
"notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at", + "deployment" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_requests`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_deploy_requests` |\n| Database | `write_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-deploy-request", + "summary": "Get a deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns information about a deploy request", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": 
"object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + 
"in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + }, + "deployment": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for a deployment" + }, + "auto_cutover": { + "type": "boolean", + "description": "Whether or not to automatically cutover once deployment is finished" + }, + "created_at": { + "type": "string", + "description": "When the deployment was created" + }, + "cutover_at": { + "type": "string", + "description": "When the cutover for the deployment was initiated" + }, + "cutover_expiring": { + "type": "boolean", + "description": "Whether or not the deployment cutover will expire soon and be cancelled" + }, + "deploy_check_errors": { + "type": "string", + "description": "Deploy check errors for the deployment" + }, + "finished_at": { + "type": "string", + "description": "When the deployment was finished" + }, + "queued_at": { + "type": "string", + "description": "When the deployment was queued" + }, + "ready_to_cutover_at": { + "type": "string", + "description": "When 
the deployment was ready for cutover" + }, + "started_at": { + "type": "string", + "description": "When the deployment was started" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The state the deployment is in" + }, + "submitted_at": { + "type": "string", + "description": "When the deployment was submitted" + }, + "updated_at": { + "type": "string", + "description": "When the deployment was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at", + "deployment" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |" + }, + "patch": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "close-a-deploy-request", + "summary": "Close a deploy request", + "parameters": [ + { + "name": "number", + 
"type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "closed" + ], + "description": "The deploy request will be updated to this state" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the updated deploy request", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether 
or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML 
body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + }, + "deployment": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for a deployment" + }, + "auto_cutover": { + "type": "boolean", + "description": "Whether or not to automatically cutover once deployment is finished" + }, + "created_at": { + "type": "string", + "description": "When the deployment was created" + }, + "cutover_at": { + "type": "string", + "description": "When the cutover for the deployment was initiated" + }, + "cutover_expiring": { + "type": "boolean", + "description": "Whether or not the deployment cutover will expire soon and be cancelled" + }, + "deploy_check_errors": { + "type": "string", + "description": "Deploy check errors for the deployment" + }, + "finished_at": { + "type": "string", + "description": "When the deployment was finished" + }, + "queued_at": { + "type": "string", + "description": "When the deployment was queued" + }, + "ready_to_cutover_at": { + "type": "string", + "description": "When the deployment was ready for cutover" + }, + "started_at": { + "type": "string", + "description": "When the deployment was started" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The state the 
deployment is in" + }, + "submitted_at": { + "type": "string", + "description": "When the deployment was submitted" + }, + "updated_at": { + "type": "string", + "description": "When the deployment was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ] + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at", + "deployment" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_deploy_requests` |\n| Database | `write_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/apply-deploy": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "complete-a-gated-deploy-request", + "summary": "Complete a gated deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": 
"Returns the deploy request whose deployment has been completed", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, 
+ "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + 
"deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/auto-apply": { + "put": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "update-auto-apply-for-deploy-request", + "summary": "Update auto-apply for deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "enable": { + "type": "boolean", + "description": "Whether or not to enable auto-apply for the deploy request" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose auto-apply setting was updated", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": 
"The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is 
approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "Enables or disables the auto-apply setting for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token 
Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/cancel": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "cancel-a-queued-deploy-request", + "summary": "Cancel a queued deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose deployment was canceled", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the 
actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": 
"The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/complete-deploy": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "complete-an-errored-deploy", + "summary": "Complete an errored deploy", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + 
}, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the completed deploy request", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + 
"additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + 
"description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deploy": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "queue-a-deploy-request", + "summary": "Queue a deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the deploy request belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the deploy request belongs to" + } + ], + "responses": { + "200": { + "description": "Returns the deployed deploy request", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + 
"id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + 
"description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n 
`read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/deployment": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-deployment", + "summary": "Get a deployment", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the deployment for a deploy request", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for a deployment" + }, + "auto_cutover": { + "type": "boolean", + "description": "Whether or not to automatically cutover once deployment is finished" + }, + "created_at": { + "type": "string", + "description": "When the deployment was created" + }, + "cutover_at": { + "type": "string", + "description": "When the cutover for the deployment was initiated" + }, + "cutover_expiring": { + "type": "boolean", + "description": "Whether or not the deployment cutover will expire soon and be cancelled" + }, + "deploy_check_errors": { + "type": "string", + "description": "Deploy check errors for the deployment" + }, + "finished_at": { + "type": "string", + "description": "When the deployment was finished" + }, + "queued_at": { + "type": "string", + "description": "When the deployment was queued" + }, + "ready_to_cutover_at": { + "type": "string", + "description": 
"When the deployment was ready for cutover" + }, + "started_at": { + "type": "string", + "description": "When the deployment was started" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The state the deployment is in" + }, + "submitted_at": { + "type": "string", + "description": "When the deployment was submitted" + }, + "updated_at": { + "type": "string", + "description": "When the deployment was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "auto_cutover", + "created_at", + "cutover_expiring", + "state", + "submitted_at", + "updated_at" + ] + } + } + }, + "description": "Get the deployment for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/operations": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-deploy-operations", + "summary": "List deploy operations", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the deploy request belongs to" + }, + { + 
"name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the deploy request belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "200": { + "description": "Returns a list of deploy operations for the deploy request", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID for the deploy operation" + }, + "state": { + "type": "string", + "enum": [ + "pending", + "queued", + "in_progress", + "complete", + "cancelled", + "error" + ], + "description": "The state of the deploy operation" + }, + "keyspace_name": { + "type": "string", + "description": "The keyspace modified by the deploy operation" + }, + "table_name": { + "type": "string", + "description": "The name of the table modifed by the deploy operation" + }, + "operation_name": { + "type": "string", + "description": "The operation name of the deploy operation" + }, + "eta_seconds": { + "type": "number", + "description": "The estimated seconds until completion for the deploy operation" + }, + "progress_percentage": { + "type": "number", + "description": "The percent completion for the deploy operation" + }, + "deploy_error_docs_url": { + "type": "string", + "description": "A link to documentation explaining the deploy error, if present" + }, + "ddl_statement": { + "type": "string", + "description": "The DDL statement for the deploy operation" + }, + "syntax_highlighted_ddl": { + "type": "string", + "description": "A syntax-highlighted DDL statement for the deploy operation" + }, + "created_at": { + "type": "string", + 
"description": "When the deploy operation was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy operation was last updated" + }, + "can_drop_data": { + "type": "boolean", + "description": "Whether or not the deploy operation is capable of dropping data" + }, + "table_recently_used": { + "type": "boolean", + "description": "Whether or not the table modified by the deploy operation was recently used" + }, + "table_recently_used_at": { + "type": "string", + "description": "When the table modified by the deploy operation was last used" + }, + "deploy_errors": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Deploy errors for the deploy operation" + } + }, + "additionalProperties": false, + "required": [ + "id", + "state", + "keyspace_name", + "table_name", + "operation_name", + "eta_seconds", + "progress_percentage", + "deploy_error_docs_url", + "ddl_statement", + "syntax_highlighted_ddl", + "created_at", + "updated_at", + "can_drop_data", + "table_recently_used", + "table_recently_used_at", + "deploy_errors" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "List deploy operations for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/revert": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "complete-a-revert", + "summary": "Complete a revert", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy 
request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the deploy request that was reverted", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's 
avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + "complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + 
"type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/reviews": { + "get": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-deploy-request-reviews", + "summary": "List deploy request reviews", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the deploy request belongs to" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the deploy request belongs to" + } + ], + "responses": { + "200": { + "description": "Returns an array of deploy request reviews", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The id of the review" + }, + "actor": { + "type": "object", 
+ "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "body": { + "type": "string", + "description": "The text body of the review" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the review" + }, + "state": { + "type": "string", + "enum": [ + "commented", + "approved" + ], + "description": "Whether the review is a comment or approval" + }, + "created_at": { + "type": "string", + "description": "When the review was created" + }, + "updated_at": { + "type": "string", + "description": "When the review was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "actor", + "body", + "html_body", + "state", + "created_at", + "updated_at" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_deploy_requests` |\n| Database | `read_deploy_requests` |" + }, + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "review-a-deploy-request", + "summary": "Review a deploy request", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database the deploy request belongs to" + }, + { + "name": 
"organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the deploy request belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "state": { + "type": "string", + "enum": [ + "commented", + "approved" + ], + "description": "Whether the review is a comment or approval. Service tokens must have corresponding access (either `approve_deploy_request` or `review_deploy_request`)" + }, + "body": { + "type": "string", + "description": "Deploy request review comments" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "201": { + "description": "Returns the created deploy request review", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The id of the review" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "body": { + "type": "string", + "description": "The text body of the review" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the review" + }, + "state": { + "type": "string", + "enum": [ + "commented", + "approved" + ], + "description": "Whether the review is a comment or approval" + }, + "created_at": { + "type": "string", + "description": "When the review was created" + }, + "updated_at": { + "type": "string", + "description": "When the review was last updated" + } + }, + "additionalProperties": false, + "required": [ + "id", + "actor", + "body", + "html_body", + "state", + "created_at", + "updated_at" + ] + } + } + }, + "description": "Review a deploy request by either approving or commenting on 
the deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `approve_deploy_request`, `review_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `approve_deploy_requests` |\n| Database | `approve_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{database}/deploy-requests/{number}/skip-revert": { + "post": { + "tags": [ + "Deploy requests" + ], + "consumes": [ + "application/json" + ], + "operationId": "skip-revert-period", + "summary": "Skip revert period", + "parameters": [ + { + "name": "number", + "type": "string", + "in": "path", + "required": true, + "description": "The number of the deploy request" + }, + { + "name": "database", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the deploy request's organization" + } + ], + "responses": { + "200": { + "description": "Returns the deploy request whose deploy revert was skipped", + "schema": { + "type": "object", + "properties": { + "number": { + "type": "number", + "description": "The number of the deploy request" + }, + "id": { + "type": "string", + "description": "The ID of the deploy request" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "closed_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the 
actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch": { + "type": "string", + "description": "The name of the branch the deploy request was created from" + }, + "branch_deleted": { + "type": "boolean", + "description": "Whether or not the deploy request branch was deleted" + }, + "branch_deleted_by": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "branch_deleted_at": { + "type": "string", + "description": "When the deploy request branch was deleted" + }, + "into_branch": { + "type": "string", + "description": "The name of the branch the deploy request will be merged into" + }, + "into_branch_sharded": { + "type": "boolean", + "description": "Whether or not the branch the deploy request will be merged into is sharded" + }, + "into_branch_shard_count": { + "type": "number", + "description": "The number of shards the branch the deploy request will be merged into has" + }, + "approved": { + "type": "boolean", + "description": "Whether or not the deploy request is approved" + }, + "state": { + "type": "string", + "enum": [ + "open", + "closed" + ], + "description": "Whether the deploy request is open or closed" + }, + "deployment_state": { + "type": "string", + "enum": [ + "pending", + "ready", + "no_changes", + "queued", + "submitting", + "in_progress", + "pending_cutover", + "in_progress_vschema", + "in_progress_cancel", + "in_progress_cutover", + "complete", + 
"complete_cancel", + "complete_error", + "complete_pending_revert", + "in_progress_revert", + "complete_revert", + "complete_revert_error", + "cancelled", + "error" + ], + "description": "The deployment state of the deploy request" + }, + "html_url": { + "type": "string", + "description": "The PlanetScale app address for the deploy request" + }, + "notes": { + "type": "string", + "description": "Notes on the deploy request" + }, + "html_body": { + "type": "string", + "description": "The HTML body of the deploy request" + }, + "created_at": { + "type": "string", + "description": "When the deploy request was created" + }, + "updated_at": { + "type": "string", + "description": "When the deploy request was last updated" + }, + "closed_at": { + "type": "string", + "description": "When the deploy request was closed" + }, + "deployed_at": { + "type": "string", + "description": "When the deploy request was deployed" + } + }, + "additionalProperties": false, + "required": [ + "number", + "id", + "actor", + "closed_by", + "branch", + "branch_deleted", + "branch_deleted_by", + "branch_deleted_at", + "into_branch", + "into_branch_sharded", + "into_branch_shard_count", + "approved", + "state", + "deployment_state", + "html_url", + "notes", + "html_body", + "created_at", + "updated_at", + "closed_at", + "deployed_at" + ] + } + } + }, + "description": "Skips the revert period for a deploy request\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_deploy_request`, `create_deploy_request`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `deploy_deploy_requests` |\n| Database | `deploy_deploy_requests` |" + } + }, + "/organizations/{organization}/databases/{name}": { + "get": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-a-database", + "summary": "Get a 
database", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a database", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the database" + }, + "type": { + "type": "string", + "description": "The object type" + }, + "url": { + "type": "string", + "description": "The URL to the database API endpoint" + }, + "branches_url": { + "type": "string", + "description": "The URL to retrieve this database's branches via the API" + }, + "branches_count": { + "type": "number", + "description": "The total number of database branches" + }, + "development_branches_count": { + "type": "number", + "description": "The total number of database development branches" + }, + "production_branches_count": { + "type": "number", + "description": "The total number of database production branches" + }, + "issues_count": { + "type": "number", + "description": "The total number of ongoing issues within a database" + }, + "multiple_admins_required_for_deletion": { + "type": "boolean", + "description": "If the database requires multiple admins for deletion" + }, + "ready": { + "type": "boolean", + "description": "If the database is ready to be used" + }, + "at_development_branch_limit": { + "type": "boolean", + "description": "If the database has reached its development branch limit" + }, + "at_backup_restore_branches_limit": { + "type": "boolean", + "description": "If the database has reached its backup restored branch limit" + }, + 
"data_import": { + "type": "object", + "properties": { + "state": { + "type": "string" + }, + "import_check_errors": { + "type": "string" + }, + "started_at": { + "type": "string" + }, + "finished_at": { + "type": "string" + }, + "data_source": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "port": { + "type": "string" + }, + "database": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "hostname", + "port", + "database" + ] + } + }, + "additionalProperties": false, + "required": [ + "state", + "import_check_errors", + "started_at", + "finished_at", + "data_source" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "html_url": { + "type": "string", + "description": "The URL to see this database's branches in the web UI" + }, + "name": { + "type": "string", + "description": "Name of the database" + }, + "state": { + "type": "string", + "description": "State of the database" + }, + "sharded": { + "type": "boolean", + "description": "If the database is sharded" + }, + "default_branch_shard_count": { + "type": "number", + "description": "Number of shards in the default branch" + }, + 
"default_branch_read_only_regions_count": { + "type": "number", + "description": "Number of read only regions in the default branch" + }, + "default_branch_table_count": { + "type": "number", + "description": "Number of tables in the default branch schema" + }, + "default_branch": { + "type": "string", + "description": "The default branch for the database" + }, + "require_approval_for_deploy": { + "type": "boolean", + "description": "Whether an approval is required to deploy schema changes to this database" + }, + "allow_data_branching": { + "type": "boolean", + "description": "Whether seeding branches with data is enabled for all branches" + }, + "automatic_migrations": { + "type": "boolean", + "description": "Whether to automatically manage Rails migrations during deploy requests" + }, + "restrict_branch_region": { + "type": "boolean", + "description": "Whether to restrict branch creation to one region" + }, + "insights_raw_queries": { + "type": "boolean", + "description": "Whether raw SQL queries are collected" + }, + "plan": { + "type": "string", + "description": "The database plan" + }, + "production_branch_web_console": { + "type": "boolean", + "description": "Whether web console is enabled for production branches" + }, + "migration_table_name": { + "type": "string", + "description": "Table name to use for copying schema migration data" + }, + "migration_framework": { + "type": "string", + "description": "Framework used for applying migrations" + }, + "created_at": { + "type": "string" + }, + "updated_at": { + "type": "string" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the default branch schema was last changed." 
+ } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "url", + "branches_url", + "branches_count", + "development_branches_count", + "production_branches_count", + "issues_count", + "multiple_admins_required_for_deletion", + "ready", + "at_development_branch_limit", + "at_backup_restore_branches_limit", + "region", + "html_url", + "name", + "state", + "sharded", + "default_branch_shard_count", + "default_branch_read_only_regions_count", + "default_branch_table_count", + "default_branch", + "require_approval_for_deploy", + "allow_data_branching", + "restrict_branch_region", + "insights_raw_queries", + "plan", + "production_branch_web_console", + "created_at", + "updated_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |\n| Database | `read_database` |" + }, + "patch": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "update-database-settings", + "summary": "Update database settings", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the 
organization the database belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "automatic_migrations": { + "type": "boolean", + "description": "Whether or not to copy migration data to new branches and in deploy requests." + }, + "migration_framework": { + "type": "string", + "description": "A migration framework to use on the database" + }, + "migration_table_name": { + "type": "string", + "description": "Name of table to use as migration table for the database" + }, + "require_approval_for_deploy": { + "type": "boolean", + "description": "Whether or not deploy requests must be approved by a database administrator other than the request creator" + }, + "restrict_branch_region": { + "type": "boolean", + "description": "Whether or not to limit branch creation to the AWS us-east-1 region." + }, + "allow_data_branching": { + "type": "boolean", + "description": "Whether or not data branching is allowed on the database" + }, + "insights_raw_queries": { + "type": "boolean", + "description": "Whether or not full queries should be collected from the database" + }, + "production_branch_web_console": { + "type": "boolean", + "description": "Whether or not the web console can be used on the production branch of the database" + }, + "default_branch": { + "type": "string", + "description": "The default branch of the database" + } + }, + "additionalProperties": false + } + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the updated database", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the database" + }, + "type": { + "type": "string", + "description": "The object type" + }, + "url": { + "type": "string", + "description": "The URL to the database API 
endpoint" + }, + "branches_url": { + "type": "string", + "description": "The URL to retrieve this database's branches via the API" + }, + "branches_count": { + "type": "number", + "description": "The total number of database branches" + }, + "development_branches_count": { + "type": "number", + "description": "The total number of database development branches" + }, + "production_branches_count": { + "type": "number", + "description": "The total number of database production branches" + }, + "issues_count": { + "type": "number", + "description": "The total number of ongoing issues within a database" + }, + "multiple_admins_required_for_deletion": { + "type": "boolean", + "description": "If the database requires multiple admins for deletion" + }, + "ready": { + "type": "boolean", + "description": "If the database is ready to be used" + }, + "at_development_branch_limit": { + "type": "boolean", + "description": "If the database has reached its development branch limit" + }, + "at_backup_restore_branches_limit": { + "type": "boolean", + "description": "If the database has reached its backup restored branch limit" + }, + "data_import": { + "type": "object", + "properties": { + "state": { + "type": "string" + }, + "import_check_errors": { + "type": "string" + }, + "started_at": { + "type": "string" + }, + "finished_at": { + "type": "string" + }, + "data_source": { + "type": "object", + "properties": { + "hostname": { + "type": "string" + }, + "port": { + "type": "string" + }, + "database": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "hostname", + "port", + "database" + ] + } + }, + "additionalProperties": false, + "required": [ + "state", + "import_check_errors", + "started_at", + "finished_at", + "data_source" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + }, + "html_url": { + "type": "string", + "description": "The URL to see this database's branches in the web UI" + }, + "name": { + "type": "string", + "description": "Name of the database" + }, + "state": { + "type": "string", + "description": "State of the database" + }, + "sharded": { + "type": "boolean", + "description": "If the database is sharded" + }, + "default_branch_shard_count": { + "type": "number", + "description": "Number of shards in the default branch" + }, + "default_branch_read_only_regions_count": { + "type": "number", + "description": "Number of read only regions in the default branch" + }, + "default_branch_table_count": { + "type": "number", + "description": "Number of tables in the default branch schema" + }, + "default_branch": { + "type": "string", + "description": "The default branch for the database" + }, + "require_approval_for_deploy": { + "type": "boolean", + "description": "Whether an approval is required to deploy schema changes to this database" + }, + "allow_data_branching": { + "type": "boolean", + "description": "Whether seeding branches with data is enabled for all branches" + }, + "automatic_migrations": { + "type": "boolean", + "description": "Whether to automatically manage Rails migrations during deploy requests" + }, + "restrict_branch_region": { + "type": "boolean", + 
"description": "Whether to restrict branch creation to one region" + }, + "insights_raw_queries": { + "type": "boolean", + "description": "Whether raw SQL queries are collected" + }, + "plan": { + "type": "string", + "description": "The database plan" + }, + "production_branch_web_console": { + "type": "boolean", + "description": "Whether web console is enabled for production branches" + }, + "migration_table_name": { + "type": "string", + "description": "Table name to use for copying schema migration data" + }, + "migration_framework": { + "type": "string", + "description": "Framework used for applying migrations" + }, + "created_at": { + "type": "string" + }, + "updated_at": { + "type": "string" + }, + "schema_last_updated_at": { + "type": "string", + "description": "When the default branch schema was last changed." + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "url", + "branches_url", + "branches_count", + "development_branches_count", + "production_branches_count", + "issues_count", + "multiple_admins_required_for_deletion", + "ready", + "at_development_branch_limit", + "at_backup_restore_branches_limit", + "region", + "html_url", + "name", + "state", + "sharded", + "default_branch_shard_count", + "default_branch_read_only_regions_count", + "default_branch_table_count", + "default_branch", + "require_approval_for_deploy", + "allow_data_branching", + "restrict_branch_region", + "insights_raw_queries", + "plan", + "production_branch_web_console", + "created_at", + "updated_at" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_database`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `write_databases` |\n| Database | `write_database` |" + }, + "delete": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + 
], + "operationId": "delete-a-database", + "summary": "Delete a database", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "204": { + "description": "Deletes a database" + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_database`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `delete_databases` |\n| Database | `delete_database` |" + } + }, + "/organizations/{organization}/databases/{name}/read-only-regions": { + "get": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-read-only-regions", + "summary": "List read-only regions", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + 
}, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "List of the database's read-only regions", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the read-only region" + }, + "created_at": { + "type": "string", + "description": "When the read-only region was created" + }, + "display_name": { + "type": "string", + "description": "The name of the read-only region" + }, + "ready_at": { + "type": "string", + "description": "When the read-only region was ready to serve queries" + }, + "updated_at": { + "type": "string", + "description": "When the read-only region was last updated" + }, + "ready": { + "type": "boolean", + "description": "Whether or not the read-only region is ready to serve queries" + }, + "actor": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the actor" + }, + "display_name": { + "type": "string", + "description": "The name of the actor" + }, + "avatar_url": { + "type": "string", + "description": "The URL of the actor's avatar" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url" + ] + }, + "region": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "created_at", + "display_name", + "ready_at", + "updated_at", + "ready", + "actor", + "region" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "List read-only regions for the database's default branch\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_branches` |\n| Database | `read_branches` |" + } + }, + "/organizations/{organization}/databases/{name}/regions": { + "get": { + "tags": [ + "Databases" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-database-regions", + 
"summary": "List database regions", + "parameters": [ + { + "name": "name", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the database" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the database belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the available regions for a database", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the region" + }, + "provider": { + "type": "string", + "description": "Provider for the region (ex. 
AWS)" + }, + "enabled": { + "type": "boolean", + "description": "Whether or not the region is currently active" + }, + "public_ip_addresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Public IP addresses for the region" + }, + "display_name": { + "type": "string", + "description": "Name of the region" + }, + "location": { + "type": "string", + "description": "Location of the region" + }, + "slug": { + "type": "string", + "description": "The slug of the region" + } + }, + "additionalProperties": false, + "required": [ + "id", + "provider", + "enabled", + "public_ip_addresses", + "display_name", + "location", + "slug" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token or OAuth token must have at least one of the following access or scopes in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_database`, `delete_database`, `write_database`, `read_branch`, `delete_branch`, `create_branch`, `delete_production_branch`, `connect_branch`, `connect_production_branch`, `delete_branch_password`, `delete_production_branch_password`, `read_deploy_request`, `create_deploy_request`, `approve_deploy_request`, `read_comment`, `create_comment`, `restore_backup`, `restore_production_branch_backup`, `read_backups`, `write_backups`, `delete_backups`, `delete_production_branch_backups`\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| Organization | `read_databases` |\n| Database | `read_database` |" + } + }, + "/organizations/{organization}/oauth-applications": { + "get": { + "tags": [ + "OAuth applications" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-oauth-applications", + "summary": "List OAuth applications", + "parameters": [ + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth applications belong 
to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a list of the organization's oauth applications", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the OAuth application" + }, + "name": { + "type": "string", + "description": "The name of the OAuth application" + }, + "redirect_uri": { + "type": "string", + "description": "The redirect URI of the OAuth application" + }, + "domain": { + "type": "string", + "description": "The domain of the OAuth application. 
Used for verification of a valid redirect uri" + }, + "created_at": { + "type": "string", + "description": "When the OAuth application was created" + }, + "updated_at": { + "type": "string", + "description": "When the OAuth application was last updated" + }, + "scopes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The scopes that the OAuth application requires on a user's account" + }, + "avatar": { + "type": "string", + "description": "The image source for the OAuth application's avatar" + }, + "client_id": { + "type": "string", + "description": "The OAuth application's unique client id" + }, + "tokens": { + "type": "number", + "description": "The number of tokens issued by the OAuth application" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "redirect_uri", + "domain", + "created_at", + "updated_at", + "scopes", + "client_id", + "tokens" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_applications`\n\n" + } + }, + "/organizations/{organization}/oauth-applications/{application_id}": { + "get": { + "tags": [ + "OAuth applications" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-an-oauth-application", + "summary": "Get an OAuth application", + "parameters": [ + { + "name": "application_id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth application belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal 
Server Error" + }, + "200": { + "description": "Returns information about an OAuth application", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the OAuth application" + }, + "name": { + "type": "string", + "description": "The name of the OAuth application" + }, + "redirect_uri": { + "type": "string", + "description": "The redirect URI of the OAuth application" + }, + "domain": { + "type": "string", + "description": "The domain of the OAuth application. Used for verification of a valid redirect uri" + }, + "created_at": { + "type": "string", + "description": "When the OAuth application was created" + }, + "updated_at": { + "type": "string", + "description": "When the OAuth application was last updated" + }, + "scopes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The scopes that the OAuth application requires on a user's account" + }, + "avatar": { + "type": "string", + "description": "The image source for the OAuth application's avatar" + }, + "client_id": { + "type": "string", + "description": "The OAuth application's unique client id" + }, + "tokens": { + "type": "number", + "description": "The number of tokens issued by the OAuth application" + } + }, + "additionalProperties": false, + "required": [ + "id", + "name", + "redirect_uri", + "domain", + "created_at", + "updated_at", + "scopes", + "client_id", + "tokens" + ] + } + } + }, + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_applications`\n\n" + } + }, + "/organizations/{organization}/oauth-applications/{application_id}/tokens": { + "get": { + "tags": [ + "OAuth applications" + ], + "consumes": [ + "application/json" + ], + "operationId": "list-oauth-tokens", + "summary": "List OAuth tokens", + "parameters": [ + { + "name": "application_id", + "type": "string", + "in": "path", + 
"required": true, + "description": "The ID of the OAuth application" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth application belongs to" + }, + { + "name": "page", + "type": "number", + "in": "query", + "default": 1, + "description": "If provided, specifies the page offset of returned results" + }, + { + "name": "per_page", + "type": "number", + "in": "query", + "default": 25, + "description": "If provided, specifies the number of returned results" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns a list of OAuth tokens issued on behalf of the OAuth application", + "schema": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the service token" + }, + "display_name": { + "type": "string", + "description": "The display name of the service token" + }, + "avatar_url": { + "type": "string", + "description": "The image source for the avatar of the service token" + }, + "created_at": { + "type": "string", + "description": "When the service token was created" + }, + "updated_at": { + "type": "string", + "description": "When the service token was last updated" + }, + "expires_at": { + "type": "string", + "description": "When the service token will expire" + }, + "last_used_at": { + "type": "string", + "description": "When the service token was last used" + }, + "name": { + "type": "string", + "description": "The name of the service token" + }, + "actor_id": { + "type": "string", + "description": "The ID of the actor on whose behalf the service token was created" + }, + "actor_display_name": { + "type": "string", + "description": "The name of 
the actor on whose behalf the service token was created" + }, + "actor_type": { + "type": "string", + "description": "The type of the actor on whose behalf the service token was created" + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url", + "created_at", + "updated_at", + "expires_at", + "last_used_at", + "name", + "actor_id", + "actor_display_name", + "actor_type" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ] + } + } + }, + "description": "List OAuth tokens created by an OAuth application\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_tokens`\n\n" + } + }, + "/organizations/{organization}/oauth-applications/{application_id}/tokens/{token_id}": { + "get": { + "tags": [ + "OAuth applications" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-an-oauth-token", + "summary": "Get an OAuth token", + "parameters": [ + { + "name": "token_id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application token" + }, + { + "name": "application_id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth application belongs to" + } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns an OAuth token that was issued on behalf of the OAuth application", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the service token" + }, + "display_name": { + "type": "string", + 
"description": "The display name of the service token" + }, + "avatar_url": { + "type": "string", + "description": "The image source for the avatar of the service token" + }, + "created_at": { + "type": "string", + "description": "When the service token was created" + }, + "updated_at": { + "type": "string", + "description": "When the service token was last updated" + }, + "expires_at": { + "type": "string", + "description": "When the service token will expire" + }, + "last_used_at": { + "type": "string", + "description": "When the service token was last used" + }, + "name": { + "type": "string", + "description": "The name of the service token" + }, + "actor_id": { + "type": "string", + "description": "The ID of the actor on whose behalf the service token was created" + }, + "actor_display_name": { + "type": "string", + "description": "The name of the actor on whose behalf the service token was created" + }, + "actor_type": { + "type": "string", + "description": "The type of the actor on whose behalf the service token was created" + }, + "oauth_accesses_by_resource": { + "type": "object", + "properties": { + "user": { + "type": "object", + "properties": { + "users": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Users the token has access to" + }, + "accesses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Accesses the token has on the resources" + } + }, + "additionalProperties": false, + "required": [ + "users", + "accesses" + ] + }, + "organization": { + "type": "object", + "properties": { + "organizations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Organizations the token has access to" + }, + "accesses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Accesses the token has on the resources" + } + }, + "additionalProperties": false, + "required": [ + "organizations", + "accesses" + ] + }, + "database": { + "type": "object", + "properties": { 
+ "databases": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Databases the token has access to" + }, + "accesses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Accesses the token has on the resources" + } + }, + "additionalProperties": false, + "required": [ + "databases", + "accesses" + ] + }, + "branch": { + "type": "object", + "properties": { + "branches": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Branches the token has access to" + }, + "accesses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Accesses the token has on the resources" + } + }, + "additionalProperties": false, + "required": [ + "branches", + "accesses" + ] + } + }, + "additionalProperties": false, + "required": [ + "user", + "organization", + "database", + "branch" + ] + } + }, + "additionalProperties": false, + "required": [ + "id", + "display_name", + "avatar_url", + "created_at", + "updated_at", + "expires_at", + "last_used_at", + "name", + "actor_id", + "actor_display_name", + "actor_type", + "oauth_accesses_by_resource" + ] + } + } + }, + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `read_oauth_tokens`\n\n" + }, + "delete": { + "tags": [ + "OAuth applications" + ], + "consumes": [ + "application/json" + ], + "operationId": "delete-an-oauth-token", + "summary": "Delete an OAuth token", + "parameters": [ + { + "name": "token_id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application token" + }, + { + "name": "application_id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth application belongs to" 
+ } + ], + "responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "204": { + "description": "Deletes an OAuth application's OAuth token" + } + }, + "description": "\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `delete_oauth_tokens`\n\n" + } + }, + "/organizations/{organization}/oauth-applications/{id}/token": { + "post": { + "tags": [ + "OAuth tokens" + ], + "consumes": [ + "application/json" + ], + "operationId": "create-or-renew-an-oauth-token", + "summary": "Create or renew an OAuth token", + "parameters": [ + { + "name": "id", + "type": "string", + "in": "path", + "required": true, + "description": "The ID of the OAuth application" + }, + { + "name": "organization", + "type": "string", + "in": "path", + "required": true, + "description": "The name of the organization the OAuth application belongs to" + }, + { + "name": "body", + "in": "body", + "schema": { + "type": "object", + "properties": { + "client_id": { + "type": "string", + "description": "The OAuth application's client ID" + }, + "client_secret": { + "type": "string", + "description": "The OAuth applicatin's client secret" + }, + "grant_type": { + "type": "string", + "enum": [ + "authorization_code", + "refresh_token" + ], + "description": "Whether an OAuth grant code or a refresh token is being exchanged for an OAuth token" + }, + "code": { + "type": "string", + "description": "The OAuth grant code provided to your OAuth application's redirect URI. Required when grant_type is authorization_code" + }, + "redirect_uri": { + "type": "string", + "description": "The OAuth application's redirect URI. 
Required when grant_type is authorization_code" + }, + "refresh_token": { + "type": "string", + "description": "The refresh token from the original OAuth token grant. Required when grant_type is refresh_token" + } + }, + "additionalProperties": false, + "required": [ + "client_id", + "client_secret", + "grant_type" + ] + } + } + ], + "responses": { + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "422": { + "description": "Unprocessable Entity" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the created OAuth token", + "schema": { + "type": "object", + "properties": { + "display_name": { + "type": "string", + "description": "The display name of the OAuth token" + }, + "name": { + "type": "string", + "description": "The name of the OAuth token" + }, + "token": { + "type": "string", + "description": "The plain text OAuth token" + }, + "plain_text_refresh_token": { + "type": "string", + "description": "The refresh token used to refresh this OAuth token" + }, + "actor_id": { + "type": "string", + "description": "The ID of the actor on whose behalf the token was issued" + }, + "actor_display_name": { + "type": "string", + "description": "The name of the actor on whose behalf the token was issued" + }, + "service_token_accesses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The accesses issued to this OAuth token" + } + }, + "additionalProperties": false + } + } + }, + "description": "Create an OAuth token from an authorization grant code, or refresh an OAuth token from a refresh token\n### Authorization\nA service token must have at least one of the following access in order to use this API endpoint:\n\n**Service Token Accesses**\n `write_oauth_tokens`\n\n" + } + }, + "/user": { + "get": { + "tags": [ + "Users" + ], + "consumes": [ + "application/json" + ], + "operationId": "get-current-user", + "summary": "Get current user", + "parameters": [], + 
"responses": { + "401": { + "description": "Unauthorized" + }, + "404": { + "description": "Not Found" + }, + "403": { + "description": "Forbidden" + }, + "500": { + "description": "Internal Server Error" + }, + "200": { + "description": "Returns the current user that is associated with this service token", + "schema": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the user" + }, + "display_name": { + "type": "string", + "description": "The display name of the user" + }, + "name": { + "type": "string", + "description": "The name of the user" + }, + "email": { + "type": "string", + "description": "The email of the user" + }, + "avatar_url": { + "type": "string", + "description": "The URL source of the user's avatar" + }, + "created_at": { + "type": "string", + "description": "When the user was created" + }, + "updated_at": { + "type": "string", + "description": "When the user was last updated" + }, + "two_factor_auth_configured": { + "type": "boolean", + "description": "Whether or not the user has configured two factor authentication" + }, + "default_organization_id": { + "type": "string", + "description": "The default organization for the user" + }, + "sso": { + "type": "boolean", + "description": "Whether or not the user is managed by WorkOS" + }, + "managed": { + "type": "boolean", + "description": "Whether or not the user is managed by an authentication provider" + }, + "directory_managed": { + "type": "boolean", + "description": "Whether or not the user is managed by a WorkOS directory" + }, + "email_verified": { + "type": "boolean", + "description": "Whether or not the user is verified by email" + } + }, + "additionalProperties": false + } + } + }, + "description": "Get the user associated with this service token\n### Authorization\nA OAuth token must have at least one of the following scopes in order to use this API endpoint:\n\n**OAuth Scopes**\n\n | Resource | Scopes |\n| :------- | :---------- |\n| User | 
`read_user` |" + } + } + }, + "definitions": {}, + "schemes": [ + "https" + ], + "tags": [ + { + "name": "Backups", + "description": " API endpoints for managing database branch backups.\n" + }, + { + "name": "Databases", + "description": " API endpoints for managing databases within an organization.\n" + }, + { + "name": "Database branch passwords", + "description": " API endpoints for managing database branch passwords.\n" + }, + { + "name": "OAuth applications", + "description": " API endpoints for fetching OAuth applications.\n" + }, + { + "name": "OAuth tokens", + "description": " API endpoints for managing OAuth tokens.\n" + }, + { + "name": "Organizations", + "description": " API endpoints for managing organizations.\n" + }, + { + "name": "Users", + "description": " API endpoints for fetching user information.\n" + }, + { + "name": "Deploy requests", + "description": " API endpoints for managing database deploy requests.\n" + } + ], + "securityDefinitions": { + "ApiKeyHeader": { + "type": "apiKey", + "in": "header", + "name": "Authorization" + } + }, + "security": [ + { + "ApiKeyHeader": [ + "Authorization" + ] + } + ], + "host": "api.planetscale.com", + "produces": [ + "application/json" + ], + "x-readme": { + "explorer-enabled": false + } +} diff --git a/script/generate b/script/generate new file mode 100755 index 0000000..20814e6 --- /dev/null +++ b/script/generate @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -euo pipefail + +root=$(git rev-parse --show-toplevel) + +function main() { + # generate_original + generate_with_ref +} + +function generate_original() { + go run ${root}/internal/cmd/client_codegen/*.go --spec ${root}/openapi/openapi-spec.json > ${root}/internal/client/planetscale/planetscale.go +} + +function generate_with_ref() { + go run ${root}/internal/cmd/extractref/main.go --cfg openapi/extract-ref-cfg.json --spec openapi/openapi-spec.json | jq . 
> ${root}/openapi-spec.json + go run ${root}/internal/cmd/client_codegen/*.go --spec ${root}/openapi-spec.json > ${root}/internal/client/planetscale/planetscale.go +} + +main \ No newline at end of file diff --git a/script/run_example_apply b/script/run_example_apply new file mode 100755 index 0000000..6391f8c --- /dev/null +++ b/script/run_example_apply @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +root=$(git rev-parse --show-toplevel) + +function main() { + go install github.com/planetscale/terraform-provider-planetscale + terraform -chdir=$root/examples/provider apply +} + +main \ No newline at end of file diff --git a/script/run_example_plan b/script/run_example_plan new file mode 100755 index 0000000..d1e7ea2 --- /dev/null +++ b/script/run_example_plan @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +root=$(git rev-parse --show-toplevel) + +function main() { + go install github.com/planetscale/terraform-provider-planetscale + terraform -chdir=$root/examples/provider plan +} + +main \ No newline at end of file diff --git a/script/update_openapi_spec b/script/update_openapi_spec new file mode 100755 index 0000000..2b081ea --- /dev/null +++ b/script/update_openapi_spec @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -euo pipefail + +root=$(git rev-parse --show-toplevel) + +function main() { + curl -q https://api.planetscale.com/v1/openapi-spec | jq . > ${root}/openapi/openapi-spec.json +} + +main \ No newline at end of file diff --git a/terraform-registry-manifest.json b/terraform-registry-manifest.json new file mode 100644 index 0000000..fec2a56 --- /dev/null +++ b/terraform-registry-manifest.json @@ -0,0 +1,6 @@ +{ + "version": 1, + "metadata": { + "protocol_versions": ["6.0"] + } +} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..867d3a2 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build tools + +package tools + +import ( + // Documentation generation + _ "github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs" +)