diff --git a/src/routes/blog/post/3-things-you-can-build-with-go-runtime/+page.markdoc b/src/routes/blog/post/3-things-you-can-build-with-go-runtime/+page.markdoc index 00fadd93bc..0d2eae44df 100644 --- a/src/routes/blog/post/3-things-you-can-build-with-go-runtime/+page.markdoc +++ b/src/routes/blog/post/3-things-you-can-build-with-go-runtime/+page.markdoc @@ -25,11 +25,11 @@ Our Go runtime (just like our other runtimes) has been developed by our team and ## Event-driven nature -Appwrite Functions can be executed by various types of events, which allows you to integrate them into your applications in many different ways. These events include all HTTP actions (to consume like a REST API), CRON schedules (to run them on set time periods), and any events across the various Appwrite products in your project (for example, user creation, document deletion, or file upload). +Appwrite Functions can be executed by various types of events, which allows you to integrate them into your applications in many different ways. These events include all HTTP actions (to consume like a REST API), CRON schedules (to run them on set time periods), and any events across the various Appwrite products in your project (for example, user creation, row deletion, or file upload). ## Global environment variables -Aside from environment variables at the function level, Appwrite also allows you to environment variables at the project level so that they can be shared across multiple functions in a single project. +Aside from environment variables at the function level, Appwrite also allows you to environment variables at the project level so that they can be shared across multiple functions in a single project. ## Permissions system @@ -51,7 +51,7 @@ Once your function is set up, we can try some examples: ## Example 1: AI Chatbot using GPT-3.5 -The first example is a simple chatbot function that accepts a prompt in the request body and returns an answer in the response from the ChatGPT API. 
+The first example is a simple chatbot function that accepts a prompt in the request body and returns an answer in the response from the ChatGPT API. To do this, we must first add the `go-openai` dependency to our project’s `mod` file. @@ -138,7 +138,7 @@ You can then deploy this function using the `appwrite deploy function` command. ## Example 2: HTML Resume -The second example is an online HTML-based resume that you can deliver online through the function. +The second example is an online HTML-based resume that you can deliver online through the function. For this, the first thing we do is create a `static` directory in the function folder and add a file, `resume.html` with the contents of our resume. You can [copy our template](https://github.com/appwrite-community/go-function-examples/blob/main/functions/go-resume/static/resume.html) if you’d like. @@ -178,7 +178,7 @@ You can then deploy this function using the `appwrite deploy function` command. ## Example 3: URL Shortener -The third example is a personal URL shortener that stores your shortened URL path and long URL in an Appwrite Database and redirects the consumer to the appropriate long URL on pinging the shortened URL. +The third example is a personal URL shortener that stores your shortened URL path and long URL in an Appwrite Database and redirects the consumer to the appropriate long URL on pinging the shortened URL. To build this function, create a `services` directory in the function folder and add a file `setup.go`. Here, we will add the necessary functions to initialize our Appwrite database. 
@@ -186,12 +186,12 @@ To build this function, create a `services` directory in the function folder and package services import ( - "github.com/appwrite/sdk-for-go/databases" + "github.com/appwrite/sdk-for-go/tablesdb" "github.com/appwrite/sdk-for-go/permission" "github.com/open-runtimes/types-for-go/v4/openruntimes" ) -func DoesDatabaseExist(dbs databases.Databases, dbId string) bool { +func DoesDatabaseExist(dbs tablesdb.TablesDB, dbId string) bool { _, err := dbs.Get(dbId) if err != nil { return false @@ -199,23 +199,23 @@ func DoesDatabaseExist(dbs databases.Databases, dbId string) bool { return true } -func DoesCollectionExist(dbs databases.Databases, dbId string, collId string) bool { - _, err := dbs.GetCollection(dbId, collId) +func DoesTableExist(dbs tablesdb.TablesDB, dbId string, collId string) bool { + _, err := dbs.GetTable(dbId, collId) if err != nil { return false } return true } -func DoesAttributeExist(dbs databases.Databases, dbId string, collId string, attribId string) bool { - _, err := dbs.GetAttribute(dbId, collId, attribId) +func DoesColumnExist(dbs tablesdb.TablesDB, dbId string, collId string, attribId string) bool { + _, err := dbs.GetColumn(dbId, collId, attribId) if err != nil { return false } return true } -func InitialiseDatabase(Context openruntimes.Context, dbs databases.Databases, dbId string, collId string) { +func InitialiseDatabase(Context openruntimes.Context, dbs tablesdb.TablesDB, dbId string, collId string) { doesDbExist := DoesDatabaseExist(dbs, dbId) if !doesDbExist { dbs.Create( @@ -224,24 +224,24 @@ func InitialiseDatabase(Context openruntimes.Context, dbs databases.Databases, d ) } - doesCollExist := DoesCollectionExist(dbs, dbId, collId) + doesCollExist := DoesTableExist(dbs, dbId, collId) if !doesCollExist { - dbs.CreateCollection( + dbs.CreateTable( dbId, collId, "URLs", - dbs.WithCreateCollectionPermissions([]string{permission.Read("any")}), + dbs.WithCreateTablePermissions([]string{permission.Read("any")}), ) } - 
doesAttribExist := DoesAttributeExist(dbs, dbId, collId, "longUrl") + doesAttribExist := DoesColumnExist(dbs, dbId, collId, "longUrl") if !doesAttribExist { - dbs.CreateUrlAttribute( + dbs.CreateUrlColumn( dbId, collId, "longUrl", true, - dbs.WithCreateUrlAttributeArray(false), + dbs.WithCreateUrlColumnArray(false), ) } } @@ -276,12 +276,12 @@ func Main(Context openruntimes.Context) openruntimes.Response { appwrite.WithKey(Context.Req.Headers["x-appwrite-key"]), ) - databases := appwrite.NewDatabases(client) + tablesDB := appwrite.NewTablesDB(client) dbId := "urlDatabase" - collId := "urlCollection" + collId := "urlTable" - services.InitialiseDatabase(Context, *databases, dbId, collId) + services.InitialiseDatabase(Context, *tablesDB, dbId, collId) if Context.Req.Method == "POST" { var requestBody RequestBody @@ -294,7 +294,7 @@ func Main(Context openruntimes.Context) openruntimes.Response { }, Context.Res.WithStatusCode(400)) } - _, err = databases.CreateDocument( + _, err = tablesDB.CreateRow( dbId, collId, requestBody.ShortId, @@ -326,7 +326,7 @@ func Main(Context openruntimes.Context) openruntimes.Response { shortId := path[1:] - document, err := databases.GetDocument(dbId, collId, shortId) + row, err := tablesDB.GetRow(dbId, collId, shortId) if err != nil { Context.Error(err) @@ -334,7 +334,7 @@ func Main(Context openruntimes.Context) openruntimes.Response { } var responseBody ResponseBody - document.Decode(&responseBody) + row.Decode(&responseBody) return Context.Res.Redirect(responseBody.LongUrl, Context.Res.WithStatusCode(302)) } @@ -345,7 +345,7 @@ func Main(Context openruntimes.Context) openruntimes.Response { You can then deploy this function using the `appwrite deploy function` command. 
-After deployment, go to the Settings tab on the Function page in your Appwrite project and enable the following scopes for the dynamic API key: `databases.read`, `databases.write`, `collections.read`, `collections.write`, `attributes.read`, `attributes.write`, `documents.read`, `documents.write`, +After deployment, go to the Settings tab on the Function page in your Appwrite project and enable the following scopes for the dynamic API key: `databases.read`, `databases.write`, `tables.read`, `tables.write`, `columns.read`, `columns.write`, `rows.read`, `rows.write`, # More resources diff --git a/src/routes/blog/post/add-a-search-function-to-your-app/+page.markdoc b/src/routes/blog/post/add-a-search-function-to-your-app/+page.markdoc index a68bc9264d..5037708898 100644 --- a/src/routes/blog/post/add-a-search-function-to-your-app/+page.markdoc +++ b/src/routes/blog/post/add-a-search-function-to-your-app/+page.markdoc @@ -16,14 +16,14 @@ One such integration you can implement using Appwrite Functions is **Searching** # Setting up the Template -Meilisearch is a flexible and powerful user-focused search engine that can be added to any website or application. The purpose of this function template is to sync documents in an Appwrite database collection to a Meilisearch index. Using this function template, users can explore, search, and retrieve information from the connected database collection. Through this template, documents from the Appwrite collection are systematically indexed within Meilisearch. +Meilisearch is a flexible and powerful user-focused search engine that can be added to any website or application. The purpose of this function template is to sync rows in an Appwrite database table to a Meilisearch index. Using this function template, users can explore, search, and retrieve information from the connected database table. Through this template, rows from the Appwrite table are systematically indexed within Meilisearch. 
To use the function, you need the following set of keys: - `APPWRITE_KEY` - API Key to talk to Appwrite backend APIs.To generate API Keys you can follow the documentation [here](https://appwrite.io/docs/getting-started-for-server#apiKey) - `APPWRITE_ENDPOINT` - To get the Appwrite endpoint, you need to go to [Appwrite](https://cloud.appwrite.io/) and find it under “Settings” -- `APPWRITE_DATABASE_ID` - The ID of the Appwrite database that contains the collection to sync. You can find the documentation [here](https://appwrite.io/docs/databases). -- `APPWRITE_COLLECTION_ID` - The ID of the collection in the Appwrite database to sync. +- `APPWRITE_DATABASE_ID` - The ID of the Appwrite database that contains the table to sync. You can find the documentation [here](https://appwrite.io/docs/databases). +- `APPWRITE_TABLE_ID` - The ID of the table in the Appwrite database to sync. To use Meilisearch, you can either self-host it using the command 👇 @@ -31,7 +31,7 @@ To use Meilisearch, you can either self-host it using the command 👇 curl -L [https://install.meilisearch.com](https://install.meilisearch.com/) | sh ``` -Or use [Meilisearch Cloud](https://www.meilisearch.com/cloud). For this example, we will assume that you are using Meilisearch Cloud. +Or use [Meilisearch Cloud](https://www.meilisearch.com/cloud). For this example, we will assume that you are using Meilisearch Cloud. Here’s the keys you need: @@ -40,7 +40,7 @@ Here’s the keys you need: ![Overview Meilisearch](/images/blog/add-a-search-function-to-your-app/functions.png) -- `MEILISEARCH_ADMIN_API_KEY` - This is the admin API key for Meilisearch. You will find it in the Meilisearch Console under “API Key”. +- `MEILISEARCH_ADMIN_API_KEY` - This is the admin API key for Meilisearch. You will find it in the Meilisearch Console under “API Key”. - `MEILISEARCH_SEARCH_API_KEY` - This is the API Key for Meilisearch search operations. To get this, you need you create a new index from the Meilisearch Console. 
Once created you will find it under `Overview` as `Default Search API Key` @@ -50,12 +50,12 @@ Here’s the keys you need: ![Keys](/images/blog/add-a-search-function-to-your-app/connect.png) -- `MEILISEARCH_INDEX_NAME` - Name of the Meilisearch index to which the documents will be synchronized. For e.g, in the above picture, the Index name is `Newindex`. You can also find it under `Settings` as `Index Name`. - +- `MEILISEARCH_INDEX_NAME` - Name of the Meilisearch index to which the rows will be synchronized. For e.g, in the above picture, the Index name is `Newindex`. You can also find it under `Settings` as `Index Name`. + ## Preparing the Function -The function template syncs documents in an Appwrite database collection to a Meilisearch index. It should get you up and running, but you will need to add real data to build a useful search index. +The function template syncs rows in an Appwrite database table to a Meilisearch index. It should get you up and running, but you will need to add real data to build a useful search index. If you want to see the source code, you can find it on our [templates GitHub repository](https://github.com/appwrite/templates/tree/main/node/sync-with-meilisearch). Now, let’s navigate to our functions page on **[Appwrite](https://cloud.appwrite.io/)**. From there, we will select the **Templates** tab, search for and select the **Sync with Meilisearch** function template. @@ -63,7 +63,7 @@ If you want to see the source code, you can find it on our [templates GitHub re ![templates](/images/blog/add-a-search-function-to-your-app/templates.png) -The function requires `APPWRITE_API_KEY`, `APPWRITE_DATABASE_ID`, `APPWRITE_COLLECTION_ID` , `MEILISEARCH_ENDPOINT`, `MEILISEARCH_ADMIN_API_KEY`, `MEILISEARCH_SEARCH_API_KEY`, `MEILISEARCH_INDEX_NAME`. Once you have added them you can proceed to the Connect step. 
+The function requires `APPWRITE_API_KEY`, `APPWRITE_DATABASE_ID`, `APPWRITE_TABLE_ID` , `MEILISEARCH_ENDPOINT`, `MEILISEARCH_ADMIN_API_KEY`, `MEILISEARCH_SEARCH_API_KEY`, `MEILISEARCH_INDEX_NAME`. Once you have added them you can proceed to the Connect step. Select **Create a new repository** (this will generate a GitHub repository for you with the function), and leave the production branch and root settings as default to create this function. @@ -84,13 +84,13 @@ Visit the **Domains** tab on the function page and copy the domain URL to test We’ve added search functionality to our app and opened up many possibilities to improve the experience of our app’s users. How can the *template* be extended ? -- Using events to automatically index new collections +- Using events to automatically index new tables - Using weights and other meilisearch features to optimise search such as excluding certain fields from indexing Some examples are: 1. **Real-time Data Exploration:** It can be used to provide real-time search capabilities for datasets and data streams, allowing users to explore and analyze data in real-time. -2. **Content Management Systems:** The function template can be integrated into content management systems (CMS) to facilitate efficient content retrieval for editors and site visitors. +2. **Content Management Systems:** The function template can be integrated into content management systems (CMS) to facilitate efficient content retrieval for editors and site visitors. Be sure to check out the other available Function Templates. We’ve created multiple that could be of use in your projects. You can find the [templates GitHub repository here](https://github.com/appwrite/templates). 
diff --git a/src/routes/blog/post/ai-crystal-ball/+page.markdoc b/src/routes/blog/post/ai-crystal-ball/+page.markdoc index 3e87af1f64..65423b8082 100644 --- a/src/routes/blog/post/ai-crystal-ball/+page.markdoc +++ b/src/routes/blog/post/ai-crystal-ball/+page.markdoc @@ -20,7 +20,7 @@ In order to build this application, we have a few prerequisites. We must set up - OpenAI API key - GitHub OAuth app - Appwrite OAuth adapter for GitHub -- Appwrite collections to store GitHub data and destinies +- Appwrite tables to store GitHub data and destinies ## OpenAI @@ -59,11 +59,11 @@ To implement GitHub OAuth, we must visit the **Auth** page on the Appwrite proje ## Appwrite Database -We must create a database with the ID `crystalball` and two collections with the IDs `githubData` and `destiny` in the Appwrite project with the following details: +We must create a database with the ID `crystalball` and two tables with the IDs `githubData` and `destiny` in the Appwrite project with the following details: -#### The `githubData` collection +#### The `githubData` table -Create the collection and add the following attributes: +Create the table and add the following columns: | Key | Type | Size | Required | Array | | --- | --- | --- | --- | --- | @@ -72,22 +72,22 @@ Create the collection and add the following attributes: | following | Integer | - | Yes | - | | username | String | 255 | Yes | - | -Visit the collection settings, enable **Document security,** and set the following (collection-level) **Permissions**: +Visit the table settings, enable **Row security,** and set the following (table-level) **Permissions**: | Role | Create | Read | Update | Delete | | --- | --- | --- | --- | --- | | Users | Yes | - | - | - | -#### The `destiny` collection +#### The `destiny` table -Create the collection and add the following attributes: +Create the table and add the following columns: | Key | Type | Size | Required | | --- | --- | --- | --- | | destiny | String | 25000 | Yes | | username | 
String | 255 | Yes | -Visit the collection settings, enable **Document security,** and set the following (collection-level) **Permissions**: +Visit the table settings, enable **Row security,** and set the following (table-level) **Permissions**: | Role | Create | Read | Update | Delete | | --- | --- | --- | --- | --- | @@ -123,15 +123,15 @@ Lastly, we must create a `.env` file at the root of the directory and add the fo PUBLIC_APPWRITE_ENDPOINT= PUBLIC_APPWRITE_PROJECT_ID= PUBLIC_APPWRITE_DATABASE_ID= -PUBLIC_APPWRITE_COLLECTION_ID_GITHUBDATA= -PUBLIC_APPWRITE_COLLECTION_ID_DESTINY= +PUBLIC_APPWRITE_TABLE_ID_GITHUBDATA= +PUBLIC_APPWRITE_TABLE_ID_DESTINY= SECRET_OPENAI_API_KEY= ``` After the environment variables are created, we can set up the Appwrite SDK by creating a file `./src/lib/appwrite.js` and adding the following: ```js -import { Client, Account, Databases, OAuthProvider } from 'appwrite'; +import { Client, Account, TablesDB, OAuthProvider } from 'appwrite'; import { env } from '$env/dynamic/public'; const client = new Client() @@ -139,7 +139,7 @@ const client = new Client() .setProject(env.PUBLIC_APPWRITE_PROJECT_ID); export const account = new Account(client); -export const databases = new Databases(client); +export const tablesDB = new TablesDB(client); export { OAuthProvider }; ``` @@ -272,31 +272,31 @@ At this point, we also want to create our Database library using the Appwrite SD ```js import { Permission, Role, ID } from 'appwrite'; -import { databases } from './appwrite'; +import { tablesDB } from './appwrite'; import { env } from '$env/dynamic/public'; const databaseId = env.PUBLIC_APPWRITE_DATABASE_ID; -const githubDataCollectionId = env.PUBLIC_APPWRITE_COLLECTION_ID_GITHUBDATA; +const githubDataTableId = env.PUBLIC_APPWRITE_TABLE_ID_GITHUBDATA; export const db = { - getUserData: async(documentId) => { + getUserData: async(rowId) => { try{ - return await databases.getDocument({ + return await tablesDB.getRow( databaseId, - collectionId: 
githubDataCollectionId, - documentId - }); + githubDataTableId, + rowId + ); } catch(err){ return false; } }, addUserData: async(userId, username, followers, following, languages) => { - return await databases.createDocument({ + return await tablesDB.createRow( databaseId, - collectionId: githubDataCollectionId, - documentId: userId, - data: { + githubDataTableId, + userId, + { username, followers, following, @@ -504,31 +504,31 @@ Lastly, to share our destiny with the rest of the world, we must create an addit . . . -const destinyCollectionId = env.PUBLIC_APPWRITE_COLLECTION_ID_DESTINY; +const destinyTableId = env.PUBLIC_APPWRITE_TABLE_ID_DESTINY; export const db = {** . . . addDestiny: async(username, destiny) => { - return await databases.createDocument({ + return await tablesDB.createRow( databaseId, - collectionId: destinyCollectionId, - documentId: ID.unique(), - data: { + destinyTableId, + ID.unique(), + { username, destiny } }) }, - getDestiny: async(documentId) => { + getDestiny: async(rowId) => { try{ - return await databases.getDocument({ + return await tablesDB.getRow( databaseId, - collectionId: destinyCollectionId, - documentId - }); + destinyTableId, + rowId + ); } catch(err){ return { username: 'Not found', diff --git a/src/routes/blog/post/announcing-atomic-numeric-operations/+page.markdoc b/src/routes/blog/post/announcing-atomic-numeric-operations/+page.markdoc index 85aa375938..94ecfe1f4e 100644 --- a/src/routes/blog/post/announcing-atomic-numeric-operations/+page.markdoc +++ b/src/routes/blog/post/announcing-atomic-numeric-operations/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post title: "Announcing Atomic numeric operations: Safe, server-side increments and decrements" -description: Safely update numeric fields like counters, stock levels, or credits without fetching or rewriting the document. +description: Safely update numeric fields like counters, stock levels, or credits without fetching or rewriting the row. 
date: 2025-08-04 cover: /images/blog/announcing-atomic-numeric-operations/cover.png timeToRead: 5 @@ -10,20 +10,19 @@ category: announcement featured: false --- -In high-concurrency systems like social apps, games, and usage-tracked services, even updating a single number such as a like, retry count, or quota, can lead to consistency issues. When multiple clients try to update the same value simultaneously, it’s easy to end up with conflicting writes, lost updates, or inaccurate data. +In high-concurrency systems like social apps, games, and usage-tracked services, even updating a single number such as a like, retry count, or quota, can lead to consistency issues. When multiple clients try to update the same value simultaneously, it’s easy to end up with conflicting writes, lost updates, or inaccurate data. -Most setups require you to fetch the document, change the number on the client, and then write it back. This process is slow, error-prone, and wastes bandwidth, especially when you're only trying to change a single field. +Most setups require you to fetch the row, change the number on the client, and then write it back. This process is slow, error-prone, and wastes bandwidth, especially when you're only trying to change a single field. To change this, we introduce **Atomic numeric operations** in Appwrite. -A new feature that lets you increment or decrement numeric fields directly on the server, without fetching the full document. It’s fast, safe, bandwidth-efficient, and concurrency-friendly. +A new feature that lets you increment or decrement numeric fields directly on the server, without fetching the full row. It's fast, safe, bandwidth-efficient, and concurrency-friendly. # Race-free numeric updates +Before this feature, updating a number meant fetching the entire row, modifying it on the client, and writing it back, a process prone to race conditions, unnecessary bandwidth use, and extra logic to handle edge cases. 
-Before this feature, updating a number meant fetching the entire document, modifying it on the client, and writing it back, a process prone to race conditions, unnecessary bandwidth use, and extra logic to handle edge cases. - -With **Atomic numeric operations,** you simply send a delta (like `+1` or `-3`), and Appwrite applies the update atomically on the server. No full document reads, no conflicts, no custom logic. Just consistent, permission-aware updates that work reliably under load. +With **Atomic numeric operations,** you simply send a delta (like `+1` or `-3`), and Appwrite applies the update atomically on the server. No full row reads, no conflicts, no custom logic. Just consistent, permission-aware updates that work reliably under load. # Built for real-time, multi-user systems @@ -46,17 +45,17 @@ Use the `incrementDocumentAttribute` and `decrementDocumentAttribute` methods to ## Increment a field {% #increment-field %} ```client-web -import { Client, Databases } from "appwrite"; +import { Client, TablesDB } from "appwrite"; const client = new Client() .setEndpoint('https://.cloud.appwrite.io/v1') // Your API Endpoint .setProject(''); // Your project ID -const databases = new Databases(client); +const tablesDB = new TablesDB(client); -const result = await databases.incrementDocumentAttribute( +const result = await tablesDB.incrementDocumentAttribute( '', - '', + '', '', 'likes', // attribute 1 // value @@ -66,17 +65,17 @@ const result = await databases.incrementDocumentAttribute( ## Decrement a field {% #decrement-field %} ```client-web -import { Client, Databases } from "appwrite"; +import { Client, TablesDB } from "appwrite"; const client = new Client() .setEndpoint('https://.cloud.appwrite.io/v1') // Your API Endpoint .setProject(''); // Your project ID -const databases = new Databases(client); +const tablesDB = new TablesDB(client); -const result = await databases.decrementDocumentAttribute( +const result = await tablesDB.decrementDocumentAttribute( 
'', - '', + '', '', 'credits', // attribute 5 // value @@ -87,17 +86,17 @@ const result = await databases.decrementDocumentAttribute( This feature solves a common problem with a clean, built-in approach. You don’t need to write custom logic to handle concurrency, retries, or limits. It’s a simple API call that replaces a lot of complex edge-case handling. And it just works. -- **Atomic by default:** Every delta is applied in a single server-side write. The document is locked during the update, so there’s no room for race conditions or overlapping writes, even under heavy concurrency. +- **Atomic by default:** Every delta is applied in a single server-side write. The row is locked during the update, so there's no room for race conditions or overlapping writes, even under heavy concurrency. - **Supports both increments and decrements:** You're not limited to just adding `+1`. You can apply any positive or negative delta, whether you're increasing API credits or reducing stock levels after a purchase. - **Built-in constraints:** You can define optional `min` and `max` bounds on the value. If the update would push the value outside that range, it’s rejected. Great for enforcing limits like “stock can’t go below zero” or “credits can't exceed a cap.” -- **Respects permissions:** This works just like any other Appwrite document update. If the user doesn’t have permission to modify the document, the update doesn’t go through. No exceptions. +- **Respects permissions:** This works just like any other Appwrite row update. If the user doesn't have permission to modify the row, the update doesn't go through. No exceptions. Atomic numeric operations are live for both **Appwrite Cloud** and **Self-Hosted** environments. -This is a core building block for modern, concurrent-safe applications and it’s now built into Appwrite’s document system. +This is a core building block for modern, concurrent-safe applications and it's now built into Appwrite's row system. 
# More resources diff --git a/src/routes/blog/post/announcing-auto-increment-support/+page.markdoc b/src/routes/blog/post/announcing-auto-increment-support/+page.markdoc index 2b9106c339..7ec06f2e61 100644 --- a/src/routes/blog/post/announcing-auto-increment-support/+page.markdoc +++ b/src/routes/blog/post/announcing-auto-increment-support/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post -title: "Announcing Auto-increment support: Built-in numeric sequencing for your documents" -description: Get reliable, sequential ordering across your collections with fast, indexed auto-increment IDs. +title: "Announcing Auto-increment support: Built-in numeric sequencing for your rows" +description: Get reliable, sequential ordering across your tables with fast, indexed auto-increment IDs. date: 2025-07-15 cover: /images/blog/announcing-auto-increment-support/cover.png timeToRead: 5 @@ -14,11 +14,11 @@ Managing ordered data can often be complex and error-prone, especially when it r To tackle this issue, we're introducing **Auto-increment support.** -This new feature automatically handles a `$sequence` column within your documents, incrementing reliably with each new insertion. This ensures your data remains ordered and clear without additional manual overhead. +This new feature automatically handles a `$sequence` column within your rows, incrementing reliably with each new insertion. This ensures your data remains ordered and clear without additional manual overhead. # Automatic, predictable ordering -Previously, maintaining consistent insertion order and generating numeric identifiers required either manual increments, complex logic, or dependence on timestamps, which weren't always accurate or reliable. Appwrite’s new Auto-increment support feature solves these issues seamlessly, providing a built-in numeric identifier that increases predictably with every new document added. 
+Previously, maintaining consistent insertion order and generating numeric identifiers required either manual increments, complex logic, or dependence on timestamps, which weren't always accurate or reliable. Appwrite’s new Auto-increment support feature solves these issues seamlessly, providing a built-in numeric identifier that increases predictably with every new row added. Whether you're creating paginated data sets, managing invoice numbers, logging activities, or building real-time feeds, Appwrite's Auto-increment support feature offers effortless numeric sequencing. This means less manual work, fewer bugs, and significantly improved reliability. @@ -37,22 +37,22 @@ Integrating Auto-increment support into your Appwrite Databases makes your backe # How it works -For numeric ordering based on insertion order, you can use the `$sequence` field, which Appwrite automatically adds to all documents. This field increments with each new insert. +For numeric ordering based on insertion order, you can use the `$sequence` field, which Appwrite automatically adds to all rows. This field increments with each new insert. ```client-web -import { Client, Databases, Query } from "appwrite"; +import { Client, TablesDB, Query } from "appwrite"; const client = new Client() .setEndpoint('https://.cloud.appwrite.io/v1') .setProject(''); -const databases = new Databases(client); +const tablesDB = new TablesDB(client); -databases.listDocuments({ - databaseId: '', - collectionId: '', - queries: [ +tablesDB.listRows( + '', + '', + [ Query.orderAsc('$sequence'), ] }); diff --git a/src/routes/blog/post/announcing-bulk-api/+page.markdoc b/src/routes/blog/post/announcing-bulk-api/+page.markdoc index 25f6f9800a..f969f8ecdc 100644 --- a/src/routes/blog/post/announcing-bulk-api/+page.markdoc +++ b/src/routes/blog/post/announcing-bulk-api/+page.markdoc @@ -14,9 +14,9 @@ We're excited to introduce another Appwrite Databases feature, **Bulk API**. 
Exp # Faster development with bulk actions -Previously, writing or modifying large amounts of data in Appwrite Databases required sending one request per document. This method was inefficient, slow, and resource-intensive, especially when dealing with thousands of records. +Previously, writing or modifying large amounts of data in Appwrite Databases required sending one request per row. This method was inefficient, slow, and resource-intensive, especially when dealing with thousands of records. -With the new Bulk API, you can create, update, or delete multiple documents in one go, vastly speeding up your workflows and reducing network overhead. +With the new Bulk API, you can create, update, or delete multiple rows in one go, vastly speeding up your workflows and reducing network overhead. # Optimized for server-side workloads @@ -36,12 +36,12 @@ Bulk operations can only be performed via the server-side SDKs. The client-side Utilizing the Bulk API is straightforward. You can use it to: -- Create multiple documents in a single request using the `createDocuments` method -- Update multiple documents in a single request using the `updateDocuments` method -- Delete multiple documents in a single request using the `deleteDocuments` method -- Upsert multiple documents in a single request using the `upsertDocuments` method +- Create multiple rows in a single request using the `createRows` method +- Update multiple rows in a single request using the `updateRows` method +- Delete multiple rows in a single request using the `deleteRows` method +- Upsert multiple rows in a single request using the `upsertRows` method -Here is a code example for creating multiple documents in a single request: +Here is a code example for creating multiple rows in a single request: ```server-nodejs @@ -52,19 +52,19 @@ const client = new sdk.Client() .setProject('') .setKey(''); -const databases = new sdk.Databases(client); +const tablesDB = new sdk.TablesDB(client); -const result = await 
databases.createDocuments( +const result = await tablesDB.createRows( '', - '', + '', [ { $id: sdk.ID.unique(), - name: 'Document 1', + name: 'Row 1', }, { $id: sdk.ID.unique(), - name: 'Document 2', + name: 'Row 2', } ] ); diff --git a/src/routes/blog/post/announcing-csv-imports/+page.markdoc b/src/routes/blog/post/announcing-csv-imports/+page.markdoc index c6651e2b09..c4f8e1561f 100644 --- a/src/routes/blog/post/announcing-csv-imports/+page.markdoc +++ b/src/routes/blog/post/announcing-csv-imports/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post title: "Announcing CSV Import: Bring in large datasets to Appwrite with ease" -description: Learn how to import documents into your Appwrite collections using a simple CSV file, a new feature built on top of Appwrite's migration APIs. +description: Learn how to import rows into your Appwrite tables using a simple CSV file, a new feature built on top of Appwrite's migration APIs. date: 2025-07-01 # update this cover later, once available! cover: /images/blog/announcing-csv-imports/cover.png @@ -11,28 +11,28 @@ category: announcement featured: false --- -We're introducing a new way to populate your Appwrite databases: **document imports from CSV files**. +We're introducing a new way to populate your Appwrite databases: **row imports from CSV files**. -Built on top of Appwrite's migration APIs, this feature makes it easy to bring in large datasets, seed collections, or migrate structured data using only a CSV file. +Built on top of Appwrite's migration APIs, this feature makes it easy to bring in large datasets, seed tables, or migrate structured data using only a CSV file. -The CSV document import is useful for migrating user data from external systems, importing inventory records, seeding test environments, or onboarding structured content such as FAQs. 
+The CSV row import is useful for migrating user data from external systems, importing inventory records, seeding test environments, or onboarding structured content such as FAQs. # How it works -To get started, create a collection and define its attributes in the Appwrite Console. Your CSV file should follow a standard format: +To get started, create a table and define its columns in the Appwrite Console. Your CSV file should follow a standard format: -- The first row must be a header containing attribute names that match your collection -- Each subsequent row represents a document, with values separated by commas +- The first row must be a header containing column names that match your table +- Each subsequent row represents a row, with values separated by commas {% info title="Good to know" %} -You can optionally include the `$id` column to assign custom document IDs. +You can optionally include the `$id` column to assign custom row IDs. {% /info %} -![Collections screen](/images/blog/announcing-csv-imports/csv-import.png) +![Tables screen](/images/blog/announcing-csv-imports/csv-import.png) -All required attributes must be present in the CSV, and Appwrite will validate each row before importing it. +All required columns must be present in the CSV, and Appwrite will validate each row before importing it. -For example, if your collection contains attributes like `title`, `author`, `year`, and `available`, a valid CSV file would look like this: +For example, if your table contains columns like `title`, `author`, `year`, and `available`, a valid CSV file would look like this: ```text $id,title,author,year,available @@ -47,7 +47,7 @@ v42cj0quxp,Pride and Prejudice,Jane Austen,1813,true ## Uploading your CSV file You can upload a new file during import or select an existing one from your project's storage bucket. -The Console provides a guided interface to help you select the CSV and link it to your target collection. 
Once uploaded, the import process begins immediately. +The Console provides a guided interface to help you select the CSV and link it to your target table. Once uploaded, the import process begins immediately. ## Designed for scale diff --git a/src/routes/blog/post/announcing-database-reads-and-writes-pricing/+page.markdoc b/src/routes/blog/post/announcing-database-reads-and-writes-pricing/+page.markdoc index 2abdf79f04..d4ab2b896f 100644 --- a/src/routes/blog/post/announcing-database-reads-and-writes-pricing/+page.markdoc +++ b/src/routes/blog/post/announcing-database-reads-and-writes-pricing/+page.markdoc @@ -40,16 +40,16 @@ Database operations in Appwrite are categorized into two types: ## Read operations Any action that retrieves data from your database, including: -- Fetching documents with `getDocument` or `listDocuments`. +- Fetching rows with `getRow` or `listRows`. ## Write operations Any action that modifies data in your database, including: -- Creating documents with `createDocument`. -- Updating documents with `updateDocument`. -- Deleting documents with `deleteDocument`. +- Creating rows with `createRow`. +- Updating rows with `updateRow`. +- Deleting rows with `deleteRow`. -Most operations are counted based on the number of documents affected. For example, if you fetch a collection of 50 documents with a single API call, this counts as 50 read operations, not as a single operation. However, if your query returns no documents, it will count as a single operation. +Most operations are counted based on the number of rows affected. For example, if you fetch a table of 50 rows with a single API call, this counts as 50 read operations, not as a single operation. However, if your query returns no rows, it will count as a single operation. 
# Your usage diff --git a/src/routes/blog/post/announcing-database-upsert/+page.markdoc b/src/routes/blog/post/announcing-database-upsert/+page.markdoc index c40714ea7e..7743a8877f 100644 --- a/src/routes/blog/post/announcing-database-upsert/+page.markdoc +++ b/src/routes/blog/post/announcing-database-upsert/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post title: "Announcing Database Upsert: Simplify your database interactions" -description: A cleaner, faster, and atomic way to manage your documents in Appwrite. +description: A cleaner, faster, and atomic way to manage your rows in Appwrite. date: 2025-07-08 cover: /images/blog/announcing-database-upsert/cover.png timeToRead: 5 @@ -10,13 +10,13 @@ category: announcement featured: false --- -Working with databases often involves small but repetitive decisions like checking if a document exists, choosing between creating or updating, handling errors that come from guessing wrong. These steps are not difficult on their own, but over time they add complexity to your code and friction to your workflow. +Working with databases often involves small but repetitive decisions like checking if a row exists, choosing between creating or updating, handling errors that come from guessing wrong. These steps are not difficult on their own, but over time they add complexity to your code and friction to your workflow. To simplify this, we introduce Database Upsert in Appwrite. # How it works -Upsert allows you to create or update a document using a single API call. If the document does not exist, it is created. If it does, it is updated. You no longer need to write separate logic to check for existence or handle 404 responses. The server handles that for you. +Upsert allows you to create or update a row using a single API call. If the row does not exist, it is created. If it does, it is updated. You no longer need to write separate logic to check for existence or handle 404 responses. The server handles that for you. 
This change removes the need for client-side conditionals, reduces the number of requests between your app and the database, and helps avoid potential race conditions. It is a small shift in how you interact with the database, but one that can make your code cleaner and your application logic easier to follow. @@ -36,18 +36,18 @@ This brings you immediate benefits such as: Implementing Upsert is straightforward and intuitive: ```javascript -import { Client, Databases } from "appwrite"; +import { Client, TablesDB } from "appwrite"; const client = new Client() .setEndpoint('https://.cloud.appwrite.io/v1') .setProject(''); -const databases = new Databases(client); +const tablesDB = new TablesDB(client); -const result = await databases.upsertDocument( +const result = await tablesDB.upsertRow( '', - '', - '', + '', + '', { 'status': 'succeeded', 'amount': 4999, @@ -64,7 +64,7 @@ This feature simplifies your database interactions, enhancing efficiency, reduci # More resources -- [Read the documentation to get started](/docs/products/databases/documents) +- [Read the documentation to get started](/docs/products/databases/rows) - [Announcing Bulk API: Handle heavy data workloads with ease](/blog/post/announcing-bulk-api) - [Build a personal CRM with SvelteKit and Appwrite Databases](/blog/post/build-personal-crm-sveltekit) -- [Announcing: Document imports from CSV files](/blog/post/announcing-csv-imports) +- [Announcing: Row imports from CSV files](/blog/post/announcing-csv-imports) diff --git a/src/routes/blog/post/announcing-encrypted-string-attributes/+page.markdoc b/src/routes/blog/post/announcing-encrypted-string-attributes/+page.markdoc index fad4cbbaa4..e53b3b8863 100644 --- a/src/routes/blog/post/announcing-encrypted-string-attributes/+page.markdoc +++ b/src/routes/blog/post/announcing-encrypted-string-attributes/+page.markdoc @@ -1,6 +1,6 @@ --- layout: post -title: "Announcing Encrypted string attribute support: Built-in encryption for sensitive fields" +title: 
"Announcing Encrypted string column support: Built-in encryption for sensitive fields" description: Easily encrypt sensitive string fields at rest, with no manual encryption logic. date: 2025-07-10 cover: /images/blog/announcing-encrypted-string-attributes/cover.png @@ -9,9 +9,9 @@ author: jake-barnby category: announcement featured: false --- -Appwrite is secure by default. We build every single product and feature with the highest regard for security. +Appwrite is secure by default. We build every single product and feature with the highest regard for security. -With this in mind, we introduce **Encrypted string attribute support** for Appwrite Databases, a new feature to enhance your databases’ security. +With this in mind, we introduce **Encrypted string column support** for Appwrite Databases, a new feature to enhance your databases’ security. This critical addition lets you store sensitive data securely, encrypted at rest, directly within your databases. @@ -19,11 +19,11 @@ This critical addition lets you store sensitive data securely, encrypted at rest Previously, storing confidential or sensitive data required manual encryption and decryption. This not only increased complexity but also introduced potential security risks from implementation errors. -With Encrypted string attribute support, Appwrite handles encryption transparently using industry-standard AES-GCM encryption. Your data remains secure, encrypted server-side, without additional effort or complex custom code. Additionally, Appwrite ensures compliance with common security standards, including support for environments requiring FIPS compliance. +With Encrypted string column support, Appwrite handles encryption transparently using industry-standard AES-GCM encryption. Your data remains secure, encrypted server-side, without additional effort or complex custom code. 
Additionally, Appwrite ensures compliance with common security standards, including support for environments requiring FIPS compliance. # Serving security-focused industries -It is essential for creating secure apps in fintech, healthcare, messaging, and other sectors where security and compliance are paramount. It ensures data protection out of the box, freeing you from manually managing encryption keys. +It is essential for creating secure apps in fintech, healthcare, messaging, and other sectors where security and compliance are paramount. It ensures data protection out of the box, freeing you from manually managing encryption keys. Key features entail: @@ -34,9 +34,9 @@ Key features entail: # Setting the security standard -Appwrite delivers comprehensive, secure, easy-to-use encryption that is seamlessly integrated into your workflow. When a string attribute is marked as encrypted, Appwrite applies AES-128 encryption in Galois/Counter Mode (GCM) before writing it to the database. +Appwrite delivers comprehensive, secure, easy-to-use encryption that is seamlessly integrated into your workflow. When a string column is marked as encrypted, Appwrite applies AES-128 encryption in Galois/Counter Mode (GCM) before writing it to the database. -This new feature is available on Appwrite Cloud Pro and Scale plan, and self-hosted. Encrypted string attribute support significantly enhances your application's security posture, making Appwrite a trusted choice for handling sensitive data. +This new feature is available on Appwrite Cloud Pro and Scale plan, and self-hosted. Encrypted string column support significantly enhances your application's security posture, making Appwrite a trusted choice for handling sensitive data. 
# More resources diff --git a/src/routes/blog/post/announcing-go-support/+page.markdoc b/src/routes/blog/post/announcing-go-support/+page.markdoc index e3a66a94e2..bfc29fa6b8 100644 --- a/src/routes/blog/post/announcing-go-support/+page.markdoc +++ b/src/routes/blog/post/announcing-go-support/+page.markdoc @@ -18,7 +18,7 @@ Let’s look at the benefits of Go runtime and SDK, and how they can help you bu # What makes Go special? -Go (or Golang) is a statically typed compiled programming language that balances the ease of use like Python and JavaScript and the performance of compiled languages like Rust. +Go (or Golang) is a statically typed compiled programming language that balances the ease of use like Python and JavaScript and the performance of compiled languages like Rust. It was developed by Google in 2012, and has since grown a large developer community online, with companies like Netflix, Paypal, Riot Games and more moving to Go due to its speed and developer accessibility. There are a few reasons why Go is popular with devs and enterprise companies alike: @@ -188,7 +188,7 @@ package handler import ( "os" - + "github.com/open-runtimes/types-for-go/v4/openruntimes" "github.com/appwrite/sdk-for-go/appwrite" ) @@ -211,12 +211,12 @@ func Main(Context openruntimes.Context) openruntimes.Response { for _, bucket := range response.Buckets { Context.Log("Bucket Name:", bucket.Name) } - + return Context.Res.Json(response.Buckets) } ``` -Some of our API endpoints such as the ones that return preferences from the Accounts and Teams APIs as well as the ones the return documents from the Appwrite Database are loosely typed because they can be customised by user. For these, the structs offer a `Decode()` method, as seen in the following example. +Some of our API endpoints such as the ones that return preferences from the Accounts and Teams APIs as well as the ones the return rows from the Appwrite Database are loosely typed because they can be customised by user. 
For these, the structs offer a `Decode()` method, as seen in the following example. ```go package handler @@ -230,14 +230,14 @@ import ( ) type Profile struct { - *models.Document + *models.Row Name string `json:"name"` Verified bool `json:"verified"` } type ProfileList struct { - *models.DocumentList - Documents []Profile `json:"documents"` + *models.RowList + Rows []Profile `json:"rows"` } func Main(Context openruntimes.Context) openruntimes.Response { @@ -247,9 +247,9 @@ func Main(Context openruntimes.Context) openruntimes.Response { appwrite.WithKey(Context.Req.Headers["x-appwrite-key"]), ) - databases := appwrite.NewDatabases(client) + tablesDB := appwrite.NewTablesDB(client) - response, err := databases.ListDocuments("main", "profiles") + response, err := tablesDB.ListRows("main", "profiles") if err != nil { Context.Error(err) return Context.Res.Text("Internal error" Context.Res.WithStatusCode(500)) @@ -262,11 +262,11 @@ func Main(Context openruntimes.Context) openruntimes.Response { return Context.Res.Text("Internal error", Context.Res.WithStatusCode(500)) } - for _, profile := range profiles.Documents { + for _, profile := range profiles.Rows { Context.Log(profile.Id, profile.Name, profile.Verified) } - - return Context.Res.Json(profiles.Documents) + + return Context.Res.Json(profiles.Rows) } ``` @@ -285,4 +285,4 @@ Hopefully, by this point, you’re excited to start building with Go. Here are a Ready to get your hands on the new Go runtime and Go SDK? We’ve created a few resources to help you get your first Go project off the ground and leverage Golang’s speed and performance in your apps. 
Check them out: - [Join the Appwrite Community on Discord](https://appwrite.io/discord) -- [More about Init](https://file+.vscode-resource.vscode-cdn.net/Users/ebenezerdon/Documents/ed-repos/ed-technical-articles/appwrite/link_to_init) +- [More about Init](https://appwrite.io/init) diff --git a/src/routes/blog/post/announcing-roles-for-enhanced-collaboration-and-security/+page.markdoc b/src/routes/blog/post/announcing-roles-for-enhanced-collaboration-and-security/+page.markdoc index 3dd42ff76a..98534cce4e 100644 --- a/src/routes/blog/post/announcing-roles-for-enhanced-collaboration-and-security/+page.markdoc +++ b/src/routes/blog/post/announcing-roles-for-enhanced-collaboration-and-security/+page.markdoc @@ -10,11 +10,11 @@ category: product, announcement featured: false --- -We’re excited to announce a new feature available in the Pro and Scale plans: **Roles**. This enhancement is designed to bring granular permissions to the Appwrite Console, improving both team collaboration and security across your projects. +We’re excited to announce a new feature available in the Pro and Scale plans: **Roles**. This enhancement is designed to bring granular permissions to the Appwrite Console, improving both team collaboration and security across your projects. # The benefit of multiple roles -Previously, you could only invite team members to your organization with one role that gave equal access to all members, regardless of their function. Needless to say, this is not an ideal situation, especially as your team expands, bringing more complexities in managing permissions. With Roles, you can now assign specific permissions to each member of your team, ensuring everyone has the right level of access to their responsibilities. This update provides more control over who can access, modify, and manage different parts of your organization’s projects, making it a key addition to growing teams and larger projects. Managing access to your Appwrite Console has never been easier. 
+Previously, you could only invite team members to your organization with one role that gave equal access to all members, regardless of their function. Needless to say, this is not an ideal situation, especially as your team expands, bringing more complexities in managing permissions. With Roles, you can now assign specific permissions to each member of your team, ensuring everyone has the right level of access to their responsibilities. This update provides more control over who can access, modify, and manage different parts of your organization’s projects, making it a key addition to growing teams and larger projects. Managing access to your Appwrite Console has never been easier. # Overview of the new Roles in Appwrite @@ -24,17 +24,17 @@ Before this feature update, you only had one role: But now, alongside the existing owner role, we’ve added four new roles that you can assign to your team members: - **Developer**: Developers have access to all development resources but cannot manage the team or billing settings, making this ideal for members focusing solely on coding and technical tasks. -- **Editor**: Editors can modify most resources but cannot alter key backend elements such as collections, buckets, and topics. This is perfect for team members who handle content updates or file uploads without needing to adjust infrastructure. +- **Editor**: Editors can modify most resources but cannot alter key backend elements such as tables, buckets, and topics. This is perfect for team members who handle content updates or file uploads without needing to adjust infrastructure. - **Analyst**: Analysts have read-only access to all resources, ideal for team members who need to view data, analytics, or reports without making changes. - **Billing**: This role is strictly for billing-related actions, with access limited to billing details only, keeping financial data secure without touching other areas of your projects. 
# Invite more members for free -The new feature is now available on both the Pro and Scale plans. To celebrate the new feature, until the end of the year, you can invite members with the new roles to your Pro teams for free! This is the perfect opportunity to explore the new feature and see how it enhances your team's workflow. -Starting in the new year, member seats will be priced at $15 per month as part of our standard pricing. +The new feature is now available on both the Pro and Scale plans. To celebrate the new feature, until the end of the year, you can invite members with the new roles to your Pro teams for free! This is the perfect opportunity to explore the new feature and see how it enhances your team's workflow. +Starting in the new year, member seats will be priced at $15 per month as part of our standard pricing. # How to add new members with roles -To add new members with a specific Role to your organization, go to your organization overview and click `Invite` on the right. +To add new members with a specific Role to your organization, go to your organization overview and click `Invite` on the right. ![Console invite](/images/blog/new-roles/console2.png) diff --git a/src/routes/blog/post/announcing-type-generation-feature/+page.markdoc b/src/routes/blog/post/announcing-type-generation-feature/+page.markdoc index 88f52bc15e..b92eb7749f 100644 --- a/src/routes/blog/post/announcing-type-generation-feature/+page.markdoc +++ b/src/routes/blog/post/announcing-type-generation-feature/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post title: "Introducing Type generation: Automate your type definitions with Appwrite" -description: "Automatically generate types from your collections with support for multiple languages." +description: "Automatically generate types from your tables with support for multiple languages." 
date: 2025-06-24 cover: /images/blog/type-generation-feature/cover.png timeToRead: 4 @@ -9,7 +9,7 @@ author: chirag-aggarwal category: announcement --- -We're excited to announce Appwrite’s newest CLI feature, **Type generation**. Designed specifically to enhance your developer experience. Type generation automates the creation of type definitions directly from your database collections, seamlessly integrating with your preferred programming language. +We're excited to announce Appwrite’s newest CLI feature, **Type generation**. Designed specifically to enhance your developer experience. Type generation automates the creation of type definitions directly from your database tables, seamlessly integrating with your preferred programming language. # Say goodbye to manual mapping @@ -21,12 +21,12 @@ Whether you work with PHP, Swift, Dart, TypeScript, JavaScript, Java, or Kotlin, # Simplified workflow, immediate benefits -Using Type generation is straightforward. +Using Type generation is straightforward. -First, ensure you have the [Appwrite CLI](/docs/tooling/command-line/installation#getting-started) installed and your project is [initialised](/docs/tooling/command-line/installation#initialization). Then, run the following command in your terminal to pull collections from your Appwrite project: +First, ensure you have the [Appwrite CLI](/docs/tooling/command-line/installation#getting-started) installed and your project is [initialised](/docs/tooling/command-line/installation#initialization). 
Then, run the following command in your terminal to pull tables from your Appwrite project: ```bash -appwrite pull collections +appwrite pull tables ``` To generate types, you can use the Appwrite CLI command: diff --git a/src/routes/blog/post/appwrite-1.5-now-available-on-cloud/+page.markdoc b/src/routes/blog/post/appwrite-1.5-now-available-on-cloud/+page.markdoc index 0eb08f9130..d0070734c9 100644 --- a/src/routes/blog/post/appwrite-1.5-now-available-on-cloud/+page.markdoc +++ b/src/routes/blog/post/appwrite-1.5-now-available-on-cloud/+page.markdoc @@ -206,22 +206,22 @@ New Database operators `contains` and `or`, providing greater control and flexib The contains operator is a great addition to the existing text search operators such as startsWith & endsWith, and can be used in combination with these two. ```js -db.listDocuments({ - databaseId: '', - collectionId: '', - queries: [ - Query.contains('content', ['happy', 'love']) - ] -}); +db.listRows( + '', + '', + [ + Query.contains('content', ['happy', 'love']), + ] + ) ``` To use the OR operator pass Query.or([...]) to the queries array and provide at least two queries within the nested array. ```js -db.listDocuments({ - databaseId: '', - collectionId: '', - queries: [ +db.listRows( + '', + '', + [ Query.or([ Query.contains('name','ivy'), Query.greaterThan('age',30) diff --git a/src/routes/blog/post/appwrite-realtime-for-flutter/+page.markdoc b/src/routes/blog/post/appwrite-realtime-for-flutter/+page.markdoc index d13c304418..1aa06399fe 100644 --- a/src/routes/blog/post/appwrite-realtime-for-flutter/+page.markdoc +++ b/src/routes/blog/post/appwrite-realtime-for-flutter/+page.markdoc @@ -1,7 +1,7 @@ --- layout: post title: "Get started with Appwrite Realtime for Flutter" -description: Learn how to build a Flutter app using Appwrite's powerful Realtime API. +description: Learn how to build a Flutter app using Appwrite's powerful Realtime API. 
cover: /images/blog/appwrite-realtime-with-flutter/cover.png timeToRead: 6 date: 2024-09-02 @@ -26,24 +26,24 @@ On the databases page, click on the **Create database** button. In the dialog that pops up, enter a name and database ID, and click **Create to create the database and show the database page**. Make sure to note down the database ID next to the database name as we will need that later in our code. -Once on the database page, click on the **Create collection** button. +Once on the database page, click on the **Create table** button. -![Create collection](/images/blog/appwrite-realtime-with-flutter/2.png) +![Create table](/images/blog/appwrite-realtime-with-flutter/2.png) -In the dialog that pops up, set the collection name to **Items** and click on the **Create** button to create the collection, and you will be redirected to the new collection's page. +In the dialog that pops up, set the table name to **Items** and click on the **Create** button to create the table, and you will be redirected to the new table's page. -Switch to the Attributes tab and create the following attribute. Also note down the **Collection ID** from the top of the page next to the collection name. +Switch to the Columns tab and create the following column. Also note down the **Table ID** from the top of the page next to the table name. - Type: String -- Attribute Key: name +- Column Key: name - Size: 25 - Default: null - Required: true - Array: false ![Create String](/images/blog/appwrite-realtime-with-flutter/3.png) - -Switch to the Settings tab and scroll down to **Permissions** to configure the permissions for the collection. Add the **Any** role and check create, read, update, and delete so that anyone can read and write. + +Switch to the Settings tab and scroll down to **Permissions** to configure the permissions for the table. Add the **Any** role and check create, read, update, and delete so that anyone can read and write. 
![Set permissions](/images/blog/appwrite-realtime-with-flutter/4.png) @@ -83,7 +83,7 @@ By registering a new platform, you are allowing your app to communicate with the # Home page -We will start by creating a simple stateful widget that will list all the items from our items collection and allow adding new items and deleting existing items. Our home page will also connect to Appwrite's Realtime service and display changes in the collection of items by updating the UI as they happen. So, let's create our **HomePage** widget. Modify the code in **lib/main.dart** as follows: +We will start by creating a simple stateful widget that will list all the items from our items table and allow adding new items and deleting existing items. Our home page will also connect to Appwrite's Realtime service and display changes in the table of items by updating the UI as they happen. So, let's create our **HomePage** widget. Modify the code in **lib/main.dart** as follows: ```dart import 'package:flutter/material.dart'; @@ -171,7 +171,7 @@ class _HomePageState extends State { ``` -In the **initState** function of the HomePage, we will create and initialize our Appwrite client, as well as subscribe to real-time changes in documents in our **items** collection. +In the **initState** function of the HomePage, we will create and initialize our Appwrite client, as well as subscribe to real-time changes in rows in our **items** table. ```dart RealtimeSubscription? subscription; @@ -193,33 +193,33 @@ dispose(){ } ``` -Now, let us set up different variables and functions to load the initial data, listen to changes in the collection documents, and update the UI to reflect the changes in real time. +Now, let us set up different variables and functions to load the initial data, listen to changes in the table rows, and update the UI to reflect the changes in real time. 
-First, initialize our database ID and items collection ID and set up a function to load initial data when the application first starts. For that, we will also set up an Appwrite database service. +First, initialize our database ID and items table ID and set up a function to load initial data when the application first starts. For that, we will also set up an Appwrite database service. ```dart final database = 'default'; // your database id - final itemsCollection = 'items'; // your collection id - late final Databases databases; + final itemsTable = 'items'; // your table id + late final TablesDB tablesDB; @override initState() { super.initState(); client = Client().setProject('delete'); // your project id - databases = Databases(client); + tablesDB = TablesDB(client); loadItems(); subscribe(); } loadItems() async { try { - final res = await databases.listDocuments( + final res = await tablesDB.listRows( databaseId: database, - collectionId: itemsCollection, + tableId: itemsTable, ); setState(() { items = - List>.from(res.documents.map((e) => e.data)); + List>.from(res.rows.map((e) => e.data)); }); } on AppwriteException catch (e) { print(e.message); @@ -227,14 +227,14 @@ First, initialize our database ID and items collection ID and set up a function } ``` -Now, we will set up our subscribe function, which will listen to changes to documents in our items collection. +Now, we will set up our subscribe function, which will listen to changes to rows in our items table. 
```dart void subscribe() { final realtime = Realtime(client); subscription = realtime.subscribe([ - 'documents' // subscribe to all documents in every database and collection + 'rows' // subscribe to all rows in every database and table ]); // listen to changes @@ -261,10 +261,10 @@ Finally, let's modify our `_addItem` function to add items to Appwrite's databas ```dart void _addItem(String name) async { try { - await databases.createDocument( + await tablesDB.createRow( databaseId: database, - collectionId: itemsCollection, - documentId: ID.unique(), + tableId: itemsTable, + rowId: ID.unique(), data: {'name': name}, ); } on AppwriteException catch (e) { @@ -281,10 +281,10 @@ Let us also modify our `ListTile` widget to add a delete button that will allo trailing: IconButton( icon: const Icon(Icons.delete), onPressed: () async { - await databases.deleteDocument( + await tablesDB.deleteRow( databaseId: database, - collectionId: itemsCollection, - documentId: item['\$id'], + tableId: itemsTable, + rowId: item['\$id'], ); }, ), @@ -329,27 +329,27 @@ class _HomePageState extends State { RealtimeSubscription? 
subscription; late final Client client; final database = 'default'; // your database id - final itemsCollection = 'items'; // your collection id - late final Databases databases; + final itemsTable = 'items'; // your table id + late final TablesDB tablesDB; @override initState() { super.initState(); client = Client().setProject('delete'); // your project id - databases = Databases(client); + tablesDB = TablesDB(client); loadItems(); subscribe(); } loadItems() async { try { - final res = await databases.listDocuments( + final res = await tablesDB.listRows( databaseId: database, - collectionId: itemsCollection, + tableId: itemsTable, ); setState(() { items = - List>.from(res.documents.map((e) => e.data)); + List>.from(res.rows.map((e) => e.data)); }); } on AppwriteException catch (e) { print(e.message); @@ -360,7 +360,7 @@ class _HomePageState extends State { final realtime = Realtime(client); subscription = realtime.subscribe([ - 'documents' // subscribe to all documents in every database and collection + 'rows' // subscribe to all rows in every database and table ]); // listen to changes @@ -399,10 +399,10 @@ class _HomePageState extends State { trailing: IconButton( icon: const Icon(Icons.delete), onPressed: () async { - await databases.deleteDocument( + await tablesDB.deleteRow( databaseId: database, - collectionId: itemsCollection, - documentId: item['\$id'], + tableId: itemsTable, + rowId: item['\$id'], ); }, ), @@ -446,10 +446,10 @@ class _HomePageState extends State { void _addItem(String name) async { try { - await databases.createDocument( + await tablesDB.createRow( databaseId: database, - collectionId: itemsCollection, - documentId: ID.unique(), + tableId: itemsTable, + rowId: ID.unique(), data: {'name': name}, ); } on AppwriteException catch (e) { @@ -462,7 +462,7 @@ class _HomePageState extends State { # Conclusion -I hope you enjoyed learning and building Flutter applications with Appwrite Realtime service. 
+I hope you enjoyed learning and building Flutter applications with Appwrite Realtime service. If you have any questions, feel free to ask on our [Discord server](https://appwrite.io/discord). You can also share your apps built with Flutter and Appwrite Realtime on [Built with Appwrite](https://builtwith.appwrite.io/), and we'll feature it on our socials! diff --git a/src/routes/blog/post/best-pagination-technique/+page.markdoc b/src/routes/blog/post/best-pagination-technique/+page.markdoc index 58fe8b283d..d3fdaba085 100644 --- a/src/routes/blog/post/best-pagination-technique/+page.markdoc +++ b/src/routes/blog/post/best-pagination-technique/+page.markdoc @@ -45,9 +45,9 @@ To get the second page, we keep the limit at 10 (this doesn't change since we wa In the SQL world, such a query would be written as `SELECT * FROM posts OFFSET 10 LIMIT 10`. -Some websites implementing offset pagination also show the page number of the last page. How do they do it? Alongside results for each page, they also tend to return a `sum` attribute telling you how many rows there are in total. Using `limit`, `sum`, and a bit of math, you can calculate last page number using `lastPage = ceil(sum / limit)` +Some websites implementing offset pagination also show the page number of the last page. How do they do it? Alongside results for each page, they also tend to return a `sum` column telling you how many rows there are in total. Using `limit`, `sum`, and a bit of math, you can calculate last page number using `lastPage = ceil(sum / limit)` -As convenient as this feature is for the user, developers struggle to scale this type of pagination. Looking at `sum` attribute, we can already see that it can take quite some time to count all rows in a database to the exact number. **Alongside that, the `offset` in databases is implemented in a way that loops through rows to know how many should be skipped.** That means that the higher our offset is, the longer our database query will take. 
+As convenient as this feature is for the user, developers struggle to scale this type of pagination. Looking at `sum` column, we can already see that it can take quite some time to count all rows in a database to the exact number. **Alongside that, the `offset` in databases is implemented in a way that loops through rows to know how many should be skipped.** That means that the higher our offset is, the longer our database query will take. Another downside of offset pagination is that it doesn't play well with real-time data or data that changes often. Offset says how many rows we want to skip but doesn't account for row deletion or new rows being created. Such an offset can result in showing duplicate data or missing data. @@ -81,39 +81,39 @@ This is a really rare condition and only occurs if the row's ID that you are abo [Appwrite](https://appwrite.io/) is an open-source backend-as-a-service that abstracts all the complexity involved in building a modern application by providing you with a set of REST APIs for your core backend needs. Appwrite handles user authentication and authorization, databases, file storage, cloud functions, webhooks, messaging, and more. You can extend Appwrite using your favorite backend language if anything is missing. -Appwrite Database lets you store any text-based data that needs to be shared across your users. Appwrite's database allows you to create multiple collections (tables) and store multiple documents (rows) in it. Each collection has attributes (columns) configured to give your dataset a proper schema. You can also configure indexes to make your search queries more performant. When reading your data, you can use powerful queries, filter them, sort them, limit the number of results, and paginate over them. +Appwrite Database lets you store any text-based data that needs to be shared across your users. Appwrite's database allows you to create multiple tables and store multiple rows in it. 
Each table has columns configured to give your dataset a proper schema. You can also configure indexes to make your search queries more performant. When reading your data, you can use powerful queries, filter them, sort them, limit the number of results, and paginate over them. -Appwrite's pagination supports both offset and cursor pagination. Let's imagine you have a collection with ID `articles`. You can get documents from this collection with either offset or cursor pagination: +Appwrite's pagination supports both offset and cursor pagination. Let's imagine you have a table with ID `articles`. You can get rows from this table with either offset or cursor pagination: ```jsx // Setup -import { Appwrite, Databases, Query } from "appwrite"; +import { Appwrite, TablesDB, Query } from "appwrite"; const client = new Appwrite(); client .setEndpoint('https://.cloud.appwrite.io/v1') // Your API Endpoint .setProject('articles-demo'); // Your project ID -const databases = new Databases(client); +const tablesDB = new TablesDB(client); // Offset pagination -databases.listDocuments( +tablesDB.listRows( 'main', // Database ID - 'articles', // Collection ID + 'articles', // Table ID [ - Query.limit(10), // Limit, total documents in the response - Query.offset(500), // Offset, amount of documents to skip + Query.limit(10), // Limit, total rows in the response + Query.offset(500), // Offset, amount of rows to skip ] ).then((response) => { console.log(response); }); // Cursor pagination -databases.listDocuments( +tablesDB.listRows( 'main', // Database ID - 'articles', // Collection ID + 'articles', // Table ID [ - Query.limit(10), // Limit, total documents in the response - Query.cursorAfter('61d6eb2281fce3650c2c', // ID of document I want to paginate after + Query.limit(10), // Limit, total rows in the response + Query.cursorAfter('61d6eb2281fce3650c2c', // ID of row I want to paginate after ] ).then((response) => { console.log(response); @@ -121,7 +121,7 @@ 
databases.listDocuments( ``` -First, we import the Appwrite SDK and set up an instance that connects to our Appwrite Cloud project. Then, we list 10 documents using offset pagination. Right after, we write the exact same list documents query, but this time using `cursor` instead of offset pagination. +First, we import the Appwrite SDK and set up an instance that connects to our Appwrite Cloud project. Then, we list 10 rows using offset pagination. Right after, we write the exact same list rows query, but this time using `cursor` instead of offset pagination. ## Benchmarks @@ -129,7 +129,7 @@ We’ve frequently mentioned the term "performance" in this article without prov > You can find complete source code in the [GitHub repository.](https://github.com/appwrite) -First, you set up Appwrite, register a user, create a project and create a collection called `posts` with read permission set to `any`. To learn more about this process, visit the [Appwrite docs](https://appwrite.io/docs). You should now have Appwrite ready to go. +First, you set up Appwrite, register a user, create a project and create a table called `posts` with read permission set to `any`. To learn more about this process, visit the [Appwrite docs](https://appwrite.io/docs). You should now have Appwrite ready to go. Use the following script to load data into our MariaDB database and prepare for the benchmark. We could use Appwrite SDK, but talking directly to MariaDB offers more optional write queries for large datasets. 
@@ -161,7 +161,7 @@ for(let i = 0; i < 100; i++) { index++; } - const query = `INSERT INTO _project_${config.projectId}_collection_posts (_uid, _read, _write) VALUES ${queryValues.join(", ")}`; + const query = `INSERT INTO _project_${config.projectId}_table_posts (_uid, _read, _write) VALUES ${queryValues.join(", ")}`; promises.push(connection.execute(query)); } @@ -175,9 +175,9 @@ console.error(`🌟 Successfully finished`); > We used two layers for loops to increase the speed of the script. First for loop creates query executions that need to be awaited, and the second loop creates a long query holding multiple insert requests. Ideally, we would want everything in one request, but that is impossible due to MySQL configuration, so we split it into 100 requests. -Now you have 1 million documents inserted in less than a minute, and are ready to start the benchmarks. We will be using the [k6](https://k6.io/) load-testing library for this demo. +Now you have 1 million rows inserted in less than a minute, and are ready to start the benchmarks. We will be using the [k6](https://k6.io/) load-testing library for this demo. -Let's benchmark the well-known and widely used offset pagination first. During each test scenario, we try to fetch a page with 10 documents, from different parts of our dataset. We will start with offset 0 and go all the way to an offset of 900k in increments of 100k. The benchmark is written in a way that makes only one request at a time to keep it as accurate as possible. We will also run the same benchmark ten times and measure average response times to ensure statistical significance. We'll be using k6's HTTP client to make requests to Appwrite's REST API. +Let's benchmark the well-known and widely used offset pagination first. During each test scenario, we try to fetch a page with 10 rows, from different parts of our dataset. We will start with offset 0 and go all the way to an offset of 900k in increments of 100k. 
The benchmark is written in a way that makes only one request at a time to keep it as accurate as possible. We will also run the same benchmark ten times and measure average response times to ensure statistical significance. We'll be using k6's HTTP client to make requests to Appwrite's REST API. ```jsx // script_offset.sh @@ -198,7 +198,7 @@ export default function () { const offset = Query.offset(__ENV.OFFSET); const limit = 10; - http.get(`${config.endpoint}/databases/main/collections/posts/documents?queries[]=${offset}&queries[]=${limit}`, { + http.get(`${config.endpoint}/databases/main/tables/posts/rows?queries[]=${offset}&queries[]=${limit}`, { headers: { 'content-type': 'application/json', 'X-Appwrite-Project': config.projectId @@ -243,9 +243,9 @@ Within a minute, all benchmarks finished, providing me with the average response | 90% offset | 482.71 | ![Cursor pagination benchmark](/images/blog/best-pagination-technique/graph.png) -As you can see, offset 0 was pretty fast, responding in less than 4ms. The first jump was to offset 100k, and the change was drastic, increasing response times to 52ms. With each increase in the offset, the duration went up, resulting in almost 500ms to get ten documents after an offset of 900k documents. That is crazy! +As you can see, offset 0 was pretty fast, responding in less than 4ms. The first jump was to offset 100k, and the change was drastic, increasing response times to 52ms. With each increase in the offset, the duration went up, resulting in almost 500ms to get ten rows after an offset of 900k rows. That is crazy! -Now let's update our script to use cursor pagination. We will update our script to use a cursor instead of offset and update our bash script to provide a cursor (document ID) instead of an offset number. +Now let's update our script to use cursor pagination. We will update our script to use a cursor instead of offset and update our bash script to provide a cursor (row ID) instead of an offset number. 
```jsx // script_cursor.js @@ -266,7 +266,7 @@ export default function () { const cursor = Query.cursorAfter(__ENV.CURSOR); const limit = 10; - http.get(`${config.endpoint}/databases/main/collections/posts/documents?queries[]=${offset}&queries[]=${limit}`, { + http.get(`${config.endpoint}/databases/main/tables/posts/rows?queries[]=${offset}&queries[]=${limit}`, { headers: { 'content-type': 'application/json', 'X-Appwrite-Project': config.projectId diff --git a/src/routes/blog/post/build-fullstack-svelte-appwrite/+page.markdoc b/src/routes/blog/post/build-fullstack-svelte-appwrite/+page.markdoc index 417c7f39fb..875906978e 100644 --- a/src/routes/blog/post/build-fullstack-svelte-appwrite/+page.markdoc +++ b/src/routes/blog/post/build-fullstack-svelte-appwrite/+page.markdoc @@ -61,7 +61,7 @@ Our application needs to communicate with [Appwrite](https://cloud.appwrite.io/? PUBLIC_APPWRITE_ENDPOINT=https://cloud.appwrite.io/v1 PUBLIC_APPWRITE_PROJECT_ID=your-project-id PUBLIC_APPWRITE_DATABASE_ID=expense-db -PUBLIC_APPWRITE_COLLECTION_ID=expenses +PUBLIC_APPWRITE_TABLE_ID=expenses ``` The `PUBLIC_` prefix makes these variables available to our client-side code in Svelte. You'll need to replace `your-project-id` with your actual Appwrite project ID, which we'll create in the next step. @@ -72,9 +72,9 @@ Before we continue with the frontend implementation, we need to configure our Ap 1. Create a new project 2. Open the **Databases** tab from the sidebar and create a database with the ID "expense-db" -3. In your new database, create a collection with the ID "expenses" +3. In your new database, create a table with the ID "expenses" -The expenses collection needs several attributes to store the expense data effectively. Open the **Attributes** tab of your new collection and add the following attributes: +The expenses table needs several columns to store the expense data effectively. 
Open the **Columns** tab of your new table and add the following columns: ```md - `userId` (String, required) @@ -87,13 +87,13 @@ The expenses collection needs several attributes to store the expense data effec - `updatedAt` (DateTime, required) ``` -Notice that the `category` attribute is an enumerated type with a set of predefined values. This structured approach helps us organize and filter expenses effectively. We have both a `date` attribute and a `createdAt` attribute because when an expense is created is not necessarily the same as when it occurred. +Notice that the `category` column is an enumerated type with a set of predefined values. This structured approach helps us organize and filter expenses effectively. We have both a `date` column and a `createdAt` column because when an expense is created is not necessarily the same as when it occurred. -To ensure that users can only access their own expenses, Open the collection's **Settings** tab and scroll to **Permissions**. Click **Add role**, select **Users** and check **Create** permission. +To ensure that users can only access their own expenses, open the table's **Settings** tab and scroll to **Permissions**. Click **Add role**, select **Users** and check **Create** permission. -Next, enable **Document Security** to allow users to access their documents. We'll ensure this by giving users the **Read** permission when creating documents in our code. +Next, enable **Row Security** to allow users to access their rows. We'll ensure this by giving users the **Read** permission when creating rows in our code. -![permissions-document-security](/images/blog/build-fullstack-svelte-appwrite/permissions-document-security.png) +![permissions-row-security](/images/blog/build-fullstack-svelte-appwrite/permissions-row-security.png) ## Project structure @@ -153,19 +153,19 @@ For the base `src/app.html` file, we'll use the default Sveltekit template, but ``` -This template provides the basic structure for our application. 
The `data-sveltekit-preload-data="hover"` attribute enables SvelteKit's built-in preloading feature to make navigation faster. +This template provides the basic structure for our application. The `data-sveltekit-preload-data="hover"` attribute enables SvelteKit's built-in preloading feature to make navigation faster. ## Setting up the Appwrite client Let's set up our connection to Appwrite. If you haven't already, create a new file in the `src/lib` directory named `appwrite.js`. We'll use this file to configure the Appwrite client and provide access to our database and account services. ```js -import { Client, Account, Databases } from 'appwrite' +import { Client, Account, TablesDB } from 'appwrite' import { PUBLIC_APPWRITE_ENDPOINT, PUBLIC_APPWRITE_PROJECT_ID, PUBLIC_APPWRITE_DATABASE_ID, - PUBLIC_APPWRITE_COLLECTION_ID + PUBLIC_APPWRITE_TABLE_ID } from '$env/static/public' const client = new Client() @@ -173,17 +173,17 @@ const client = new Client() client.setEndpoint(PUBLIC_APPWRITE_ENDPOINT).setProject(PUBLIC_APPWRITE_PROJECT_ID) export const account = new Account(client) -export const databases = new Databases(client) +export const tablesDB = new TablesDB(client) -// Collection IDs from environment variables -export const EXPENSES_COLLECTION_ID = PUBLIC_APPWRITE_COLLECTION_ID +// Table IDs from environment variables +export const EXPENSES_TABLE_ID = PUBLIC_APPWRITE_TABLE_ID export const DATABASE_ID = PUBLIC_APPWRITE_DATABASE_ID ``` -This configuration file initializes our connection to Appwrite. The `Client` class creates a new Appwrite client instance, which we configure with our endpoint and project ID from our environment variables. We then create instances of the `Databases` and `Account` services, which we'll use throughout our application for database operations and user authentication. +This configuration file initializes our connection to Appwrite. 
The `Client` class creates a new Appwrite client instance, which we configure with our endpoint and project ID from our environment variables. We then create instances of the `TablesDB` and `Account` services, which we'll use throughout our application for database operations and user authentication. -Finally, we export the collection IDs from our environment variables so that we can use them in other parts of our application. +Finally, we export the table IDs from our environment variables so that we can use them in other parts of our application. ## Managing authentication state @@ -533,7 +533,7 @@ We'll start with our imports and state management: @@ -76,17 +76,17 @@ const client = new Appwrite.Client() .setEndpoint('https://.cloud.appwrite.io/v1') // Your API Endpoint .setProject(''); // Your project ID -const database = new Appwrite.Databases(client); +const tablesDB = new Appwrite.TablesDB(client); document.querySelector('button').addEventListener('click', async () => { const promises = []; for (let i = 0; i < 200; i++) { - const promise = database.createDocument({ - databaseId: 'testDb', // Your database ID - collectionId: 'testCollection', // Your collection ID - documentId: Appwrite.ID.unique(), - data: { number: i + i } - }); + const promise = tablesDB.createRow( + 'testDb', // Your database ID + 'testTable', // Your table ID + Appwrite.ID.unique(), + { number: i + i } + ); promises.push(promise); console.log('Request initiated:', i+1); } @@ -95,7 +95,7 @@ document.querySelector('button').addEventListener('click', async () => { }); ``` -If you open the HTML page in your browser and click on the `Add documents` button, you will notice numerous errors in the console with the HTTP code `429`, as Appwrite's rate limits allow one client to create 120 requests per minute for this API endpoint. 
+If you open the HTML page in your browser and click on the `Add rows` button, you will notice numerous errors in the console with the HTTP code `429`, as Appwrite's rate limits allow one client to create 120 requests per minute for this API endpoint. ## Step 3: Create dev key @@ -114,7 +114,7 @@ const client = new Appwrite.Client() ## Step 4: Test the app -Reopen the HTML page in your browser. Clicking the `Add documents` button will allow all 200 requests to execute successfully. +Reopen the HTML page in your browser. Clicking the `Add rows` button will allow all 200 requests to execute successfully. # Next steps diff --git a/src/routes/blog/post/integrate-sql-nosql-vector-graph-or-any-database-into-your-appwrite-project/+page.markdoc b/src/routes/blog/post/integrate-sql-nosql-vector-graph-or-any-database-into-your-appwrite-project/+page.markdoc index 20b81e8686..d73ec0f906 100644 --- a/src/routes/blog/post/integrate-sql-nosql-vector-graph-or-any-database-into-your-appwrite-project/+page.markdoc +++ b/src/routes/blog/post/integrate-sql-nosql-vector-graph-or-any-database-into-your-appwrite-project/+page.markdoc @@ -119,7 +119,7 @@ export default async ({ req, res, log, error }) => { const location = `Street ${Math.round(Math.random() * 1000)}, Earth`; // Random address const capacity = 10 + Math.round(Math.random() * 10) * 10; // Random number: 10,20,30,...,90,100 - await client.db("main").collection("warehouses").insertOne({ + await client.db("main").collection("warehouses").insertOne({ location, capacity }); @@ -127,7 +127,7 @@ export default async ({ req, res, log, error }) => { // Query data const page = 1; const limit = 100; - const cursor = client.db("main").collection("warehouses") + const cursor = client.db("main").collection("warehouses") .find().limit(limit).skip((page - 1) * limit); const docs = []; diff --git a/src/routes/blog/post/introducing-functions-ecosystem/+page.markdoc b/src/routes/blog/post/introducing-functions-ecosystem/+page.markdoc index 
8833a8855b..e513019f82 100644 --- a/src/routes/blog/post/introducing-functions-ecosystem/+page.markdoc +++ b/src/routes/blog/post/introducing-functions-ecosystem/+page.markdoc @@ -32,7 +32,7 @@ To combat this, we've introduced automatically generated, short-lived API keys f Here's how you can use them: ```jsx -import { Client, Databases } from 'node-appwrite'; +import { Client, TablesDB } from 'node-appwrite'; export default async ({ req, res }) => { const client = new Client() @@ -40,7 +40,7 @@ export default async ({ req, res }) => { .setProject(process.env.APPWRITE_FUNCTION_PROJECT_ID) .setKey(req.headers['x-appwrite-key']); - const databases = new Databases(client); + const tablesDB = new TablesDB(client); // Your function logic here @@ -64,15 +64,15 @@ const functions = new Functions(client); const invoiceDate = new Date(); invoiceDate.setDate(invoiceDate.getDate() + 30); -await functions.createExecution({ - functionId: 'invoicesApi', // Function ID - body: '{"userId":"ngu9ife0efwed"}', // Body - async: true, // Async execution - xpath: '/v1/invoices/exports', // Path - method: ExecutionMethod.POST, // Method - headers: {}, // Headers - scheduledAt: invoiceDate.toISOString() // New scheduledAt attribute -}); +await functions.createExecution( + 'invoicesApi', // Function ID + '{"userId":"ngu9ife0efwed"}', // Body + true, // Async execution + '/v1/invoices/exports', // Path + 'POST', // Method + {}, // Headers + invoiceDate.toISOString() // New scheduledAt parameter +); ``` This feature is perfect for scheduling marketing emails, cleanup tasks, or precisely timed events. 
@@ -116,7 +116,7 @@ This allows for new use cases like sending files to AI services or generating fi # Execution and deployment filtering -You can now filter Function executions and deployments based on various attributes, making it easier to monitor and debug your Functions: +You can now filter Function executions and deployments based on various attributes, making it easier to monitor and debug your Functions: ```jsx // Example of filtering executions (specific implementation may vary) diff --git a/src/routes/blog/post/introducing-new-appwrite-cli/+page.markdoc b/src/routes/blog/post/introducing-new-appwrite-cli/+page.markdoc index 40d4569f8a..663faeee96 100644 --- a/src/routes/blog/post/introducing-new-appwrite-cli/+page.markdoc +++ b/src/routes/blog/post/introducing-new-appwrite-cli/+page.markdoc @@ -9,15 +9,15 @@ author: binyamin-yawitz category: product, announcement featured: false --- -We're excited to announce the new Appwrite CLI. This iteration focuses on local development and an enhanced CI/CD experience. Now, you can test changes to your functions locally, and easily apply changes to your Appwrite collection. +We're excited to announce the new Appwrite CLI. This iteration focuses on local development and an enhanced CI/CD experience. Now, you can test changes to your functions locally, and easily apply changes to your Appwrite table. Let’s dive into the updates to the new Appwrite CLI and how it will improve your building experience. # Understanding past limitations -Appwrite developers use the current generation of Appwrite CLI to initialize functions and collections and deploy those resources. +Appwrite developers use the current generation of Appwrite CLI to initialize functions and tables and deploy those resources. -When deploying collections, the only option is to override and delete all the collection's existing data, which is not the use case for most scenarios. 
+When deploying tables, the only option is to override and delete all the table's existing data, which is not the use case for most scenarios. The only way to test an Appwrite function is to continue deploying the function changes to the Appwrite instance, which can be aggravating for small changes. @@ -89,7 +89,7 @@ Appwrite CLI is now in listening mode. Try changing your code and seeing how the GitOps is a common way of tracking and migrating database changes. The latest Appwrite CLI generation includes a few features to help you migrate your database changes easily. -When running `appwrite push collection`, the CLI will compare your local `appwrite.config.json` collection definition against the currently deployed remote collection and will present you with a detailed table awaiting your decision, for example: +When running `appwrite push table`, the CLI will compare your local `appwrite.config.json` table definition against the currently deployed remote table and will present you with a detailed table awaiting your decision, for example: ``` Key │ Action │ Reason @@ -100,24 +100,24 @@ time │ adding │ Field isn't available on the remote server ──────────┼────────────┼───────────────────────────────────────────── timezone │ recreating │ size changed from 256 to 255 -ℹ Info: Attribute deletion will cause loss of data -ℹ Info: Attribute recreation will cause loss of data +ℹ Info: Column deletion will cause loss of data +ℹ Info: Column recreation will cause loss of data ? Would you like to apply these changes? Type "YES" to confirm. ``` -In this example, we can see that because we've renamed the attribute `times` to `time,` it will get deleted and read. We must also recreate the `timezone` attribute because we've changed its size from 256 to 255. +In this example, we can see that because we've renamed the column `times` to `time,` it will get deleted and read. We must also recreate the `timezone` column because we've changed its size from 256 to 255. 
To help with the decision, you can notice two warnings: deleting or recreating a field will cause data loss. -It's important to know that the data loss will affect only the recreated/deleted attribute and not the whole collection. +It's important to know that the data loss will affect only the recreated/deleted column and not the whole table. -As you can read in the next section, when pushing collections in CI/CD pipelines, you'll need to add the `--force` flag. +As you can read in the next section, when pushing tables in CI/CD pipelines, you'll need to add the `--force` flag. # CI/CD Adapting CI/CD pipelines ensures robust deployments. To support this, we have rewritten many parts of our CLI to fully accommodate non-interactive actions for all deployment-related commands. -You can add the `--force` flag to any command that may ask you questions, such as `appwrite push collections,` to pre-answer all of them with `YES.` Additionally, you can use the `--all` flag to push/pull all services' available resources. +You can add the `--force` flag to any command that may ask you questions, such as `appwrite push tables,` to pre-answer all of them with `YES.` Additionally, you can use the `--all` flag to push/pull all services' available resources. Till this generation, Appwrite CLI supported non-interactive login for API-key-based authorization only, as follows: diff --git a/src/routes/blog/post/introducing-new-database-operators/+page.markdoc b/src/routes/blog/post/introducing-new-database-operators/+page.markdoc index 2445f9ad8e..1e6c3315d4 100644 --- a/src/routes/blog/post/introducing-new-database-operators/+page.markdoc +++ b/src/routes/blog/post/introducing-new-database-operators/+page.markdoc @@ -14,7 +14,7 @@ We've added two new query methods, `or` and `contains`, to Appwrite Databases. B These two query methods have been highly requested by the Appwrite community, and we’re excited to show you how to use them, so let’s jump in and take a look! 
-- `contains` - partial text matches on string attributes, array element matching on array attributes +- `contains` - partial text matches on string columns, array element matching on array columns - `or` - write logical OR queries # Contains operator @@ -22,16 +22,17 @@ These two query methods have been highly requested by the Appwrite community, an The contains operator is a great addition to the existing text search operators such as `startsWith` & `endsWith`, and can be used in combination with these two. With contains, we can now perform a broader search by matching against any text within a substring. This is extremely useful when searching a large body of text or when the placement of keywords is unknown. ```js -db.listDocuments({ - databaseId: '', - collectionId: '', - queries: [ - Query.contains('content', ['happy', 'love']) - ] -}); + +db.listRows( + '', + '', + [ + Query.contains('content', ['happy', 'love']), + ] + ) ``` -It’s important to note that the contains operator also works on array attributes as well. For example, if we set a string attribute to act as an array, you could search this array in the same way you would search any other string. +It’s important to note that the contains operator also works on array columns as well. For example, if we set a string column to act as an array, you could search this array in the same way you would search any other string. ```js Query.contains('tags', ['mystery', 'comedy', 'PG-13']) @@ -45,10 +46,10 @@ The logical OR operator allows us to nest queries in an OR condition. This gives To use the OR operator pass `Query.or([...])` to the queries array and provide at least two queries within the nested array. 
```js
-db.listDocuments({
-    databaseId: '',
-    collectionId: '',
-    queries: [
+db.listRows(
+    '',
+    '',
+    [
     Query.or([
         Query.contains('name','ivy'),
         Query.greaterThan('age',30)
diff --git a/src/routes/blog/post/introducing-support-for-server-side-rendering/+page.markdoc b/src/routes/blog/post/introducing-support-for-server-side-rendering/+page.markdoc
index 74dba2c010..0fd5e62790 100644
--- a/src/routes/blog/post/introducing-support-for-server-side-rendering/+page.markdoc
+++ b/src/routes/blog/post/introducing-support-for-server-side-rendering/+page.markdoc
@@ -34,7 +34,7 @@ npm install node-appwrite

## Create sessions server-side

-To solve this issue, all existing server SDK methods that create a session now return a `secret` attribute. The following methods are:
+To solve this issue, all existing server SDK methods that create a session now return a `secret` property. The following methods are:

- `account.createEmailPasswordSession({email, password})`
- `account.createAnonymousSession()`
diff --git a/src/routes/blog/post/manage-user-permissions-with-labels-and-teams/+page.markdoc b/src/routes/blog/post/manage-user-permissions-with-labels-and-teams/+page.markdoc
index d0185f750b..3b108aa871 100644
--- a/src/routes/blog/post/manage-user-permissions-with-labels-and-teams/+page.markdoc
+++ b/src/routes/blog/post/manage-user-permissions-with-labels-and-teams/+page.markdoc
@@ -11,11 +11,11 @@ featured: false

---

-Teams and Labels allow us to categorize and group users together, allowing us to set permissions to resources at the Team and label level instead of at the individual user level. Grouping users together makes managing permissions to documents, files and functions much more efficient this way.
+Teams and Labels allow us to categorize and group users together, allowing us to set permissions to resources at the Team and label level instead of at the individual user level. 
Grouping users together makes managing permissions to rows, files and functions much more efficient this way. ![Labels vs Teams](/images/blog/manage-user-permissions-with-labels-and-teams/labels3.png) -To think of this in real-world terms, imagine for a second we were building the next social media application. Teams can be used to create admins and moderators, and these admins and moderators would have permission to delete and flag posts that don’t meet community guidelines. The owner at the document level would have the ability to update and delete this post, but anyone who is on the moderator Team would also have the ability to delete the post, something that is normally restricted to only the owner of the post. We can also change which users have these permissions at any point by adding and removing users and updating the group-level permissions. +To think of this in real-world terms, imagine for a second we were building the next social media application. Teams can be used to create admins and moderators, and these admins and moderators would have permission to delete and flag posts that don’t meet community guidelines. The owner at the row level would have the ability to update and delete this post, but anyone who is on the moderator Team would also have the ability to delete the post, something that is normally restricted to only the owner of the post. We can also change which users have these permissions at any point by adding and removing users and updating the group-level permissions. Another example we can take a look at would be a streaming service like Amazon Video. How do we give users access to a movie or show they paid for? This is where Teams and Labels make our lives easier. By simply adding Labels to users we could decide which users have access to specific resources like movies or shows in our application. @@ -46,7 +46,7 @@ Labels are essentially custom tags that you can assign to a user. 
They act as cu Just like Teams, we can grant permissions to specific Labels. Labels are attached to individual user accounts and are used to categorize users on a one-to-one basis. Labels can be used in a similar manner to Teams but are a more lightweight and flexible way to manage users and permissions. -This would make Labels a great option for managing which users can view restricted content behind a paywall like a course on Udemy or an e-book on a digital library platform. In this example you would simply attach a label that is unique to a user after they pay to access a product, and from that point on, the label would grant the user permission. If this access was subscription-based and the user stopped paying, we could simply remove the label from the user, and their access would be revoked since they no longer carry the label that gives them access. +This would make Labels a great option for managing which users can view restricted content behind a paywall like a course on Udemy or an e-book on a digital library platform. In this example you would simply attach a label that is unique to a user after they pay to access a product, and from that point on, the label would grant the user permission. If this access was subscription-based and the user stopped paying, we could simply remove the label from the user, and their access would be revoked since they no longer carry the label that gives them access. ### Summary - Labels are great for tagging users and assigning permissions based on those tags, while Teams are for grouping users together and allowing role-based management within a Team. @@ -78,7 +78,7 @@ Let's start with creating a new Team, adding members, and assigning roles to tho 2 - Once your Team is created, you can add a Member to the Team by selecting the “Members” tab and clicking “Create Membership”. Here you will enter the user's email address (name can be left blank) and assign a role to the member. 
Roles are optional, so you can leave this part blank.

-That’s it for creating a Team and adding members from the appwrite console. Now you can assign document, storage, and function permissions to your Teams. As an example, for collection level permissions, you can go to the “settings'' tab in a collection, and in the “permissions” section, choose “Select Teams'' to give permissions to an entire Team or “Custom permissions” if you want to assign permission to only users with a particular role within the Team.
+That's it for creating a Team and adding members from the Appwrite Console. Now you can assign row, storage, and function permissions to your Teams. As an example, for table level permissions, you can go to the "settings" tab in a table, and in the "permissions" section, choose "Select Teams" to give permissions to an entire Team or "Custom permissions" if you want to assign permission to only users with a particular role within the Team.

## Labels

diff --git a/src/routes/blog/post/messaging-explained/+page.markdoc b/src/routes/blog/post/messaging-explained/+page.markdoc
index 7cda0dd445..9eb2daeb0c 100644
--- a/src/routes/blog/post/messaging-explained/+page.markdoc
+++ b/src/routes/blog/post/messaging-explained/+page.markdoc
@@ -13,7 +13,7 @@ Recently, Appwrite launched its newest product, [Appwrite Messaging](https://app

# Unified messaging platform

-To give you a quick refresher, Messaging covers three communication channels under one unified API, allowing you to send email, SMS, and push notifications from your Appwrite project. It connects with a variety of third-party providers, such as Vonage, Twilio, Mailgun, and more, to deliver your messages. Our [documentation](https://appwrite.io/docs/products/messaging) provides a full overview of providers.
+To give you a quick refresher, Messaging covers three communication channels under one unified API, allowing you to send email, SMS, and push notifications from your Appwrite project. 
It connects with a variety of third-party providers, such as Vonage, Twilio, Mailgun, and more, to deliver your messages. Our [documentation](https://appwrite.io/docs/products/messaging) provides a full overview of providers.

![Messaging features](/images/blog/messaging-explained/features.gif)

@@ -36,7 +36,7 @@ In Appwrite Messaging, you can use topics to deliver messages to groups of users

## Targets

-Targets are different ways a user can be reached. For example, a user might have two emails, a phone number, and a phone and tablet installed with your app. This means the user has five different targets where you can deliver messages.
+Targets are different ways a user can be reached. For example, a user might have two emails, a phone number, and a phone and tablet installed with your app. This means the user has five different targets where you can deliver messages. 

You can send a message directly to a set of targets or add them as subscribers to a topic. For example, if you want to send a payment reminder to a user manually, you could choose to send it just to a specific target. If you want to send out your monthly newsletter, you could have a topic many targets can subscribe to and then send the message to the topic instead.

@@ -81,16 +81,16 @@ You can also watch the [product tour](https://www.youtube.com/watch?v=QdDgPeuBZ1

# How Messaging works

-Now that we have learned about the three core concepts of Appwrite Messaging, let us dive deeper into how the product works. Each time you send or schedule a push notification, email, or SMS text, it's recorded in Appwrite as a **message** is displayed in the **Messages** tab. However, there is a lot more that goes on under the hood.
+Now that we have learned about the three core concepts of Appwrite Messaging, let us dive deeper into how the product works. Each time you send or schedule a push notification, email, or SMS text, it's recorded in Appwrite as a **message** and is displayed in the **Messages** tab. 
However, there is a lot more that goes on under the hood. Here's a step-by-step breakdown of how Appwrite Messaging works: 1. Validate input on API request - The process starts when the Appwrite API is called to create a message, which validates the input. -2. Create message document - - A document containing details about the message, including recipients and content, is created on the internal database. -3. Create schedule document - - A schedule document is created on the internal database, specifying when the message should be sent. This document is necessary for the message scheduler. +2. Create message row + - A row containing details about the message, including recipients and content, is created on the internal database. +3. Create schedule row + - A schedule row is created on the internal database, specifying when the message should be sent. This row is necessary for the message scheduler. 4. Queue the message via the message scheduler - Every minute, the scheduler checks the internal database for scheduled messages. - For each scheduled message, it checks if the message should be sent at that time. diff --git a/src/routes/blog/post/offline-first-journal/+page.markdoc b/src/routes/blog/post/offline-first-journal/+page.markdoc index 4b237aabd0..2abc80f0ef 100644 --- a/src/routes/blog/post/offline-first-journal/+page.markdoc +++ b/src/routes/blog/post/offline-first-journal/+page.markdoc @@ -42,9 +42,9 @@ Our tech stack for this app will be: ## Configure your Appwrite project -First, [create an Appwrite Cloud account](https://cloud.appwrite.io/) if you haven’t already. Once your project is ready, go to the **Settings** page and copy your project ID and API endpoint for further usage. Next, go to the **Databases** page from the left sidebar, create a new database with the ID `journals`, and then a collection with the ID `entries` (save both IDs for further usage). +First, [create an Appwrite Cloud account](https://cloud.appwrite.io/) if you haven’t already. 
Once your project is ready, go to the **Settings** page and copy your project ID and API endpoint for further usage. Next, go to the **Databases** page from the left sidebar, create a new database with the ID `journals`, and then a table with the ID `entries` (save both IDs for further usage). -Next, head to the **Attributes** tab and add the following: +Next, head to the **Columns** tab and add the following: | Key | Type | Size | Required | | --- | --- | --- | --- | @@ -54,9 +54,9 @@ Next, head to the **Attributes** tab and add the following: | `updatedAt` | Integer | | Yes | | `deleted` | Boolean | | Yes | -> **Note:** The `deleted` attribute is necessary to add because RxDB does not hard delete any data, only soft deletes to prevent data loss in offline scenarios. +> **Note:** The `deleted` column is necessary to add because RxDB does not hard delete any data, only soft deletes to prevent data loss in offline scenarios. -Then, head to the **Settings** tab of your collection, scroll down to the **Permissions** section, and add the following: +Then, head to the **Settings** tab of your table, scroll down to the **Permissions** section, and add the following: | Role | Create | Read | Update | Delete | | --- | --- | --- | --- | --- | @@ -64,7 +64,7 @@ Then, head to the **Settings** tab of your collection, scroll down to the **Perm ## Prepare the app logic -Once our Appwrite project is set up, let’s start building our app. +Once our Appwrite project is set up, let’s start building our app. 
### Create a SvelteKit app @@ -103,7 +103,7 @@ In the root directory of your app, create a `.env` file and add the information PUBLIC_APPWRITE_ENDPOINT=your-appwrite-cloud-endpoint PUBLIC_APPWRITE_PROJECT_ID=your-project-id PUBLIC_APPWRITE_DATABASE_ID=your-database-id -PUBLIC_APPWRITE_COLLECTION_ID=your-collection-id +PUBLIC_APPWRITE_TABLE_ID=your-table-id ``` Next, in the `src/lib` subdirectory, create a file `appwrite.js` and add the following code: @@ -114,14 +114,14 @@ import { PUBLIC_APPWRITE_ENDPOINT, PUBLIC_APPWRITE_PROJECT_ID, PUBLIC_APPWRITE_DATABASE_ID, - PUBLIC_APPWRITE_COLLECTION_ID + PUBLIC_APPWRITE_TABLE_ID } from '$env/static/public'; export const appwriteConfig = { endpoint: PUBLIC_APPWRITE_ENDPOINT, projectId: PUBLIC_APPWRITE_PROJECT_ID, databaseId: PUBLIC_APPWRITE_DATABASE_ID, - collectionId: PUBLIC_APPWRITE_COLLECTION_ID + tableId: PUBLIC_APPWRITE_TABLE_ID }; export const client = new Client() @@ -156,7 +156,7 @@ addRxPlugin(RxDBQueryBuilderPlugin); addRxPlugin(RxDBUpdatePlugin); ``` -The RxDB imports include core RxDB functionalities to create databases and collections and to add plugins, the query builder plugin for complex read queries, the update plugin for updating data, the Dexie.js storage plugin to use IndexedDB as the local database, and the Appwrite replication plugin to manage data replication in the external Appwrite database. +The RxDB imports include core RxDB functionalities to create databases and tables and to add plugins, the query builder plugin for complex read queries, the update plugin for updating data, the Dexie.js storage plugin to use IndexedDB as the local database, and the Appwrite replication plugin to manage data replication in the external Appwrite database. 
### Create a local database

@@ -190,7 +190,7 @@ const journalSchema = {
};
```

-Then, we create the database and collection using the Dexie.js plugin by adding the following code just after the schema:
+Then, we create the database and collection (which RxDB syncs with our Appwrite table) using the Dexie.js plugin by adding the following code just after the schema:

```js
let dbPromise = null;
@@ -207,9 +207,9 @@ export const getDB = async () => {

    const db = await dbPromise;

-    // Add collections
-    await db.addCollections({
-        entries: { // Name must match the collection ID from Appwrite
+    // Add collections (RxDB's API; each collection syncs with an Appwrite table)
+    await db.addCollections({
+        entries: { // Name must match the table ID from Appwrite
            schema: journalSchema
        }
    });
@@ -237,9 +237,9 @@ const setupReplication = async (db) => {
        replicationIdentifier: 'journals-replication',
        client,
        databaseId: appwriteConfig.databaseId,
-        collectionId: appwriteConfig.collectionId,
+        collectionId: appwriteConfig.tableId, // plugin option is named `collectionId`
        deletedField: 'deleted',
-        collection: db.entries,
+        collection: db.entries, // plugin option is named `collection`
        pull: {
            batchSize: 25 // Can be updated
        },
@@ -347,7 +347,7 @@ This will pre-load all journal entries before the page renders. Then, open the `
```html