diff --git a/.github/workflows/accessibility_scan.yml b/.github/workflows/accessibility_scan.yml index a1acd6c413b..f5c0859ba9a 100644 --- a/.github/workflows/accessibility_scan.yml +++ b/.github/workflows/accessibility_scan.yml @@ -20,6 +20,8 @@ jobs: uses: ./.github/actions/set_yarn_berry - name: Install dependencies run: yarn + - name: Install Chrome for Puppeteer + run: npx puppeteer browsers install chrome - name: Build run: yarn build env: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fad3b482eb6..ba34ae52a3d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,6 +20,8 @@ jobs: uses: ./.github/actions/set_yarn_berry - name: Install Dependencies run: yarn + - name: Install Chrome for Puppeteer + run: npx puppeteer browsers install chrome - name: Run tests run: yarn prebuild && yarn test:unit - name: Run Build diff --git a/.github/workflows/check_for_console_errors.yml b/.github/workflows/check_for_console_errors.yml index cfe03307c56..577bafff1c9 100644 --- a/.github/workflows/check_for_console_errors.yml +++ b/.github/workflows/check_for_console_errors.yml @@ -21,6 +21,8 @@ jobs: uses: ./.github/actions/set_yarn_berry - name: Install Dependencies run: yarn + - name: Install Chrome for Puppeteer + run: npx puppeteer browsers install chrome - name: Run Build run: yarn build:release env: diff --git a/.github/workflows/check_pr_for_broken_links.yml b/.github/workflows/check_pr_for_broken_links.yml index 3913aa7a167..362f2fd87eb 100644 --- a/.github/workflows/check_pr_for_broken_links.yml +++ b/.github/workflows/check_pr_for_broken_links.yml @@ -19,6 +19,8 @@ jobs: uses: ./.github/actions/set_yarn_berry - name: Install Dependencies run: yarn + - name: Install Chrome for Puppeteer + run: npx puppeteer browsers install chrome - name: Run Build run: yarn build env: diff --git a/cspell.json b/cspell.json index eea51669780..84db10cb3d8 100644 --- a/cspell.json +++ b/cspell.json @@ -11,6 +11,8 @@ 
"_userauth.auth_fail", "_userauth.sign_in", "_userauth.sign_up", + "agentic", + "awslabs", "@aws-amplify/auth", "@aws-amplify/core", "@aws-amplify/storage", @@ -1122,7 +1124,9 @@ "reactnative", "realtime", "reCaptcha", + "recordcache", "RecyclerView", + "retryable", "redirect_to", "referrerpolicy", "refetches", @@ -1346,6 +1350,7 @@ "UIViewController", "unauth", "Unauth", + "uncategorized", "uncommenting", "unencrypted", "unioned", @@ -1522,6 +1527,7 @@ "CHALLENGEANSWER", "Fargate", "Dockerizing", + "duckdb", "WORKDIR", "endregion", "entrypoint", diff --git a/package.json b/package.json index e1f3f2292da..ccf8e47ce8d 100644 --- a/package.json +++ b/package.json @@ -129,5 +129,5 @@ "overrides": { "tmp": "^0.2.4" }, - "packageManager": "yarn@4.9.0" + "packageManager": "yarn@4.14.1" } diff --git a/redirects.json b/redirects.json index f4e1604aa7b..876cd77c5d8 100644 --- a/redirects.json +++ b/redirects.json @@ -129,46 +129,6 @@ "target": "//frontend/server-side-rendering/<*>", "status": "301" }, - { - "source": "//build-a-backend/add-aws-services/analytics/<*>", - "target": "//frontend/analytics/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/geo/<*>", - "target": "//frontend/geo/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/in-app-messaging/<*>", - "target": "//frontend/in-app-messaging/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/rest-api/<*>", - "target": "//frontend/rest-api/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/predictions/<*>", - "target": "//frontend/predictions/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/logging/<*>", - "target": "//frontend/logging/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/interactions/<*>", - "target": "//frontend/interactions/<*>", - "status": "301" - }, - { - "source": "//build-a-backend/add-aws-services/pubsub/<*>", - 
"target": "//frontend/pubsub/<*>", - "status": "301" - }, { "source": "//ai/conversation/<*>", "target": "//frontend/ai/conversation/<*>", @@ -10453,5 +10413,20 @@ "source": "/vue/start/connect-existing-aws-resources", "target": "/vue/start/connect-to-aws-resources", "status": "301" + }, + { + "source": "//build-a-backend/q-developer/", + "target": "//develop-with-ai/q-developer/", + "status": "301" + }, + { + "source": "//start/mcp-server/", + "target": "//develop-with-ai/mcp-server/", + "status": "301" + }, + { + "source": "//start/mcp-server/<*>", + "target": "//develop-with-ai/mcp-server/<*>", + "status": "301" } ] \ No newline at end of file diff --git a/src/components/Breadcrumbs/__tests__/Breadcrumbs.test.tsx b/src/components/Breadcrumbs/__tests__/Breadcrumbs.test.tsx index a4931f4100b..5590c1996a7 100644 --- a/src/components/Breadcrumbs/__tests__/Breadcrumbs.test.tsx +++ b/src/components/Breadcrumbs/__tests__/Breadcrumbs.test.tsx @@ -6,7 +6,7 @@ describe('Breadcrumbs', () => { it('should render the Breadcrumbs component', async () => { const component = ( ); @@ -18,28 +18,25 @@ describe('Breadcrumbs', () => { it('should render links for Breadcrumbs including platform', async () => { const component = ( ); render(component); - const routeList = component.props.route.split('/').filter(function (el) { - return el != ''; - }); const breadcrumbsNode = await screen.findByLabelText('Breadcrumb'); const breadcrumbsList = breadcrumbsNode.getElementsByClassName('breadcrumb__item'); - let route = ''; + // Verify each rendered breadcrumb has a valid link for (let i = 0; i < breadcrumbsList.length; i++) { const breadcrumbLink = breadcrumbsList[i].getElementsByClassName( 'amplify-breadcrumbs__link' )[0]; - route = route + '/' + routeList[i]; - expect(breadcrumbLink).toBeInTheDocument(); - expect(breadcrumbLink).toHaveAttribute('href', route); + expect(breadcrumbLink).toHaveAttribute('href'); } + // Should have breadcrumbs for: [platform], build-a-backend, auth, 
set-up-auth + expect(breadcrumbsList.length).toBeGreaterThanOrEqual(4); }); it('should replace "prev" with applicable version in Breadcrumbs text', async () => { @@ -83,49 +80,48 @@ describe('Breadcrumbs', () => { it('should render links for Breadcrumbs for gen2', async () => { const component = ( - + ); render(component); - const routeList = component.props.route.split('/').filter(function (el) { - return el != ''; - }); const breadcrumbsNode = await screen.findByLabelText('Breadcrumb'); const breadcrumbsList = breadcrumbsNode.getElementsByClassName('breadcrumb__item'); - let route = ''; for (let i = 0; i < breadcrumbsList.length; i++) { const breadcrumbLink = breadcrumbsList[i].getElementsByClassName( 'amplify-breadcrumbs__link' )[0]; - route = route + '/' + routeList[i]; - expect(breadcrumbLink).toBeInTheDocument(); - expect(breadcrumbLink).toHaveAttribute('href', route); + expect(breadcrumbLink).toHaveAttribute('href'); } }); - it('should render links for Breadcrumbs with no platform', async () => { + it('should skip segments without directory entries', async () => { const component = ( - + ); render(component); - const routeList = component.props.route.split('/').filter(function (el) { - return el != ''; - }); const breadcrumbsNode = await screen.findByLabelText('Breadcrumb'); - const breadcrumbsList = - breadcrumbsNode.getElementsByClassName('breadcrumb__item'); - - let route = ''; - for (let i = 0; i < breadcrumbsList.length; i++) { - const breadcrumbLink = breadcrumbsList[i].getElementsByClassName( - 'amplify-breadcrumbs__link' - )[0]; - route = route + '/' + routeList[i]; + const breadcrumbsList = breadcrumbsNode.getElementsByClassName( + 'amplify-breadcrumbs__link' + ); - expect(breadcrumbLink).toBeInTheDocument(); - expect(breadcrumbLink).toHaveAttribute('href', route); - } + const labels = Array.from(breadcrumbsList).map((el) => el.textContent); + // "add-aws-services" should be skipped since it has no directory entry + 
expect(labels).not.toContain('add-aws-services'); + expect(labels).not.toContain( + '/[platform]/build-a-backend/add-aws-services' + ); + // But other segments should still be present + expect(labels).toContain('React'); + expect(labels).toContain('Build a Backend'); }); }); diff --git a/src/components/Breadcrumbs/index.tsx b/src/components/Breadcrumbs/index.tsx index 8b7d0f337d3..42b6208e651 100644 --- a/src/components/Breadcrumbs/index.tsx +++ b/src/components/Breadcrumbs/index.tsx @@ -67,6 +67,7 @@ function generateBreadcrumbs( if (url.includes('[platform]')) { href['query'] = { platform }; } + let label = directoryEntry ? directoryEntry.title : url; const override = overrides[url] @@ -77,6 +78,11 @@ function generateBreadcrumbs( label = override; } + // Skip path segments that have no directory entry and no override. + // These are intermediate URL segments that were flattened out of + // the directory tree (e.g. "add-aws-services"). + if (!directoryEntry && !override) return; + breadcrumbs.push({ href, label diff --git a/src/components/Layout/Layout.tsx b/src/components/Layout/Layout.tsx index 6cb2910ee9d..0aed23c6d51 100644 --- a/src/components/Layout/Layout.tsx +++ b/src/components/Layout/Layout.tsx @@ -374,9 +374,11 @@ export const Layout = ({ )} {(asPathWithNoHash.includes('/push-notifications/') || asPathWithNoHash.includes('/analytics/') || - asPathWithNoHash.includes('/in-app-messaging/')) && ( - - )} + asPathWithNoHash.includes('/in-app-messaging/')) && + !asPathWithNoHash.includes('/kinesis') && + !asPathWithNoHash.includes('/firehose') && ( + + )} {asPathWithNoHash.includes('/interactions/') && ( )} diff --git a/src/data/__tests__/sections.test.ts b/src/data/__tests__/sections.test.ts index 8e86b6ffeeb..19fe3d2d477 100644 --- a/src/data/__tests__/sections.test.ts +++ b/src/data/__tests__/sections.test.ts @@ -16,6 +16,12 @@ describe('getSectionFromPath', () => { ); }); + it('returns ai for /develop-with-ai/ paths', () => { + 
expect(getSectionFromPath('/react/develop-with-ai/q-developer/')).toBe( + 'ai' + ); + }); + it('returns backend for /build-a-backend/ paths', () => { expect(getSectionFromPath('/react/build-a-backend/auth/set-up-auth/')).toBe( 'backend' @@ -81,16 +87,23 @@ describe('getDefaultPathForSection', () => { '/swift/reference/' ); }); + + it('returns ai path', () => { + expect(getDefaultPathForSection('ai', 'react')).toBe( + '/react/develop-with-ai/' + ); + }); }); describe('SECTIONS config', () => { - it('has all 6 sections defined', () => { + it('has all 7 sections defined', () => { const keys = Object.keys(SECTIONS) as SectionKey[]; expect(keys).toEqual([ 'quickstart', 'backend', 'frontend', 'ui', + 'ai', 'hosting', 'reference' ]); diff --git a/src/data/sections.ts b/src/data/sections.ts index 767b0b8fdee..ab50f8afb7e 100644 --- a/src/data/sections.ts +++ b/src/data/sections.ts @@ -2,8 +2,9 @@ export type SectionKey = | 'quickstart' | 'backend' | 'frontend' - | 'hosting' | 'ui' + | 'ai' + | 'hosting' | 'reference'; export interface SectionConfig { @@ -18,6 +19,7 @@ export const SECTIONS: Record = { backend: { label: 'Build a Backend', subtitle: 'What runs on AWS' }, frontend: { label: 'Frontend Libraries', subtitle: 'What runs in your app' }, ui: { label: 'UI Libraries', subtitle: 'Pre-built components' }, + ai: { label: 'Develop with AI' }, hosting: { label: 'Hosting' }, reference: { label: 'Reference' } }; @@ -40,6 +42,8 @@ export function getDefaultPathForSection( return `/${platform}/deploy-and-host/`; case 'ui': return `/${platform}/build-ui/`; + case 'ai': + return `/${platform}/develop-with-ai/`; case 'reference': return `/${platform}/reference/`; default: @@ -77,6 +81,7 @@ export function getSectionFromPath(path: string): SectionKey | undefined { if (/\/start(\/|$)/.test(path) || /\/how-amplify-works(\/|$)/.test(path)) { return 'quickstart'; } + if (/\/develop-with-ai(\/|$)/.test(path)) return 'ai'; if (/\/deploy-and-host(\/|$)/.test(path)) return 'hosting'; if 
(/\/reference(\/|$)/.test(path)) return 'reference'; if (/\/build-ui(\/|$)/.test(path)) return 'ui'; diff --git a/src/directory/directory.mjs b/src/directory/directory.mjs index ffe67e4bb6d..317f4c105d9 100644 --- a/src/directory/directory.mjs +++ b/src/directory/directory.mjs @@ -474,6 +474,10 @@ export const directory = { path: 'src/pages/[platform]/build-a-backend/add-aws-services/analytics/set-up-analytics/index.mdx', section: 'backend' }, + { + path: 'src/pages/[platform]/build-a-backend/add-aws-services/analytics/kinesis/index.mdx', + section: 'backend' + }, { path: 'src/pages/[platform]/build-a-backend/add-aws-services/analytics/existing-resources/index.mdx', section: 'backend' @@ -800,6 +804,9 @@ export const directory = { { path: 'src/pages/[platform]/frontend/analytics/index.mdx', children: [ + { + path: 'src/pages/[platform]/frontend/analytics/kinesis/index.mdx' + }, { path: 'src/pages/[platform]/frontend/analytics/record-events/index.mdx' }, diff --git a/src/pages/[platform]/build-a-backend/add-aws-services/analytics/firehose/index.mdx b/src/pages/[platform]/build-a-backend/add-aws-services/analytics/firehose/index.mdx new file mode 100644 index 00000000000..f6ec03b0895 --- /dev/null +++ b/src/pages/[platform]/build-a-backend/add-aws-services/analytics/firehose/index.mdx @@ -0,0 +1,108 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Amazon Data Firehose', + description: 'Set up an Amazon Data Firehose delivery stream and configure IAM permissions for the Amplify Firehose client.', + platforms: [ + 'android', + 'swift' + ], +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +Use the [AWS Cloud Development Kit (AWS CDK)](https://docs.aws.amazon.com/cdk/latest/guide/home.html) to create an [Amazon Data 
Firehose](https://aws.amazon.com/firehose/) delivery stream and grant your app the permissions it needs. For more on adding custom AWS resources to your Amplify backend, see [Custom resources](/[platform]/build-a-backend/add-aws-services/custom-resources/). + +## Set up a Firehose delivery stream + +```ts title="amplify/backend.ts" +import { defineBackend } from "@aws-amplify/backend"; +import { auth } from "./auth/resource"; +import { data } from "./data/resource"; +import { storage } from "./storage/resource"; +import { CfnDeliveryStream } from "aws-cdk-lib/aws-kinesisfirehose"; +import { Stack } from "aws-cdk-lib/core"; +import { + Policy, + PolicyStatement, + Role, + ServicePrincipal, +} from "aws-cdk-lib/aws-iam"; + +const backend = defineBackend({ + auth, + data, + storage, +}); + +const firehoseStack = backend.createStack("firehose-stack"); + +// Access the S3 bucket resource +const s3Bucket = backend.storage.resources.bucket; + +// Create a new IAM role for the Firehose +const firehoseRole = new Role(firehoseStack, "FirehoseRole", { + assumedBy: new ServicePrincipal("firehose.amazonaws.com"), +}); + +// Grant the Firehose role read/write permissions to the S3 bucket +s3Bucket.grantReadWrite(firehoseRole); + +// Create a Firehose delivery stream +const myFirehose = new CfnDeliveryStream(firehoseStack, "MyFirehose", { + deliveryStreamType: "DirectPut", + s3DestinationConfiguration: { + bucketArn: s3Bucket.bucketArn, + roleArn: firehoseRole.roleArn, + }, + deliveryStreamName: "myFirehose", +}); + +// Grant PutRecordBatch permission to authenticated users +const firehosePolicy = new Policy(firehoseStack, "FirehosePolicy", { + statements: [ + new PolicyStatement({ + actions: ["firehose:PutRecordBatch"], + resources: [myFirehose.attrArn], + }), + ], +}); + +backend.auth.resources.authenticatedUserIamRole.attachInlinePolicy(firehosePolicy); +``` + +If you are not using the CDK, ensure your authenticated IAM role has the `firehose:PutRecordBatch` permission on your 
target delivery stream: + +```json +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": "firehose:PutRecordBatch", + "Resource": "arn:aws:firehose:::deliverystream/" + }] +} +``` + +For more information, see the [Amazon Data Firehose Developer Documentation](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html). + + + +Ensure your S3 destination bucket is properly secured. See [Security best practices for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/security-best-practices.html) for guidance on encryption, access control, and logging. + + + +## Next steps + +Use the [Firehose client](/[platform]/frontend/analytics/firehose/) to stream data from your app. diff --git a/src/pages/[platform]/build-a-backend/add-aws-services/analytics/kinesis/index.mdx b/src/pages/[platform]/build-a-backend/add-aws-services/analytics/kinesis/index.mdx new file mode 100644 index 00000000000..f18b396fe63 --- /dev/null +++ b/src/pages/[platform]/build-a-backend/add-aws-services/analytics/kinesis/index.mdx @@ -0,0 +1,81 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Kinesis Data Streams', + description: 'Set up an Amazon Kinesis Data Stream and configure IAM permissions for the Amplify Kinesis Data Streams client.', + platforms: [ + 'android', + 'flutter', + 'swift' + ], +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +Use the [AWS Cloud Development Kit (AWS CDK)](https://docs.aws.amazon.com/cdk/latest/guide/home.html) to create an [Amazon Kinesis Data Stream](https://aws.amazon.com/kinesis/data-streams/) and grant your app the permissions it needs. 
For more on adding custom AWS resources to your Amplify backend, see [Custom resources](/[platform]/build-a-backend/add-aws-services/custom-resources/). + +## Set up a Kinesis stream + +```ts title="amplify/backend.ts" +import { defineBackend } from "@aws-amplify/backend"; +import { auth } from "./auth/resource"; +import { data } from "./data/resource"; +import { Policy, PolicyStatement } from "aws-cdk-lib/aws-iam"; +import { Stream } from "aws-cdk-lib/aws-kinesis"; +import { Stack } from "aws-cdk-lib/core"; + +const backend = defineBackend({ + auth, + data, +}); + +const kinesisStack = backend.createStack("kinesis-stack"); + +// Create a Kinesis stream +const kinesisStream = new Stream(kinesisStack, "KinesisStream", { + streamName: "myKinesisStream", + shardCount: 1, +}); + +// Grant PutRecords permission to authenticated users +const kinesisPolicy = new Policy(kinesisStack, "KinesisPolicy", { + statements: [ + new PolicyStatement({ + actions: ["kinesis:PutRecords"], + resources: [kinesisStream.streamArn], + }), + ], +}); + +backend.auth.resources.authenticatedUserIamRole.attachInlinePolicy(kinesisPolicy); +``` + +If you are not using the CDK, ensure your authenticated IAM role has the `kinesis:PutRecords` permission on your target stream: + +```json +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": "kinesis:PutRecords", + "Resource": "arn:aws:kinesis:::stream/" + }] +} +``` + +For more information, see the [Amazon Kinesis Developer Documentation](https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html). + +## Next steps + +Use the [Kinesis Data Streams client](/[platform]/frontend/analytics/kinesis/) to stream data from your app. 
diff --git a/src/pages/[platform]/develop-with-ai/agent-plugins/index.mdx b/src/pages/[platform]/develop-with-ai/agent-plugins/index.mdx new file mode 100644 index 00000000000..c03b358ff37 --- /dev/null +++ b/src/pages/[platform]/develop-with-ai/agent-plugins/index.mdx @@ -0,0 +1,151 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Agent plugins', + description: + 'Install the AWS Amplify agent plugin to get guided, phased workflows for building fullstack apps with Claude Code, Cursor, and other AI coding assistants.', + platforms: [ + 'android', + 'angular', + 'flutter', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'swift', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +[Agent Plugins for AWS](https://github.com/awslabs/agent-plugins) are reusable packages that extend AI coding assistants with guided workflows, MCP servers, hooks, and reference materials for AWS development. The **aws-amplify** plugin provides a structured, phased workflow for building fullstack Amplify Gen 2 applications. + +Agent plugins are currently supported by **Claude Code** (version 2.1.29 or later) and **Cursor** (version 2.5 or later). + +## What the Amplify plugin includes + +The aws-amplify plugin packages four types of capabilities: + +- **Skills** — Step-by-step workflows that guide the AI through complex tasks. The Amplify plugin includes an `amplify-workflow` skill that orchestrates the entire development process. +- **MCP Servers** — Provides access to AWS documentation and Standard Operating Procedures (SOPs) through the AWS MCP Server. +- **Hooks** — Automation that validates prerequisites before you start building. 
+- **References** — Backend, frontend, and deployment instructions that the skill consults during each phase. + +## Phased workflow + +When you install the Amplify plugin and ask the AI to build an app, it follows a structured 4-phase workflow. Only applicable phases are executed based on your request. + +**Phase 1: Backend** — Creates Amplify Gen 2 resources in the `amplify/` directory including authentication, data models, storage, and serverless functions. + +**Phase 2: Sandbox** — Deploys your backend to a cloud sandbox environment for testing. + +**Phase 3: Frontend** — Connects your frontend framework (React, Next.js, Vue, Angular, React Native, Flutter, Swift, or Android) to the Amplify backend. + +**Phase 4: Production** — Deploys your application to production via CI/CD. + +For example, if you ask "add authentication to my Amplify backend", only Phase 1 and Phase 2 run. If you ask "build me a fullstack task management app", all four phases run in sequence. The AI presents a plan and waits for your confirmation before proceeding to each phase. + +## Install with Claude Code + +Claude Code is available as a [CLI tool](https://docs.anthropic.com/en/docs/claude-code), a [VS Code extension](https://marketplace.visualstudio.com/items?itemName=anthropics.claude-code), and a [JetBrains plugin](https://plugins.jetbrains.com/plugin/27189-claude-code). The agent plugin works the same way in all environments. + +### Step 1: Add the marketplace + +Open Claude Code and run: + +```bash +/plugin marketplace add awslabs/agent-plugins +``` + +This registers the Agent Plugins for AWS marketplace, giving you access to all available AWS plugins. + +### Step 2: Install the Amplify plugin + +```bash +/plugin install aws-amplify@agent-plugins-for-aws +``` + +### Step 3: Verify installation + +You can confirm the plugin is installed by running: + +```bash +/plugin list +``` + +You should see `aws-amplify@agent-plugins-for-aws` in the list. 
+ +### Step 4: Start building + +The plugin activates automatically when your prompt relates to Amplify development. Try prompts like: + +- "Build me a task management app with Amplify" +- "Add authentication to my Amplify backend" +- "Add a storage bucket for file uploads" +- "Deploy my Amplify app to sandbox" +- "Connect my React frontend to the Amplify backend" + +The plugin will validate your prerequisites, present a phased plan, and guide you through each step with confirmation checkpoints. + + + +If your AWS credentials are not configured, the plugin will detect this during the prerequisite check and guide you to set them up. See [Configure AWS for local development](/[platform]/start/account-setup/) for details. + + + +## Install with Cursor + +### From settings + +1. Open **Cursor** and go to **Settings** (gear icon) +2. Navigate to **Plugins** +3. Search for **"AWS Amplify"** +4. Click **"Add to Cursor"** and select your installation scope (user or workspace) + +### From the marketplace + +Alternatively, visit [cursor.com](https://cursor.com), browse the marketplace for the AWS Amplify plugin, and click **Install**. + +After installation, the plugin activates when you ask Amplify-related questions in Cursor's AI chat. + +## Prerequisites + +Before using the Amplify plugin, ensure you have: + +- **Node.js** version 18 or later +- **npm** +- **AWS CLI** with configured credentials + +The plugin runs a prerequisite check script automatically and will prompt you to configure credentials if they're missing. For credential setup, see [Configure AWS for local development](/[platform]/start/account-setup/). 
+ +## Other AWS plugins + +The Agent Plugins for AWS repository includes additional plugins that complement Amplify development: + +| Plugin | Description | +|--------|-------------| +| **aws-serverless** | Build serverless apps with Lambda, API Gateway, EventBridge, and Step Functions | +| **deploy-on-aws** | Analyze codebases, recommend AWS architectures, estimate costs, and generate Infrastructure as Code | +| **databases-on-aws** | Database architecture guidance for Aurora DSQL | +| **amazon-location-service** | Add maps, geocoding, routing, and location features | +| **migration-to-aws** | Migrate infrastructure from GCP to AWS | + +Install any plugin with: + +```bash +/plugin install @agent-plugins-for-aws +``` + +For the full list and source code, see the [Agent Plugins for AWS repository](https://github.com/awslabs/agent-plugins). diff --git a/src/pages/[platform]/develop-with-ai/index.mdx b/src/pages/[platform]/develop-with-ai/index.mdx new file mode 100644 index 00000000000..e86075608db --- /dev/null +++ b/src/pages/[platform]/develop-with-ai/index.mdx @@ -0,0 +1,39 @@ +import { getChildPageNodes } from '@/utils/getChildPageNodes'; +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Develop with AI', + description: + 'Use AI coding assistants to accelerate Amplify development. Set up agent plugins for Claude Code and Cursor, Kiro, MCP Server, and project-level steering files.', + route: '/[platform]/develop-with-ai', + platforms: [ + 'android', + 'angular', + 'flutter', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'swift', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + const childPageNodes = getChildPageNodes(meta.route); + return { + props: { + platform: context.params.platform, + meta, + childPageNodes + } + }; +} + +Use AI coding assistants to build Amplify applications faster. 
These tools understand Amplify's TypeScript-first patterns and can help you scaffold backends, generate frontend code, and deploy your app. + + diff --git a/src/pages/[platform]/develop-with-ai/kiro/index.mdx b/src/pages/[platform]/develop-with-ai/kiro/index.mdx new file mode 100644 index 00000000000..bf4c02a104c --- /dev/null +++ b/src/pages/[platform]/develop-with-ai/kiro/index.mdx @@ -0,0 +1,123 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Use Kiro with Amplify', + description: + 'Learn how to use Kiro, an agentic IDE, to build Amplify applications with powers, specs, hooks, and steering.', + platforms: [ + 'android', + 'angular', + 'flutter', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'swift', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +[Kiro](https://kiro.dev) is an agentic IDE that helps you move from prototype to production with features like powers, specs, steering, hooks, and agentic chat. Kiro understands your full project context and can generate code across multiple files. + +## Getting started + +1. [Download and install Kiro](https://kiro.dev/docs/getting-started/installation/) for macOS, Windows, or Linux. +2. Open your Amplify project in Kiro. +3. Install the AWS Amplify power (see below). +4. Start building with agentic chat or create a spec for your feature. + +For a hands-on walkthrough, see the [Kiro quickstart](https://kiro.dev/docs/getting-started/first-project/). + +## AWS Amplify Power + +Powers are packaged capabilities that give Kiro on-demand access to specialized knowledge and workflows. 
The **AWS Amplify power** provides guided workflows for building fullstack Amplify Gen 2 applications — including authentication, data models, storage, serverless functions, and deployment. + +Unlike traditional MCP servers that load all tools upfront, powers activate dynamically based on your conversation context. When you mention Amplify-related work, Kiro loads the Amplify power automatically. + +### Installing the Amplify power + +Browse powers directly in Kiro or visit [kiro.dev/powers](https://kiro.dev/powers): + +1. Search for **"AWS Amplify"** +2. Click **Install** — the power registers automatically with no manual configuration + +### What the power provides + +The Amplify power includes: + +- **Steering files** — A workflow orchestrator (`amplify-workflow.md`) that guides Kiro through a phased development process, plus phase-specific instructions for backend, sandbox, frontend, and production work. +- **MCP configuration** — Connects to the AWS MCP Server for access to AWS documentation and Amplify Standard Operating Procedures (SOPs). + +### Phased workflow + +When you ask Kiro to build an Amplify app, the power's orchestrator determines which phases apply and executes them in order: + +1. **Backend** — Create auth, data models, storage, and functions in the `amplify/` directory +2. **Sandbox** — Deploy to a sandbox environment for testing +3. **Frontend** — Connect your frontend framework to the Amplify backend +4. **Production** — Deploy to production via CI/CD + +Only applicable phases run. For example, "add a data model" triggers only Phase 1 and 2, while "build me a fullstack app" runs all four phases. Kiro presents a plan and confirms with you before each phase. 
+ +### Example prompts + +``` +Build me a task management app with Amplify +``` + +``` +Add owner-based authentication to my data model +``` + +``` +Deploy my Amplify app to sandbox +``` + +For the source code and steering files, see the [aws-amplify power on GitHub](https://github.com/kirodotdev/powers/tree/main/aws-amplify). + +## Key features for Amplify development + +### Specs + +Specs let you plan and build features using structured specifications. For Amplify projects, you can describe a feature in natural language and Kiro will create a spec that breaks it into tasks — from defining your data model in `amplify/data/resource.ts` to connecting the frontend. + +For example, you can ask Kiro to create a spec for "add a comments feature with real-time subscriptions" and it will plan the backend schema changes, authorization rules, and frontend integration. + +Learn more in the [Kiro specs documentation](https://kiro.dev/docs/features/specs/). + +### Hooks + +Hooks automate repetitive development workflows by responding to events. You can set up hooks to automatically run `npx ampx sandbox` when backend files change, run tests after code generation, or lint your Amplify resource definitions. + +Learn more in the [Kiro hooks documentation](https://kiro.dev/docs/features/hooks/). + +### Steering + +Steering files guide Kiro's behavior with project-specific rules and context. For Amplify projects, you can create steering rules that teach Kiro about your project's conventions — such as which authorization patterns to use, how your data models are structured, and which Amplify libraries to import. + +See the [steering files](/[platform]/develop-with-ai/steering-files/) page for Amplify-specific steering examples. + +### MCP Servers + +Kiro supports the Model Context Protocol (MCP), which lets you connect external tools and data sources. 
The [AWS MCP Server](/[platform]/develop-with-ai/mcp-server/) provides pre-built Amplify workflows that Kiro can use to scaffold backends, generate frontend code, and set up deployments. + +Learn more in the [Kiro MCP documentation](https://kiro.dev/docs/features/mcp-servers/). + +### Agentic chat + +Kiro's agentic chat lets you build features through natural language conversation. It understands your full Amplify project — including your data schema, auth configuration, and frontend code — and can make changes across multiple files in a single interaction. + +Learn more in the [Kiro chat documentation](https://kiro.dev/docs/features/agentic-chat/). diff --git a/src/pages/[platform]/start/mcp-server/amplify-workflows/index.mdx b/src/pages/[platform]/develop-with-ai/mcp-server/amplify-workflows/index.mdx similarity index 98% rename from src/pages/[platform]/start/mcp-server/amplify-workflows/index.mdx rename to src/pages/[platform]/develop-with-ai/mcp-server/amplify-workflows/index.mdx index 66d108d7bfc..c8dae7c9b05 100644 --- a/src/pages/[platform]/start/mcp-server/amplify-workflows/index.mdx +++ b/src/pages/[platform]/develop-with-ai/mcp-server/amplify-workflows/index.mdx @@ -3,7 +3,7 @@ import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; export const meta = { title: 'Guided workflows', description: 'Learn about the guided workflows available in AWS MCP Server for building Amplify applications.', - route: '/[platform]/start/mcp-server/amplify-workflows', + route: '/[platform]/develop-with-ai/mcp-server/amplify-workflows', platforms: [ 'android', 'angular', @@ -401,7 +401,7 @@ When working on Amplify projects, use the AWS MCP Server guided workflows: Now that you understand the pre-built workflows, try using them in your next Amplify project: -1. [Set up AWS MCP Server](/[platform]/start/mcp-server/set-up-mcp/) if you haven't already +1. [Set up AWS MCP Server](/[platform]/develop-with-ai/mcp-server/set-up-mcp/) if you haven't already 2. 
Start a conversation with your AI assistant about what you want to build 3. Let the pre-built workflows guide you through implementation diff --git a/src/pages/[platform]/start/mcp-server/index.mdx b/src/pages/[platform]/develop-with-ai/mcp-server/index.mdx similarity index 94% rename from src/pages/[platform]/start/mcp-server/index.mdx rename to src/pages/[platform]/develop-with-ai/mcp-server/index.mdx index 44a971f3927..ba5457ad5c5 100644 --- a/src/pages/[platform]/start/mcp-server/index.mdx +++ b/src/pages/[platform]/develop-with-ai/mcp-server/index.mdx @@ -2,9 +2,9 @@ import { getChildPageNodes } from '@/utils/getChildPageNodes'; import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; export const meta = { - title: 'Build with AI assistants', - description: 'Use AI coding assistants to build fullstack Amplify applications faster with AWS MCP Server.', - route: '/[platform]/start/mcp-server', + title: 'AWS MCP Server', + description: 'Use the AWS MCP Server to connect AI coding assistants to Amplify documentation, APIs, and guided workflows.', + route: '/[platform]/develop-with-ai/mcp-server', platforms: [ 'android', 'angular', @@ -15,8 +15,7 @@ export const meta = { 'react-native', 'swift', 'vue' - ], - isNew: true + ] }; export const getStaticPaths = async () => { diff --git a/src/pages/[platform]/start/mcp-server/set-up-mcp/index.mdx b/src/pages/[platform]/develop-with-ai/mcp-server/set-up-mcp/index.mdx similarity index 92% rename from src/pages/[platform]/start/mcp-server/set-up-mcp/index.mdx rename to src/pages/[platform]/develop-with-ai/mcp-server/set-up-mcp/index.mdx index 8c7e303ddc1..79ae31bb728 100644 --- a/src/pages/[platform]/start/mcp-server/set-up-mcp/index.mdx +++ b/src/pages/[platform]/develop-with-ai/mcp-server/set-up-mcp/index.mdx @@ -3,7 +3,7 @@ import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; export const meta = { title: 'Set up AWS MCP Server', description: 'Learn how to configure AWS MCP Server with your AI 
coding assistant.', - route: '/[platform]/start/mcp-server/set-up-mcp', + route: '/[platform]/develop-with-ai/mcp-server/set-up-mcp', platforms: [ 'android', 'angular', @@ -63,4 +63,4 @@ Your assistant should describe the Backend Implementation, Frontend Integration, ## Next steps -Now that you have AWS MCP Server configured, learn about the [guided workflows](/[platform]/start/mcp-server/amplify-workflows/) available to accelerate your Amplify development. +Now that you have AWS MCP Server configured, learn about the [guided workflows](/[platform]/develop-with-ai/mcp-server/amplify-workflows/) available to accelerate your Amplify development. diff --git a/src/pages/[platform]/build-a-backend/q-developer/index.mdx b/src/pages/[platform]/develop-with-ai/q-developer/index.mdx similarity index 100% rename from src/pages/[platform]/build-a-backend/q-developer/index.mdx rename to src/pages/[platform]/develop-with-ai/q-developer/index.mdx diff --git a/src/pages/[platform]/develop-with-ai/steering-files/index.mdx b/src/pages/[platform]/develop-with-ai/steering-files/index.mdx new file mode 100644 index 00000000000..647159ec73d --- /dev/null +++ b/src/pages/[platform]/develop-with-ai/steering-files/index.mdx @@ -0,0 +1,126 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Steering files', + description: + 'Configure project-level context files to guide AI coding assistants when building Amplify applications.', + platforms: [ + 'android', + 'angular', + 'flutter', + 'javascript', + 'nextjs', + 'react', + 'react-native', + 'swift', + 'vue' + ] +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +Steering files are project-level configuration files that guide AI coding assistants with your project's conventions, patterns, and constraints. 
When an AI assistant reads these files, it generates code that follows your team's standards and Amplify best practices. + +## Why use steering files + +AI assistants work best when they understand your project's context. Without steering files, the assistant may: +- Use outdated API patterns or incorrect import paths +- Generate code that doesn't match your auth/data model conventions +- Miss Amplify-specific patterns like `defineAuth()`, `defineData()`, or `defineStorage()` + +Steering files solve this by giving the assistant persistent context about your project. + +## Kiro steering + +[Kiro](/[platform]/develop-with-ai/kiro/) uses steering files to maintain consistency across your project. Create steering rules that describe your Amplify project conventions: + +- Data model naming patterns and authorization rules +- Auth configuration preferences (social providers, MFA settings) +- Frontend framework patterns (React hooks, Next.js App Router vs Pages Router) +- Testing conventions + +Learn more in the [Kiro steering documentation](https://kiro.dev/docs/features/steering/). + +## CLAUDE.md for Claude Code + +If you use [Claude Code](https://docs.anthropic.com/en/docs/claude-code), create a `CLAUDE.md` file in your project root. This file is automatically loaded into every conversation. + +Example `CLAUDE.md` for an Amplify project: + +```markdown +# Project conventions + +This is an AWS Amplify Gen 2 project using TypeScript. 
+ +## Structure +- `amplify/` — Backend resources (auth, data, storage, functions) +- `amplify/data/resource.ts` — Data model schema using `@aws-amplify/backend` +- `amplify/auth/resource.ts` — Auth configuration using `defineAuth()` +- `src/` — Frontend application code + +## Amplify patterns +- Use `a.model()` for data models with `.authorization()` chains +- Use `allow.owner()` for user-owned data, `allow.guest()` for public read +- Import client operations from `aws-amplify/api` with `generateClient()` +- Use `Amplify.configure(outputs)` with the generated `amplify_outputs.json` + +## Testing +- Run `npx ampx sandbox` for local development +- Backend changes auto-deploy on file save during sandbox +``` + +## Cursor rules + +For [Cursor](https://cursor.com), create rules in the `.cursor/rules/` directory: + +``` +.cursor/ + rules/ + amplify.mdc # Amplify-specific conventions +``` + +Example `.cursor/rules/amplify.mdc`: + +```markdown +--- +description: Amplify Gen 2 conventions +globs: amplify/**/* +--- + +- Backend resources are defined in `amplify/` using TypeScript +- Data models use `a.schema({})` with `a.model()` definitions +- Authorization uses `.authorization(allow => [...])` chains +- Auth is configured with `defineAuth()` in `amplify/auth/resource.ts` +- Storage uses `defineStorage()` in `amplify/storage/resource.ts` +- Use `@aws-amplify/backend` for backend definitions +- Use `aws-amplify` for frontend client operations +``` + +## Amazon Q Developer context + +[Amazon Q Developer](/[platform]/develop-with-ai/q-developer/) supports workspace context through the `@workspace` command. 
You can add reference files to help Q Developer understand your Amplify conventions: + +- general.md — General Amplify Gen 2 conventions and file structure +- authentication.md — Auth configuration patterns and examples +- modeling-schema.md — Data model schema best practices +- modeling-relationships.md — Data relationship patterns + +Download these files and place them in a `context/` folder in your project root. + +## Tips for effective steering + +- **Be specific about imports** — Tell the assistant which packages to import from (`@aws-amplify/backend` for backend, `aws-amplify` for frontend) +- **Document your auth model** — Describe which authorization strategies your app uses (owner-based, group-based, public) +- **Include file paths** — Reference where backend resources are defined so the assistant edits the right files +- **Keep it updated** — Update steering files when you add new Amplify features or change conventions diff --git a/src/pages/[platform]/frontend/analytics/firehose/index.mdx b/src/pages/[platform]/frontend/analytics/firehose/index.mdx new file mode 100644 index 00000000000..970855d92ec --- /dev/null +++ b/src/pages/[platform]/frontend/analytics/firehose/index.mdx @@ -0,0 +1,392 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Amazon Data Firehose client', + description: 'A standalone client for streaming data to Amazon Data Firehose with offline support, automatic batching, and configurable flushing.', + platforms: [ + 'android', + 'swift', + ], +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: { + platform: context.params.platform, + meta + } + }; +} + +`AmplifyFirehoseClient` is a standalone client for streaming data to [Amazon Data Firehose](https://aws.amazon.com/firehose/) delivery streams. 
It provides: + +- Local persistence for offline support +- Automatic retry for failed records +- Automatic batching (up to 500 records or 4 MB per request) +- Interval-based automatic flushing (default: every 30 seconds) +- Enable/disable toggle that silently drops new records while preserving cached ones + + + +This is a standalone client, separate from the Amplify Analytics category plugin. It communicates directly with the Firehose API using `PutRecordBatch`. + + + + + +Before using this client, ensure your backend is configured with the required IAM permissions. See [Set up Amazon Data Firehose](/[platform]/build-a-backend/add-aws-services/analytics/firehose/). + + + +## Getting started + +### Installation + + + +Add the dependency to your module's `build.gradle.kts`: + +```kotlin +dependencies { + implementation("com.amplifyframework:aws-kinesis:LATEST_VERSION") +} +``` + + + + + +Add `AmplifyFirehoseClient` to your project using Swift Package Manager. In Xcode, go to **File > Add Package Dependencies** and enter the repository URL for the Amplify Swift SDK. + + + +### Initialize the client + + + +```kotlin +import com.amplifyframework.firehose.AmplifyFirehoseClient + +val firehose = AmplifyFirehoseClient( + context = applicationContext, + region = "us-east-1", + credentialsProvider = credentialsProvider +) +``` + + + + + +```swift +import AmplifyFirehoseClient + +let firehose = try AmplifyFirehoseClient( + region: "us-east-1", + credentialsProvider: credentialsProvider +) +``` + + + +### Configuration options + +You can customize the client behavior by passing an options object: + + + +| Option | Default | Description | +|---|---|---| +| `cacheMaxBytes` | 5 MB | Maximum size of the local record cache in bytes. | +| `maxRetries` | 5 | Maximum retry attempts per record before it is discarded. | +| `flushStrategy` | `FlushStrategy.Interval(30.seconds)` | Automatic flush interval. Use `FlushStrategy.None` for manual-only flushing. 
| +| `configureClient` | `null` | Escape hatch to customize the underlying AWS SDK `FirehoseClient`. | + + + + + +| Option | Default | Description | +|---|---|---| +| `cacheMaxBytes` | 5 MB | Maximum size of the local record cache in bytes. | +| `maxRetries` | 5 | Maximum retry attempts per record before it is discarded. | +| `flushStrategy` | `.interval(30)` | Automatic flush interval in seconds. Use `.none` for manual-only flushing. | +| `configureClient` | `nil` | Closure to customize the underlying `FirehoseClientConfiguration`. | + + + + + +```kotlin +import com.amplifyframework.firehose.AmplifyFirehoseClient +import com.amplifyframework.firehose.AmplifyFirehoseClientOptions +import com.amplifyframework.recordcache.FlushStrategy +import kotlin.time.Duration.Companion.seconds + +val firehose = AmplifyFirehoseClient( + context = applicationContext, + region = "us-east-1", + credentialsProvider = credentialsProvider, + options = AmplifyFirehoseClientOptions { + cacheMaxBytes = 10L * 1024 * 1024 // 10 MB + maxRetries = 5 + flushStrategy = FlushStrategy.Interval(30.seconds) + configureClient { + retryStrategy { maxAttempts = 10 } + } + } +) +``` + +To disable automatic flushing: + +```kotlin +options = AmplifyFirehoseClientOptions { + flushStrategy = FlushStrategy.None +} +``` + + + + + +```swift +let firehose = try AmplifyFirehoseClient( + region: "us-east-1", + credentialsProvider: credentialsProvider, + options: .init( + cacheMaxBytes: 10 * 1_024 * 1_024, // 10 MB + maxRetries: 5, + flushStrategy: .interval(30), + configureClient: { config in + // Customize the underlying FirehoseClientConfiguration + } + ) +) +``` + +To disable automatic flushing: + +```swift +options: .init(flushStrategy: .none) +``` + + + +## Usage + +### Record data + +Use `record()` to persist data to the local cache. Records are sent to Firehose during the next flush cycle (automatic or manual). 
+ + + +```kotlin +val result = firehose.record( + data = "Hello Firehose".toByteArray(), + streamName = "my-delivery-stream" +) +when (result) { + is Result.Success -> { /* recorded successfully */ } + is Result.Failure -> { /* handle error */ } +} +``` + + + + + +```swift +let result = try await firehose.record( + data: "Hello Firehose".data(using: .utf8)!, + streamName: "my-delivery-stream" +) +``` + + + +Records submitted while the client is disabled are silently dropped. + +### Flush records + +The client automatically flushes cached records at the configured interval (default: 30 seconds). You can also trigger a manual flush: + + + +```kotlin +when (val result = firehose.flush()) { + is Result.Success -> println("Flushed ${result.data.recordsFlushed} records") + is Result.Failure -> println("Flush error: ${result.error}") +} +``` + + + + + +```swift +let flushResult = try await firehose.flush() +print("Flushed \(flushResult.recordsFlushed) records") +``` + + + +Each flush sends at most one batch per stream (up to 500 records or 4 MB). Remaining records are picked up in subsequent flush cycles. If a flush is already in progress, the call returns immediately with `flushInProgress: true`. + +Manual flushes work even when the client is disabled, allowing you to drain cached records without re-enabling collection. + +### Clear cache + +Delete all cached records from local storage: + + + +```kotlin +firehose.clearCache() +``` + + + + + +```swift +let cleared = try await firehose.clearCache() +``` + + + +### Enable and disable + +You can toggle record collection and automatic flushing at runtime. When disabled, new records are silently dropped but already-cached records remain in storage. 
+ + + +```kotlin +firehose.disable() +// Records are dropped, auto-flush paused + +firehose.enable() +// Collection and auto-flush resume +``` + + + + + +```swift +await firehose.disable() +// Records are dropped, auto-flush paused + +await firehose.enable() +// Collection and auto-flush resume +``` + + + +## Advanced + +### Escape hatch + +Access the underlying AWS SDK `FirehoseClient` for operations not covered by this client's API: + + + +```kotlin +val sdkClient = firehose.firehoseClient +// Use sdkClient for direct Firehose API calls +``` + + + + + +```swift +let sdkClient = firehose.getFirehoseClient() +// Use sdkClient for direct Firehose API calls +``` + + + +### Error handling + +All operations surface errors through a sealed exception hierarchy: + + + +| Error type | Description | +|---|---| +| `AmplifyFirehoseValidationException` | Record input validation failed (oversized record). | +| `AmplifyFirehoseLimitExceededException` | Local cache is full. Call `flush()` or `clearCache()` to free space. | +| `AmplifyFirehoseStorageException` | Local database error. | +| `AmplifyFirehoseUnknownException` | Unexpected or uncategorized error. | + +Operations return `Result`: + +```kotlin +when (val result = firehose.record(...)) { + is Result.Success -> { /* success */ } + is Result.Failure -> when (result.error) { + is AmplifyFirehoseValidationException -> { /* invalid input */ } + is AmplifyFirehoseLimitExceededException -> { /* cache full */ } + is AmplifyFirehoseStorageException -> { /* database error */ } + is AmplifyFirehoseUnknownException -> { /* unexpected error */ } + } +} +``` + + + + + +| Error type | Description | +|---|---| +| `FirehoseError.validation` | Record input validation failed (oversized record). | +| `FirehoseError.cacheLimitExceeded` | Local cache is full. Call `flush()` or `clearCache()` to free space. | +| `FirehoseError.cache` | Local database error. | +| `FirehoseError.unknown` | Unexpected or uncategorized error. 
| + +Operations throw `FirehoseError`: + +```swift +do { + try await firehose.record( + data: payload, + streamName: "stream" + ) +} catch let error as FirehoseError { + switch error { + case .validation(let desc, _, _): + print("Validation error: \(desc)") + case .cacheLimitExceeded: + print("Cache full") + case .cache(let desc, _, _): + print("Storage error: \(desc)") + case .unknown(let desc, _, _): + print("Unknown error: \(desc)") + } +} +``` + + + +### Retry behavior + +- All `PutRecordBatch` error codes (`ServiceUnavailableException`, `InternalFailure`) are treated as retryable. +- Each failed record's retry count is incremented after each attempt. +- Records exceeding `maxRetries` (default: 5) are permanently deleted from the cache. +- SDK-level Firehose errors are logged and skipped per-stream, so other streams can still flush. +- Non-SDK errors (network failures, storage errors) abort the flush entirely. + +### Firehose service limits + +The client enforces these limits before sending to the service: + +| Limit | Value | +|---|---| +| Max records per `PutRecordBatch` request | 500 | +| Max single record size | 1,000 KiB | +| Max total payload per `PutRecordBatch` request | 4 MB | diff --git a/src/pages/[platform]/frontend/analytics/kinesis/index.mdx b/src/pages/[platform]/frontend/analytics/kinesis/index.mdx new file mode 100644 index 00000000000..340d4d72ca5 --- /dev/null +++ b/src/pages/[platform]/frontend/analytics/kinesis/index.mdx @@ -0,0 +1,553 @@ +import { getCustomStaticPath } from '@/utils/getCustomStaticPath'; + +export const meta = { + title: 'Kinesis Data Streams client', + description: 'A standalone client for streaming data to Amazon Kinesis Data Streams with offline support, automatic batching, and configurable flushing.', + platforms: [ + 'swift', + 'android', + 'flutter' + ], +}; + +export const getStaticPaths = async () => { + return getCustomStaticPath(meta.platforms); +}; + +export function getStaticProps(context) { + return { + props: 
{ + platform: context.params.platform, + meta + } + }; +} + +`AmplifyKinesisClient` is a standalone client for streaming data to [Amazon Kinesis Data Streams](https://aws.amazon.com/kinesis/data-streams/). It provides: + +- Local persistence for offline support +- Automatic retry for failed records +- Automatic batching (up to 500 records or 5 MB per request) +- Interval-based automatic flushing (default: every 30 seconds) +- Enable/disable toggle that silently drops new records while preserving cached ones + + + +This is a standalone client, separate from the Amplify Analytics category plugin. It communicates directly with the Kinesis Data Streams API using `PutRecords`. + + + + + +Before using this client, ensure your backend is configured with the required IAM permissions. See [Set up Kinesis Data Streams](/[platform]/build-a-backend/add-aws-services/analytics/kinesis/). + + + +## Getting started + +### Installation + + + +Add the dependency to your module's `build.gradle.kts`: + +```kotlin +dependencies { + implementation("com.amplifyframework:aws-kinesis:LATEST_VERSION") +} +``` + + + + + +Add `AmplifyKinesisClient` to your project using Swift Package Manager. In Xcode, go to **File > Add Package Dependencies** and enter the repository URL for the Amplify Swift SDK.
+ + + + + +Add the dependency to your `pubspec.yaml`: + +```yaml +dependencies: + amplify_kinesis: ^2.11.0 +``` + + + +### Initialize the client + + + +```kotlin +import com.amplifyframework.kinesis.AmplifyKinesisClient + +val kinesis = AmplifyKinesisClient( + context = applicationContext, + region = "us-east-1", + credentialsProvider = credentialsProvider +) +``` + + + + + +```swift +import AmplifyKinesisClient + +let kinesis = try AmplifyKinesisClient( + region: "us-east-1", + credentialsProvider: credentialsProvider +) +``` + + + + + +```dart +import 'package:amplify_kinesis/amplify_kinesis.dart'; + +final kinesis = await AmplifyKinesisClient.create( + region: 'us-east-1', + credentialsProvider: credentialsProvider, +); +``` + + + +### Configuration options + +You can customize the client behavior by passing an options object: + + + +| Option | Default | Description | +|---|---|---| +| `cacheMaxBytes` | 5 MB | Maximum size of the local record cache in bytes. | +| `maxRetries` | 5 | Maximum retry attempts per record before it is discarded. | +| `flushStrategy` | `FlushStrategy.Interval(30.seconds)` | Automatic flush interval. Use `FlushStrategy.None` for manual-only flushing. | +| `configureClient` | `null` | Escape hatch to customize the underlying AWS SDK `KinesisClient`. | + + + + + +| Option | Default | Description | +|---|---|---| +| `cacheMaxBytes` | 5 MB | Maximum size of the local record cache in bytes. | +| `maxRetries` | 5 | Maximum retry attempts per record before it is discarded. | +| `flushStrategy` | `.interval(30)` | Automatic flush interval in seconds. Use `.none` for manual-only flushing. | +| `configureClient` | `nil` | Closure to customize the underlying `KinesisClientConfiguration`. | + + + + + +| Option | Default | Description | +|---|---|---| +| `cacheMaxBytes` | 5 MB | Maximum size of the local record cache in bytes. | +| `maxRetries` | 5 | Maximum retry attempts per record before it is discarded. 
| +| `flushStrategy` | `FlushInterval(interval: Duration(seconds: 30))` | Automatic flush interval. Use `FlushNone()` for manual-only flushing. | + + + + + +```kotlin +import com.amplifyframework.kinesis.AmplifyKinesisClient +import com.amplifyframework.kinesis.AmplifyKinesisClientOptions +import com.amplifyframework.recordcache.FlushStrategy +import kotlin.time.Duration.Companion.seconds + +val kinesis = AmplifyKinesisClient( + context = applicationContext, + region = "us-east-1", + credentialsProvider = credentialsProvider, + options = AmplifyKinesisClientOptions { + cacheMaxBytes = 10L * 1024 * 1024 // 10 MB + maxRetries = 3 + flushStrategy = FlushStrategy.Interval(60.seconds) + configureClient { + retryStrategy { maxAttempts = 10 } + } + } +) +``` + +To disable automatic flushing: + +```kotlin +options = AmplifyKinesisClientOptions { + flushStrategy = FlushStrategy.None +} +``` + + + + + +```swift +let kinesis = try AmplifyKinesisClient( + region: "us-east-1", + credentialsProvider: credentialsProvider, + options: .init( + cacheMaxBytes: 10 * 1_024 * 1_024, // 10 MB + maxRetries: 3, + flushStrategy: .interval(60), + configureClient: { config in + // Customize the underlying KinesisClientConfiguration + } + ) +) +``` + +To disable automatic flushing: + +```swift +options: .init(flushStrategy: .none) +``` + + + + + +```dart +final kinesis = await AmplifyKinesisClient.create( + region: 'us-east-1', + credentialsProvider: credentialsProvider, + options: AmplifyKinesisClientOptions( + cacheMaxBytes: 10 * 1024 * 1024, // 10 MB + maxRetries: 3, + flushStrategy: FlushInterval(interval: Duration(seconds: 60)), + ), +); +``` + +To disable automatic flushing: + +```dart +options: AmplifyKinesisClientOptions( + flushStrategy: FlushNone(), +), +``` + + + +## Usage + +### Record data + +Use `record()` to persist data to the local cache. Records are sent to Kinesis during the next flush cycle (automatic or manual). 
+ + + +```kotlin +val result = kinesis.record( + data = "Hello Kinesis".toByteArray(), + partitionKey = "partition-1", + streamName = "my-stream" +) +when (result) { + is Result.Success -> { /* recorded successfully */ } + is Result.Failure -> { /* handle error */ } +} +``` + + + + + +```swift +let result = try await kinesis.record( + data: "Hello Kinesis".data(using: .utf8)!, + partitionKey: "partition-1", + streamName: "my-stream" +) +``` + + + + + +```dart +final result = await kinesis.record( + data: Uint8List.fromList(utf8.encode('Hello Kinesis')), + partitionKey: 'partition-1', + streamName: 'my-stream', +); +switch (result) { + case Ok(): print('Recorded'); + case Error(:final error): print('Error: $error'); +} +``` + + + +Records submitted while the client is disabled are silently dropped. + +### Flush records + +The client automatically flushes cached records at the configured interval (default: 30 seconds). You can also trigger a manual flush: + + + +```kotlin +when (val result = kinesis.flush()) { + is Result.Success -> println("Flushed ${result.data.recordsFlushed} records") + is Result.Failure -> println("Flush error: ${result.error}") +} +``` + + + + + +```swift +let flushResult = try await kinesis.flush() +print("Flushed \(flushResult.recordsFlushed) records") +``` + + + + + +```dart +switch (await kinesis.flush()) { + case Ok(:final value): + print('Flushed ${value.recordsFlushed} records'); + case Error(:final error): + print('Flush failed: $error'); +} +``` + + + +Each flush sends at most one batch per stream (up to 500 records or 5 MB). Remaining records are picked up in subsequent flush cycles. If a flush is already in progress, the call returns immediately with `flushInProgress: true`. + +Manual flushes work even when the client is disabled, allowing you to drain cached records without re-enabling collection.
+ +### Clear cache + +Delete all cached records from local storage: + + + +```kotlin +kinesis.clearCache() +``` + + + + + +```swift +let cleared = try await kinesis.clearCache() +``` + + + + + +```dart +await kinesis.clearCache(); +``` + + + +### Enable and disable + +You can toggle record collection and automatic flushing at runtime. When disabled, new records are silently dropped but already-cached records remain in storage. + + + +```kotlin +kinesis.disable() +// Records are dropped, auto-flush paused + +kinesis.enable() +// Collection and auto-flush resume +``` + + + + + +```swift +await kinesis.disable() +// Records are dropped, auto-flush paused + +await kinesis.enable() +// Collection and auto-flush resume +``` + + + + + +```dart +kinesis.disable(); +// Records are dropped, auto-flush paused + +kinesis.enable(); +// Collection and auto-flush resume +``` + + + + + +### Close the client + +When you're done with the client, release its resources. The client cannot be reused after closing. + +```dart +await kinesis.close(); +``` + + + +## Advanced + +### Escape hatch + +Access the underlying AWS SDK `KinesisClient` for operations not covered by this client's API: + + + +```kotlin +val sdkClient = kinesis.kinesisClient +// Use sdkClient for direct Kinesis API calls +``` + + + + + +```swift +let sdkClient = kinesis.getKinesisClient() +// Use sdkClient for direct Kinesis API calls +``` + + + + + +```dart +final sdkClient = kinesis.kinesisClient; +// Use sdkClient for direct Kinesis API calls +``` + + + +### Error handling + +All operations surface errors through a sealed exception hierarchy: + + + +| Error type | Description | +|---|---| +| `AmplifyKinesisValidationException` | Record input validation failed (oversized record, invalid partition key). | +| `AmplifyKinesisLimitExceededException` | Local cache is full. Call `flush()` or `clearCache()` to free space. | +| `AmplifyKinesisStorageException` | Local database error. 
| +| `AmplifyKinesisUnknownException` | Unexpected or uncategorized error. | + +Operations return `Result`: + +```kotlin +when (val result = kinesis.record(...)) { + is Result.Success -> { /* success */ } + is Result.Failure -> when (result.error) { + is AmplifyKinesisValidationException -> { /* invalid input */ } + is AmplifyKinesisLimitExceededException -> { /* cache full */ } + is AmplifyKinesisStorageException -> { /* database error */ } + is AmplifyKinesisUnknownException -> { /* unexpected error */ } + } +} +``` + + + + + +| Error type | Description | +|---|---| +| `KinesisError.validation` | Record input validation failed (oversized record, invalid partition key). | +| `KinesisError.cacheLimitExceeded` | Local cache is full. Call `flush()` or `clearCache()` to free space. | +| `KinesisError.cache` | Local database error. | +| `KinesisError.unknown` | Unexpected or uncategorized error. | + +Operations throw `KinesisError`: + +```swift +do { + try await kinesis.record( + data: payload, + partitionKey: "key", + streamName: "stream" + ) +} catch let error as KinesisError { + switch error { + case .validation(let desc, _, _): + print("Validation error: \(desc)") + case .cacheLimitExceeded: + print("Cache full") + case .cache(let desc, _, _): + print("Storage error: \(desc)") + case .unknown(let desc, _, _): + print("Unknown error: \(desc)") + } +} +``` + + + + + +| Error type | Description | +|---|---| +| `KinesisValidationException` | Record input validation failed (oversized record, invalid partition key). | +| `KinesisLimitExceededException` | Local cache is full. Call `flush()` or `clearCache()` to free space. | +| `KinesisStorageException` | Local database error. | +| `KinesisUnknownException` | Unexpected or uncategorized error. | +| `ClientClosedException` | The client was closed and cannot be used. 
| +Operations return `Result` with `AmplifyKinesisException` subtypes: + +```dart +switch (await kinesis.record(...)) { + case Ok(): break; + case Error(:final error): + switch (error) { + case KinesisValidationException(): // invalid input + case KinesisLimitExceededException(): // cache full + case KinesisStorageException(): // database error + case KinesisUnknownException(): // unexpected error + case ClientClosedException(): // client was closed + } +} +``` + + + +### Retry behavior + +- All `PutRecords` error codes (`ProvisionedThroughputExceededException`, `InternalFailure`) are treated as retryable. +- Each failed record's retry count is incremented after each attempt. +- Records exceeding `maxRetries` (default: 5) are permanently deleted from the cache. +- SDK-level Kinesis errors are logged and skipped per-stream, so other streams can still flush. +- Non-SDK errors (network failures, storage errors) abort the flush entirely. + +### Kinesis service limits + +The client enforces these limits before sending to the service: + +| Limit | Value | +|---|---| +| Max records per `PutRecords` request | 500 | +| Max single record size | 1 MB | +| Max total payload per `PutRecords` request | 5 MB | +| Max partition key length | 256 characters | diff --git a/src/pages/[platform]/frontend/storage/download-files/index.mdx b/src/pages/[platform]/frontend/storage/download-files/index.mdx index d53627f366d..28ed2aec660 100644 --- a/src/pages/[platform]/frontend/storage/download-files/index.mdx +++ b/src/pages/[platform]/frontend/storage/download-files/index.mdx @@ -114,6 +114,7 @@ const linkToStorageFile = await getUrl({ Option | Type | Default | Description | | :--: | :--: | :--: | ----------- | +| method | 'GET' \| 'PUT' | 'GET' | The HTTP method for the presigned URL. `'GET'` generates a URL for downloading an object. `'PUT'` generates a URL for uploading an object.

Read more at [Upload using a presigned URL](/[platform]/frontend/storage/upload-files/#upload-using-a-presigned-url) | | bucket | string \|
\{ bucketName: string;
region: string; \} | Default bucket and region from Amplify configuration | A string representing the target bucket's assigned name in Amplify Backend or an object specifying the bucket name and region from the console.

Read more at [Configure additional storage buckets](/[platform]/build-a-backend/storage/set-up-storage/#configure-additional-storage-buckets) | validateObjectExistence | boolean | false | Whether to head object to make sure the object existence before downloading. | | expiresIn | number | 900 | Number of seconds till the URL expires.

The expiration time of the presigned url is dependent on the session and will max out at 1 hour. | @@ -279,10 +280,18 @@ let url = try await Amplify.Storage.getURL( ### All `getURL` options -Option | Type | Description | -| -- | -- | ----------- | -| expires | Int | Number of seconds before the URL expires | -| bucket | StorageBucket | The bucket in which the object is stored | +Option | Type | Default | Description | +| -- | -- | :--: | ----------- | +| expires | Int | 18000 | Number of seconds before the URL expires | +| bucket | StorageBucket | Default bucket from Amplify configuration | The bucket in which the object is stored | +| pluginOptions.method | StorageAccessMethod | .get | `.get` generates a download URL. `.put` generates an upload URL. | +| pluginOptions.validateObjectExistence | Bool | false | Whether to check the object exists before generating the URL. Skipped when method is `.put`. | + + + +You can also use `getURL` with `method: .put` to generate presigned URLs for uploading files directly to S3. Learn more at [Upload using a presigned URL](/[platform]/frontend/storage/upload-files/#upload-using-a-presigned-url). + + diff --git a/src/pages/[platform]/frontend/storage/upload-files/index.mdx b/src/pages/[platform]/frontend/storage/upload-files/index.mdx index e4a08c765c5..27968fe04d4 100644 --- a/src/pages/[platform]/frontend/storage/upload-files/index.mdx +++ b/src/pages/[platform]/frontend/storage/upload-files/index.mdx @@ -1572,6 +1572,56 @@ func getTempUrls(securityScopedUrls: [URL]) -> [URL] { } ``` +## Upload using a presigned URL + +You can use the `getURL` API with `method: .put` to generate a presigned URL for uploading files directly to S3. 
This is useful when: + +- You need to integrate with third-party tools or libraries that only accept standard HTTP URL endpoints +- You want to share a temporary upload link with another client or service +- You need to upload from a context where the Amplify SDK is not available + +```swift +import Amplify +import AWSS3StoragePlugin + +// Generate a presigned URL for uploading +let uploadUrl = try await Amplify.Storage.getURL( + path: .fromString("public/uploads/photo.jpg"), + options: .init( + pluginOptions: AWSStorageGetURLOptions( + method: .put + ) + ) +) +``` + +Then use the presigned URL to upload the file with a standard HTTP `PUT` request: + +```swift +var request = URLRequest(url: uploadUrl) +request.httpMethod = "PUT" +request.httpBody = imageData + +let (_, response) = try await URLSession.shared.data(for: request) +let httpResponse = response as? HTTPURLResponse +print("Upload status: \(httpResponse?.statusCode ?? 0)") +``` + + + +When `method: .put` is specified, the `validateObjectExistence` option is ignored since the object may not exist yet. + + + +### Presigned URL upload options + +Option | Type | Default | Description | +| -- | -- | :--: | ----------- | +| pluginOptions.method | StorageAccessMethod | .get | `.get` generates a download URL. `.put` generates an upload URL. | +| expires | Int | 18000 | Number of seconds before the URL expires. | +| bucket | StorageBucket | Default bucket from Amplify configuration | The bucket in which the object is stored. | +| pluginOptions.validateObjectExistence | Bool | false | Whether to check the object exists before generating the URL. Skipped when method is `.put`. | + @@ -1642,3 +1692,90 @@ Uploads that were initiated over one hour ago will be cancelled automatically. T ## MultiPart upload Amplify will automatically perform an Amazon S3 multipart upload for objects that are larger than 5MB. 
For more information about S3's multipart upload, see [Uploading and copying objects using multipart upload](https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html) + + + +## Upload using a presigned URL + +You can use the `getUrl` API with `method: 'PUT'` to generate a presigned URL for uploading files directly to S3. This is useful when: + +- You need to integrate with third-party tools or libraries that only accept standard HTTP URL endpoints (e.g. DuckDB, database export tools) +- You want to upload from server-side environments such as Next.js API routes or other SSR frameworks +- You need to share a temporary upload link with another client or service + +```typescript +import { getUrl } from 'aws-amplify/storage'; + +// Generate a presigned URL for uploading +const { url, expiresAt } = await getUrl({ + path: 'album/2024/1.jpg', + options: { + method: 'PUT', + expiresIn: 3600, // URL valid for 1 hour + contentType: 'image/jpeg', + } +}); + +console.log('Upload URL: ', url); +console.log('URL expires at: ', expiresAt); +``` + +Then use the presigned URL to upload the file with a standard HTTP `PUT` request: + +```typescript +await fetch(url, { + method: 'PUT', + body: file, + headers: { + 'Content-Type': 'image/jpeg', + }, +}); +``` + + + +When `method: 'PUT'` is specified, the `validateObjectExistence` option is ignored since the object may not exist yet. + + + + + +If you specify `contentType` when generating the presigned URL, you **must** include the matching `Content-Type` header in the upload request. A mismatch will cause S3 to reject the request with a signature error. 
+ + + +### Using presigned URLs with third-party tools + +Because the presigned URL is a standard HTTP endpoint, it works with any tool that supports HTTP uploads: + +```typescript +import { getUrl } from 'aws-amplify/storage'; + +const { url } = await getUrl({ + path: 'analytics/data.parquet', + options: { + method: 'PUT', + contentType: 'application/octet-stream', + } +}); + +// Example: use with DuckDB to export query results directly to S3 +await duckdb.query(` + COPY (SELECT * FROM processed_data) + TO '${url}' + (FORMAT PARQUET) +`); +``` + +### Presigned URL upload options + +Option | Type | Default | Description | +| :--: | :--: | :--: | ----------- | +| method | 'GET' \| 'PUT' | 'GET' | The HTTP method for the presigned URL. Use `'PUT'` to generate an upload URL. | +| bucket | string \|
\{ bucketName: string;
region: string; \} | Default bucket and region from Amplify configuration | A string representing the target bucket's assigned name in Amplify Backend or an object specifying the bucket name and region from the console.

Read more at [Configure additional storage buckets](/[platform]/build-a-backend/storage/set-up-storage/#configure-additional-storage-buckets) | +| expiresIn | number | 900 | Number of seconds till the URL expires.

The expiration time of the presigned URL is dependent on the session and will max out at 1 hour. | +| contentType | string | — | The MIME type of the file to be uploaded. When specified, the matching `Content-Type` header must be included in the upload request. | +| contentDisposition | string \| object | — | Specifies presentational information for the object. Can be a string (e.g. `'attachment; filename="file.jpg"'`) or an object (e.g. `{ type: 'attachment', filename: 'file.jpg' }`). | +| expectedBucketOwner | string | — | The account ID that owns the requested bucket. | + +
diff --git a/src/styles/global-nav.scss b/src/styles/global-nav.scss index 3146c782f58..40157161920 100644 --- a/src/styles/global-nav.scss +++ b/src/styles/global-nav.scss @@ -342,7 +342,7 @@ .gen1-page-banner__badge { display: inline-block; - background-color: #d97706; + background-color: #b45309; color: var(--amplify-colors-white); font-size: 11px; font-weight: 700; diff --git a/src/utils/__tests__/getPageSection.test.ts b/src/utils/__tests__/getPageSection.test.ts index 7e3a32176a4..7770f805fd9 100644 --- a/src/utils/__tests__/getPageSection.test.ts +++ b/src/utils/__tests__/getPageSection.test.ts @@ -41,9 +41,7 @@ describe('getPageSection', () => { }); it('returns frontend for SSR pages at new path', () => { - const result = getPageSection( - '/[platform]/frontend/server-side-rendering' - ); + const result = getPageSection('/[platform]/frontend/server-side-rendering'); expect(result.section).toBe('frontend'); }); @@ -54,6 +52,11 @@ describe('getPageSection', () => { expect(result.featureRoute).toBe('/[platform]/frontend/auth'); }); + it('returns backend featureRoute for frontend auth pages', () => { + const result = getPageSection('/[platform]/frontend/auth/sign-in'); + expect(result.featureRoute).toBe('/[platform]/build-a-backend/auth'); + }); + it('returns frontend featureRoute for deeply nested backend pages', () => { const result = getPageSection( '/[platform]/build-a-backend/data/data-modeling/add-fields' @@ -70,4 +73,67 @@ describe('getPageSection', () => { const result = getPageSection('/gen1/[platform]/build-a-backend/auth'); expect(result.section).toBeUndefined(); }); + + it('returns backend add-aws-services featureRoute for frontend analytics pages', () => { + const result = getPageSection( + '/[platform]/frontend/analytics/record-events' + ); + expect(result.featureRoute).toBe( + '/[platform]/build-a-backend/add-aws-services/analytics' + ); + }); + + it('returns deep backend featureRoute for frontend kinesis page', () => { + const result = 
getPageSection('/[platform]/frontend/analytics/kinesis'); + expect(result.featureRoute).toBe( + '/[platform]/build-a-backend/add-aws-services/analytics/kinesis' + ); + }); + + it('returns frontend featureRoute for backend add-aws-services analytics page', () => { + const result = getPageSection( + '/[platform]/build-a-backend/add-aws-services/analytics/set-up-analytics' + ); + expect(result.featureRoute).toBe('/[platform]/frontend/analytics'); + }); + + it('returns backend featureRoute for frontend geo page', () => { + const result = getPageSection('/[platform]/frontend/geo/maps'); + expect(result.featureRoute).toBe( + '/[platform]/build-a-backend/add-aws-services/geo' + ); + }); + + it('returns frontend featureRoute for backend geo page', () => { + const result = getPageSection( + '/[platform]/build-a-backend/add-aws-services/geo/set-up-geo' + ); + expect(result.featureRoute).toBe('/[platform]/frontend/geo'); + }); + + it('returns frontend featureRoute for backend storage page', () => { + const result = getPageSection( + '/[platform]/build-a-backend/storage/set-up-storage' + ); + expect(result.featureRoute).toBe('/[platform]/frontend/storage'); + }); + + it('returns backend featureRoute for frontend storage page', () => { + const result = getPageSection('/[platform]/frontend/storage/upload-files'); + expect(result.featureRoute).toBe('/[platform]/build-a-backend/storage'); + }); + + it('returns undefined featureRoute for pages with no cross-section match', () => { + const result = getPageSection( + '/[platform]/build-a-backend/functions/set-up-function' + ); + expect(result.featureRoute).toBeUndefined(); + }); + + it('returns undefined featureRoute for Gen1 pages', () => { + const result = getPageSection( + '/gen1/[platform]/build-a-backend/auth/set-up-auth' + ); + expect(result.featureRoute).toBeUndefined(); + }); }); diff --git a/src/utils/getPageSection.ts b/src/utils/getPageSection.ts index bbe2dc49916..3e8c8fe3de8 100644 --- a/src/utils/getPageSection.ts +++ 
b/src/utils/getPageSection.ts @@ -4,8 +4,7 @@ import { SectionKey } from '@/data/sections'; /** * Walk the directory tree from a page's pathname upward to find * the nearest ancestor with a non-'both' section tag. - * Also finds the feature category route (e.g., /[platform]/build-a-backend/auth) - * for smart CrossLink targeting. + * Also finds the best matching backend/frontend route for CrossLink targeting. */ export function getPageSection(pathname: string): { section: SectionKey | undefined; @@ -24,34 +23,58 @@ export function getPageSection(pathname: string): { } } - // Find feature category for CrossLink targeting. - // For backend pages, link to the corresponding frontend category. - // For frontend pages, link to the corresponding backend category. - let featureRoute: string | undefined; - const backendFeature = pathname.match( - /\/\[platform\]\/build-a-backend\/([^/]+)/ - ); - const frontendFeature = pathname.match(/\/\[platform\]\/frontend\/([^/]+)/); - - if (backendFeature) { - // Backend page → link to frontend equivalent - const feature = backendFeature[1]; - const frontendNode = findDirectoryNode( - `/[platform]/frontend/${feature}` - ); - if (frontendNode) { - featureRoute = `/[platform]/frontend/${feature}`; + // CrossLink: find the corresponding page in the other section. + // Try progressively shorter sub-paths for the best match. + const featureRoute = findCrossLink(pathname); + + return { section, featureRoute }; +} + +const BACKEND_ROOTS = [ + '/[platform]/build-a-backend/add-aws-services/', + '/[platform]/build-a-backend/' +]; +const FRONTEND_ROOT = '/[platform]/frontend/'; + +/** + * Given a pathname, find the best matching route in the opposite section. + * Tries deepest sub-path first, then walks up to feature-level. 
+ */ +function findCrossLink(pathname: string): string | undefined { + // Backend → Frontend + for (const root of BACKEND_ROOTS) { + if (pathname.startsWith(root)) { + const relative = pathname.slice(root.length); + return findBestMatch(relative, FRONTEND_ROOT); } - } else if (frontendFeature) { - // Frontend page → link to backend equivalent - const feature = frontendFeature[1]; - const backendNode = findDirectoryNode( - `/[platform]/build-a-backend/${feature}` - ); - if (backendNode) { - featureRoute = `/[platform]/build-a-backend/${feature}`; + } + + // Frontend → Backend (try sub-paths from deepest to shallowest) + if (pathname.startsWith(FRONTEND_ROOT)) { + const relative = pathname.slice(FRONTEND_ROOT.length); + for (const root of BACKEND_ROOTS) { + const match = findBestMatch(relative, root); + if (match) return match; } } - return { section, featureRoute }; + return undefined; +} + +/** + * Try progressively shorter sub-paths of `relative` under `targetRoot`. + * e.g. relative="analytics/kinesis" tries "analytics/kinesis" then "analytics". + */ +function findBestMatch( + relative: string, + targetRoot: string +): string | undefined { + const parts = relative.split('/').filter(Boolean); + for (let i = parts.length; i > 0; i--) { + const candidate = targetRoot + parts.slice(0, i).join('/'); + if (findDirectoryNode(candidate)) { + return candidate; + } + } + return undefined; }