Skip to content

Commit 76ee973

Browse files
committed
Testing a feature coverage component
The dynamic import is now working but the table structure and data aren't set up
1 parent 65af259 commit 76ee973

File tree

2 files changed

+222
-32
lines changed

2 files changed

+222
-32
lines changed
Lines changed: 185 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,185 @@
1+
import React from "react";
2+
const jsonData = import.meta.glob('/src/data/coverage/*.json');
3+
import {
4+
Table,
5+
TableHeader,
6+
TableBody,
7+
TableRow,
8+
TableHead,
9+
TableCell,
10+
} from "@/components/ui/table";
11+
import {
12+
useReactTable,
13+
getCoreRowModel,
14+
getSortedRowModel,
15+
flexRender,
16+
getFilteredRowModel,
17+
getPaginationRowModel,
18+
} from "@tanstack/react-table";
19+
import type { SortingState, ColumnDef, ColumnFiltersState } from "@tanstack/react-table";
20+
21+
const columns: ColumnDef<any>[] = [
22+
{
23+
accessorKey: "full_name",
24+
header: () => "Service",
25+
cell: ({ row }) => (
26+
<a href={`/aws/${row.original.service}`}>{row.original.full_name}</a>
27+
),
28+
enableColumnFilter: true,
29+
filterFn: (row, columnId, filterValue) => {
30+
return row.original.full_name
31+
.toLowerCase()
32+
.includes((filterValue ?? "").toLowerCase());
33+
},
34+
meta: { className: "w-1/3" },
35+
},
36+
{
37+
accessorKey: "support",
38+
header: () => "Supported",
39+
cell: ({ row }) =>
40+
row.original.support === "supported" ||
41+
row.original.support === "supported with limitations"
42+
? "✔️"
43+
: "",
44+
meta: { className: "w-1/6" },
45+
enableSorting: true,
46+
sortingFn: (rowA, rowB) => {
47+
// Sort supported to the top
48+
const a = rowA.original.support;
49+
const b = rowB.original.support;
50+
if (a === b) return 0;
51+
if (a === "supported") return -1;
52+
if (b === "supported") return 1;
53+
if (a === "supported with limitations") return -1;
54+
if (b === "supported with limitations") return 1;
55+
return a.localeCompare(b);
56+
},
57+
},
58+
{
59+
accessorKey: "test_suite",
60+
header: () => "Persistence Test Suite",
61+
cell: ({ row }) => (row.original.test_suite ? "✔️" : ""),
62+
meta: { className: "w-1/6" },
63+
enableSorting: true,
64+
},
65+
{
66+
accessorKey: "limitations",
67+
header: () => "Limitations",
68+
cell: ({ row }) => row.original.limitations,
69+
enableSorting: false,
70+
meta: { className: "whitespace-normal" },
71+
},
72+
];
73+
74+
export default function PersistenceCoverage({service}: {service: string}) {
75+
const data = jsonData[`/src/data/coverage/${service}.json`];
76+
const coverage = Object.values(data);
77+
const [sorting, setSorting] = React.useState<SortingState>([
78+
{ id: "full_name", desc: false },
79+
]);
80+
const [columnFilters, setColumnFilters] = React.useState<ColumnFiltersState>([]);
81+
82+
const table = useReactTable({
83+
data: coverage,
84+
columns,
85+
state: { sorting, columnFilters },
86+
onSortingChange: setSorting,
87+
onColumnFiltersChange: setColumnFilters,
88+
getCoreRowModel: getCoreRowModel(),
89+
getSortedRowModel: getSortedRowModel(),
90+
getFilteredRowModel: getFilteredRowModel(),
91+
getPaginationRowModel: getPaginationRowModel(),
92+
debugTable: false,
93+
initialState: { pagination: { pageSize: 10 } },
94+
});
95+
96+
return (
97+
<div className="w-full">
98+
<h2 id="api-coverage">API Coverage</h2>
99+
<div style={{ marginBottom: 12, marginTop: 12 }}>
100+
<input
101+
type="text"
102+
placeholder="Filter by service name..."
103+
value={
104+
table.getColumn("full_name")?.getFilterValue() as string || ""
105+
}
106+
onChange={e =>
107+
table.getColumn("full_name")?.setFilterValue(e.target.value)
108+
}
109+
className="border rounded px-2 py-1 w-full max-w-xs"
110+
/>
111+
</div>
112+
<div className="rounded-md border">
113+
<Table>
114+
<TableHeader>
115+
{table.getHeaderGroups().map((headerGroup) => (
116+
<TableRow key={headerGroup.id}>
117+
{headerGroup.headers.map((header) => {
118+
const canSort = header.column.getCanSort();
119+
const meta = header.column.columnDef.meta as { className?: string } | undefined;
120+
return (
121+
<TableHead
122+
key={header.id}
123+
onClick={canSort ? header.column.getToggleSortingHandler() : undefined}
124+
className={
125+
(meta?.className || "") +
126+
(canSort ? " cursor-pointer select-none" : "")
127+
}
128+
>
129+
{flexRender(header.column.columnDef.header, header.getContext())}
130+
{canSort && (
131+
<span>
132+
{header.column.getIsSorted() === "asc"
133+
? " ▲"
134+
: header.column.getIsSorted() === "desc"
135+
? " ▼"
136+
: ""}
137+
</span>
138+
)}
139+
</TableHead>
140+
);
141+
})}
142+
</TableRow>
143+
))}
144+
</TableHeader>
145+
<TableBody>
146+
{table.getRowModel().rows.map((row) => (
147+
<TableRow key={row.id}>
148+
{row.getVisibleCells().map((cell) => {
149+
const meta = cell.column.columnDef.meta as { className?: string } | undefined;
150+
return (
151+
<TableCell
152+
key={cell.id}
153+
className={meta?.className || undefined}
154+
>
155+
{flexRender(cell.column.columnDef.cell, cell.getContext())}
156+
</TableCell>
157+
);
158+
})}
159+
</TableRow>
160+
))}
161+
</TableBody>
162+
</Table>
163+
</div>
164+
<div className="flex items-center justify-between mt-4">
165+
<button
166+
className="px-3 py-1 border rounded disabled:opacity-50"
167+
onClick={() => table.previousPage()}
168+
disabled={!table.getCanPreviousPage()}
169+
>
170+
Previous
171+
</button>
172+
<span>
173+
Page {table.getState().pagination.pageIndex + 1} of {table.getPageCount()}
174+
</span>
175+
<button
176+
className="px-3 py-1 border rounded disabled:opacity-50"
177+
onClick={() => table.nextPage()}
178+
disabled={!table.getCanNextPage()}
179+
>
180+
Next
181+
</button>
182+
</div>
183+
</div>
184+
);
185+
}
Lines changed: 37 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,13 @@ description: Use foundation models running on your device with LocalStack!
55
tags: ["Ultimate"]
66
---
77

8+
import FeatureCoverage from "../../../../components/feature-coverage/FeatureCoverage";
9+
810
## Introduction
911

1012
Bedrock is a fully managed service provided by Amazon Web Services (AWS) that makes foundation models from various LLM providers accessible via an API.
1113
LocalStack allows you to use the Bedrock APIs to test and develop AI-powered applications in your local environment.
12-
The supported APIs are available on our [API Coverage Page]({{< ref "coverage_bedrock" >}}), which provides information on the extent of Bedrock's integration with LocalStack.
14+
The supported APIs are available on our [API Coverage section](#api-coverage), which provides information on the extent of Bedrock's integration with LocalStack.
1315

1416
## Getting started
1517

@@ -37,16 +39,17 @@ This way you avoid long wait times when switching between models on demand with
3739

3840
You can view all available foundation models using the [`ListFoundationModels`](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_ListFoundationModels.html) API.
3941
This will show you which models are available on AWS Bedrock.
40-
{{< callout "note">}}
42+
43+
:::note
4144
The actual model that will be used for emulation will differ from the ones defined in this list.
4245
You can define the used model with `DEFAULT_BEDROCK_MODEL`
43-
{{< / callout >}}
46+
:::
4447

4548
Run the following command:
4649

47-
{{< command >}}
48-
$ awslocal bedrock list-foundation-models
49-
{{< / command >}}
50+
```bash
51+
awslocal bedrock list-foundation-models
52+
```
5053

5154
### Invoke a model
5255

@@ -56,15 +59,15 @@ However, the actual model will be defined by the `DEFAULT_BEDROCK_MODEL` environ
5659

5760
Run the following command:
5861

59-
{{< command >}}
60-
$ awslocal bedrock-runtime invoke-model \
62+
```bash
63+
awslocal bedrock-runtime invoke-model \
6164
--model-id "meta.llama3-8b-instruct-v1:0" \
6265
--body '{
6366
"prompt": "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\nSay Hello!\n<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>",
6467
"max_gen_len": 2,
6568
"temperature": 0.9
6669
}' --cli-binary-format raw-in-base64-out outfile.txt
67-
{{< / command >}}
70+
```
6871

6972
The output will be available in the `outfile.txt`.
7073

@@ -75,8 +78,8 @@ You can specify both system prompts and user messages.
7578

7679
Run the following command:
7780

78-
{{< command >}}
79-
$ awslocal bedrock-runtime converse \
81+
```bash
82+
awslocal bedrock-runtime converse \
8083
--model-id "meta.llama3-8b-instruct-v1:0" \
8184
--messages '[{
8285
"role": "user",
@@ -87,38 +90,38 @@ $ awslocal bedrock-runtime converse \
8790
--system '[{
8891
"text": "You'\''re a chatbot that can only say '\''Hello!'\''"
8992
}]'
90-
{{< / command >}}
93+
```
9194

9295
### Model Invocation Batch Processing
9396

9497
Bedrock offers the feature to handle large batches of model invocation requests defined in S3 buckets using the [`CreateModelInvocationJob`](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_CreateModelInvocationJob.html) API.
9598

9699
First, you need to create a `JSONL` file that contains all your prompts:
97100

98-
{{< command >}}
99-
$ cat batch_input.jsonl
101+
```bash
102+
cat batch_input.jsonl
100103
{"prompt": "Tell me a quick fact about Vienna.", "max_tokens": 50, "temperature": 0.5}
101104
{"prompt": "Tell me a quick fact about Zurich.", "max_tokens": 50, "temperature": 0.5}
102105
{"prompt": "Tell me a quick fact about Las Vegas.", "max_tokens": 50, "temperature": 0.5}
103-
{{< / command >}}
106+
```
104107

105108
Then, you need to define buckets for the input as well as the output and upload the file in the input bucket:
106109

107-
{{< command >}}
108-
$ awslocal s3 mb s3://in-bucket
110+
```bash
111+
awslocal s3 mb s3://in-bucket
109112
make_bucket: in-bucket
110113

111-
$ awslocal s3 cp batch_input.jsonl s3://in-bucket
114+
awslocal s3 cp batch_input.jsonl s3://in-bucket
112115
upload: ./batch_input.jsonl to s3://in-bucket/batch_input.jsonl
113116

114-
$ awslocal s3 mb s3://out-bucket
117+
awslocal s3 mb s3://out-bucket
115118
make_bucket: out-bucket
116-
{{< / command >}}
119+
```
117120

118121
Afterwards you can run the invocation job like this:
119122

120-
{{< command >}}
121-
$ awslocal bedrock create-model-invocation-job \
123+
```bash
124+
awslocal bedrock create-model-invocation-job \
122125
--job-name "my-batch-job" \
123126
--model-id "mistral.mistral-small-2402-v1:0" \
124127
--role-arn "arn:aws:iam::123456789012:role/MyBatchInferenceRole" \
@@ -127,7 +130,7 @@ $ awslocal bedrock create-model-invocation-job \
127130
{
128131
"jobArn": "arn:aws:bedrock:us-east-1:000000000000:model-invocation-job/12345678"
129132
}
130-
{{< / command >}}
133+
```
131134

132135
The results will be at the S3 URL `s3://out-bucket/12345678/batch_input.jsonl.out`
133136

@@ -140,33 +143,33 @@ LocalStack will pull the model from Ollama and use it for emulation.
140143

141144
For example, to use the Mistral model, set the environment variable while starting LocalStack:
142145

143-
{{< command >}}
144-
$ DEFAULT_BEDROCK_MODEL=mistral localstack start
145-
{{< / command >}}
146+
```bash
147+
DEFAULT_BEDROCK_MODEL=mistral localstack start
148+
```
146149

147150
You can also define models directly in the request, by setting the `model-id` parameter to `ollama.<ollama-model-id>`.
148151
For example, if you want to access `deepseek-r1`, you can do it like this:
149152

150-
{{< command >}}
151-
$ awslocal bedrock-runtime converse \
153+
```bash
154+
awslocal bedrock-runtime converse \
152155
--model-id "ollama.deepseek-r1" \
153156
--messages '[{
154157
"role": "user",
155158
"content": [{
156159
"text": "Say Hello!"
157160
}]
158161
}]'
159-
{{< / command >}}
162+
```
160163

161164
## Troubleshooting
162165

163166
Users of Docker Desktop on macOS or Windows might run into the issue of Bedrock becoming unresponsive after some usage.
164167
A common reason for that is insufficient storage or memory space in the Docker Desktop VM.
165168
To resolve this issue, you can increase those amounts directly in Docker Desktop or clean up unused artifacts with the Docker CLI like this:
166169

167-
{{< command >}}
168-
$ docker system prune
169-
{{< / command >}}
170+
```bash
171+
docker system prune
172+
```
170173

171174
You could also try to use a model with lower requirements.
172175
To achieve that you can search for models in the [Ollama Models library](https://ollama.com/search) with a low parameter count or smaller size.
@@ -176,3 +179,5 @@ To achieve that you can search for models in the [Ollama Models library](https:/
176179
* At this point, we have only tested text-based models in LocalStack.
177180
Other models available with Ollama might also work, but are not officially supported by the Bedrock implementation.
178181
* Currently, GPU models are not supported by the LocalStack Bedrock implementation.
182+
183+
<FeatureCoverage service="bedrock" client:load />

0 commit comments

Comments
 (0)