diff --git a/src/content/docs/images/examples/watermark-from-kv.mdx b/src/content/docs/images/examples/watermark-from-kv.mdx
index 827e018a736ed8..1c8b3417bcd842 100644
--- a/src/content/docs/images/examples/watermark-from-kv.mdx
+++ b/src/content/docs/images/examples/watermark-from-kv.mdx
@@ -1,5 +1,4 @@
---
-
summary: Draw a watermark from KV on an image from R2
pcx_content_type: example
title: Watermarks
@@ -9,41 +8,47 @@ description: Draw a watermark from KV on an image from R2
reviewed: 2025-04-03
---
+import { TypeScriptExample } from "~/components";
+
+<TypeScriptExample>
+
```ts
interface Env {
- BUCKET: R2Bucket,
- NAMESPACE: KVNamespace,
- IMAGES: ImagesBinding,
+ BUCKET: R2Bucket;
+ NAMESPACE: KVNamespace;
+ IMAGES: ImagesBinding;
}
export default {
- async fetch(request, env, ctx): Promise<Response> {
- const watermarkKey = "my-watermark";
- const sourceKey = "my-source-image";
+ async fetch(request, env, ctx): Promise<Response> {
+ const watermarkKey = "my-watermark";
+ const sourceKey = "my-source-image";
- const cache = await caches.open("transformed-images");
- const cacheKey = new URL(sourceKey + "/" + watermarkKey, request.url);
- const cacheResponse = await cache.match(cacheKey);
+ const cache = await caches.open("transformed-images");
+ const cacheKey = new URL(sourceKey + "/" + watermarkKey, request.url);
+ const cacheResponse = await cache.match(cacheKey);
- if (cacheResponse) {
- return cacheResponse;
- }
+ if (cacheResponse) {
+ return cacheResponse;
+ }
- let watermark = await env.NAMESPACE.get(watermarkKey, "stream");
- let source = await env.BUCKET.get(sourceKey);
+ let watermark = await env.NAMESPACE.get(watermarkKey, "stream");
+ let source = await env.BUCKET.get(sourceKey);
- if (!watermark || !source) {
- return new Response("Not found", { status: 404 });
- }
+ if (!watermark || !source) {
+ return new Response("Not found", { status: 404 });
+ }
- const result = await env.IMAGES.input(source.body)
- .draw(watermark)
- .output({ format: "image/jpeg" });
+ const result = await env.IMAGES.input(source.body)
+ .draw(watermark)
+ .output({ format: "image/jpeg" });
- const response = result.response();
+ const response = result.response();
- ctx.waitUntil(cache.put(cacheKey, response.clone()));
+ ctx.waitUntil(cache.put(cacheKey, response.clone()));
- return result.response();
- },
+ return response;
+ },
} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
diff --git a/src/content/docs/images/manage-images/serve-images/serve-private-images.mdx b/src/content/docs/images/manage-images/serve-images/serve-private-images.mdx
index 4ab52fbb43449a..18f0e6b1cf9b97 100644
--- a/src/content/docs/images/manage-images/serve-images/serve-private-images.mdx
+++ b/src/content/docs/images/manage-images/serve-images/serve-private-images.mdx
@@ -74,7 +74,7 @@ async function generateSignedUrl(url) {
export default {
async fetch(request, env, ctx): Promise<Response> {
- const url = new URL(event.request.url);
+ const url = new URL(request.url);
const imageDeliveryURL = new URL(
url.pathname
.slice(1)
diff --git a/src/content/docs/images/transform-images/bindings.mdx b/src/content/docs/images/transform-images/bindings.mdx
index 68d954e7337264..ed4f3683756bef 100644
--- a/src/content/docs/images/transform-images/bindings.mdx
+++ b/src/content/docs/images/transform-images/bindings.mdx
@@ -5,7 +5,7 @@ sidebar:
order: 4
---
-import { WranglerConfig } from "~/components";
+import { WranglerConfig, TypeScriptExample } from "~/components";
A [binding](/workers/runtime-apis/bindings/) connects your [Worker](/workers/) to external resources on the Developer Platform, like [Images](/images/transform-images/transform-via-workers/), [R2 buckets](/r2/buckets/), or [KV Namespaces](/kv/concepts/kv-namespaces/).
@@ -61,26 +61,29 @@ Within your Worker code, you can interact with this binding by using `env.IMAGES
For example, to draw a resized watermark on an image:
+<TypeScriptExample>
+
```ts
// Fetch the watermark from Workers Assets, R2, KV etc
-const watermark: ReadableStream = ...
+const watermark: ReadableStream = getWatermarkStream();
// Fetch the main image
-const image: ReadableStream = ...
+const image: ReadableStream = getImageStream();
const response = (
- await env.IMAGES.input(image)
- .draw(
- env.IMAGES.input(watermark)
- .transform({ width: 32, height: 32}),
- { bottom: 32, right: 32 }
- )
- .output({ format: "image/avif" })
-).response()
+ await env.IMAGES.input(image)
+ .draw(env.IMAGES.input(watermark).transform({ width: 32, height: 32 }), {
+ bottom: 32,
+ right: 32,
+ })
+ .output({ format: "image/avif" })
+).response();
return response;
```
+
+</TypeScriptExample>
### `.output()`
- You must define [a supported format](/images/transform-images/#supported-output-formats) such as AVIF, WebP, or JPEG for the [transformed image](/images/transform-images/).
@@ -89,6 +92,8 @@ return response;
For example, to rotate, resize, and blur an image, then output the image as AVIF:
+<TypeScriptExample>
+
```ts
const info = await env.IMAGES.info(stream);
// Stream contains a valid image, and width/height is available on the info object
@@ -107,6 +112,8 @@ const response = (
return response;
```
+
+</TypeScriptExample>
### `.info()`
- Outputs information about the image, such as `format`, `fileSize`, `width`, and `height`.
diff --git a/src/content/docs/images/transform-images/control-origin-access.mdx b/src/content/docs/images/transform-images/control-origin-access.mdx
index 9b0eb18380d607..f25276454220c7 100644
--- a/src/content/docs/images/transform-images/control-origin-access.mdx
+++ b/src/content/docs/images/transform-images/control-origin-access.mdx
@@ -42,7 +42,7 @@ export default {
const requestURL = new URL(request.url);
// Append the request path such as "/assets/image1.jpg" to the hiddenImageOrigin.
// You could also process the path to add or remove directories, modify filenames, etc.
- const imageURL = hiddenImageOrigin + requestURL.path;
+ const imageURL = hiddenImageOrigin + requestURL.pathname;
// This will fetch image from the given URL, but to the website's visitors this
// will appear as a response to the original request. Visitor’s browser will
// not see this URL.
@@ -61,13 +61,12 @@ export default {
const imageURL = … // detail omitted in this example, see the previous example
const requestURL = new URL(request.url)
- const resizingOptions = {
- width: requestURL.searchParams.get("width"),
- }
+ const width = parseInt(requestURL.searchParams.get("width"), 10);
+ const resizingOptions = { width }
// If someone tries to manipulate your image URLs to reveal higher-resolution images,
// you can catch that and refuse to serve the request (or enforce a smaller size, etc.)
if (resizingOptions.width > 1000) {
- throw Error("We don’t allow viewing images larger than 1000 pixels wide")
+ return new Response("We don't allow viewing images larger than 1000 pixels wide", { status: 400 })
}
return fetch(imageURL, {cf:{image:resizingOptions}})
},};
@@ -85,7 +84,7 @@ export default {
// The regex selects the first path component after the "images"
// prefix, and the rest of the path (e.g. "/images/first/rest")
- const match = requestURL.path.match(/images\/([^/]+)\/(.+)/);
+ const match = requestURL.pathname.match(/images\/([^/]+)\/(.+)/);
// You can require the first path component to be one of the
// predefined sizes only, and set actual dimensions accordingly.
@@ -121,14 +120,14 @@ Cloudflare image transformations cache resized images to aid performance. Images
const signedHeaders = generatedSignedHeaders();
fetch(private_url, {
- headers: signedHeaders
- cf: {
- image: {
- format: "auto",
- "origin-auth": "share-publicly"
- }
- }
-})
+ headers: signedHeaders,
+ cf: {
+ image: {
+ format: "auto",
+ "origin-auth": "share-publicly",
+ },
+ },
+});
```
When using this code, the following headers are passed through to the origin, and allow your request to be successful:
diff --git a/src/content/docs/images/transform-images/draw-overlays.mdx b/src/content/docs/images/transform-images/draw-overlays.mdx
index b4d5d46f21082f..d0df7b524725f8 100644
--- a/src/content/docs/images/transform-images/draw-overlays.mdx
+++ b/src/content/docs/images/transform-images/draw-overlays.mdx
@@ -3,7 +3,6 @@ pcx_content_type: reference
title: Draw overlays and watermarks
sidebar:
order: 5
-
---
You can draw additional images on top of a resized image, with transparency and blending effects. This enables adding of watermarks, logos, signatures, vignettes, and other effects to resized images.
@@ -12,23 +11,23 @@ This feature is available only in [Workers](/images/transform-images/transform-v
```js
fetch(imageURL, {
- cf: {
- image: {
- width: 800,
- height: 600,
- draw: [
- {
- url: 'https://example.com/branding/logo.png', // draw this image
- bottom: 5, // 5 pixels from the bottom edge
- right: 5, // 5 pixels from the right edge
- fit: 'contain', // make it fit within 100x50 area
- width: 100,
- height: 50,
- opacity: 0.8, // 20% transparent
- },
- ],
- },
- },
+ cf: {
+ image: {
+ width: 800,
+ height: 600,
+ draw: [
+ {
+ url: "https://example.com/branding/logo.png", // draw this image
+ bottom: 5, // 5 pixels from the bottom edge
+ right: 5, // 5 pixels from the right edge
+ fit: "contain", // make it fit within 100x50 area
+ width: 100,
+ height: 50,
+ opacity: 0.8, // 20% transparent
+ },
+ ],
+ },
+ },
});
```
@@ -36,37 +35,35 @@ fetch(imageURL, {
The `draw` property is an array. Overlays are drawn in the order they appear in the array (the last array entry is the topmost layer). Each item in the `draw` array is an object, which can have the following properties:
+- `url`
+ - Absolute URL of the image file to use for the drawing. It can be any of the supported file formats. For drawing watermarks or non-rectangular overlays, Cloudflare recommends that you use PNG or WebP images.
+- `width` and `height`
+ - Maximum size of the overlay image, in pixels. It must be an integer.
-* `url`
- * Absolute URL of the image file to use for the drawing. It can be any of the supported file formats. For drawing watermarks or non-rectangular overlays, Cloudflare recommends that you use PNG or WebP images.
-
-* `width` and `height`
- * Maximum size of the overlay image, in pixels. It must be an integer.
+- `fit` and `gravity`
+ - Affects interpretation of `width` and `height`. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
-* `fit` and `gravity`
- * Affects interpretation of `width` and `height`. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
+- `opacity`
+ - Floating-point number between `0` (transparent) and `1` (opaque). For example, `opacity: 0.5` makes overlay semitransparent.
-* `opacity`
- * Floating-point number between `0` (transparent) and `1` (opaque). For example, `opacity: 0.5` makes overlay semitransparent.
+- `repeat`
+ - If set to `true`, the overlay image will be tiled to cover the entire area. This is useful for stock-photo-like watermarks.
+ - If set to `"x"`, the overlay image will be tiled horizontally only (form a line).
+ - If set to `"y"`, the overlay image will be tiled vertically only (form a line).
-* `repeat`
- * If set to `true`, the overlay image will be tiled to cover the entire area. This is useful for stock-photo-like watermarks.
- * If set to `"x"`, the overlay image will be tiled horizontally only (form a line).
- * If set to `"y"`, the overlay image will be tiled vertically only (form a line).
-
-* `top`, `left`, `bottom`, `right`
- * Position of the overlay image relative to a given edge. Each property is an offset in pixels. `0` aligns exactly to the edge. For example, `left: 10` positions left side of the overlay 10 pixels from the left edge of the image it is drawn over. `bottom: 0` aligns bottom of the overlay with bottom of the background image.
+- `top`, `left`, `bottom`, `right`
+ - Position of the overlay image relative to a given edge. Each property is an offset in pixels. `0` aligns exactly to the edge. For example, `left: 10` positions left side of the overlay 10 pixels from the left edge of the image it is drawn over. `bottom: 0` aligns bottom of the overlay with bottom of the background image.
Setting both `left` and `right`, or both `top` and `bottom` is an error.
If no position is specified, the image will be centered.
-* `background`
- * Background color to add underneath the overlay image. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
+- `background`
+ - Background color to add underneath the overlay image. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
-* `rotate`
- * Number of degrees to rotate the overlay image by. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
+- `rotate`
+ - Number of degrees to rotate the overlay image by. Same as [for the main image](/images/transform-images/transform-via-workers/#fetch-options).
## Draw using the Images binding
@@ -76,14 +73,15 @@ The accepted options for the overlaid image are `opacity`, `repeat`, `top`, `lef
```js
// Fetch image and watermark
-const img = await fetch('https://example.com/image.png');
-const watermark = await fetch('https://example.com/watermark.png');
+const img = await fetch("https://example.com/image.png");
+const watermark = await fetch("https://example.com/watermark.png");
-const response = await env.IMAGES.input(img.body)
- .transform({ width: 1024 })
- .draw(watermark.body, { "opacity": 0.25, "repeat": true })
- .output({ format: "image/avif" })
- .response();
+const response = (
+ await env.IMAGES.input(img.body)
+ .transform({ width: 1024 })
+ .draw(watermark.body, { opacity: 0.25, repeat: true })
+ .output({ format: "image/avif" })
+).response();
return response;
```
@@ -95,10 +93,10 @@ In the example below, the watermark is manipulated with `rotate` and `width` bef
```js
// Fetch image and watermark
const response = (
- await env.IMAGES.input(img.body)
- .transform({ width: 1024 })
- .draw(watermark.body, { "opacity": 0.25, "repeat": true })
- .output({ format: "image/avif" })
+ await env.IMAGES.input(img.body)
+ .transform({ width: 1024 })
+ .draw(watermark.body, { opacity: 0.25, repeat: true })
+ .output({ format: "image/avif" })
).response();
```
@@ -114,7 +112,7 @@ image: {
repeat: true, // Tiled over entire image
opacity: 0.2, // and subtly blended
},
- ];
+ ],
}
```
@@ -128,7 +126,7 @@ image: {
bottom: 5, // Positioned near bottom right corner
right: 5,
},
- ];
+ ],
}
```
@@ -141,7 +139,7 @@ image: {
url: 'https://example.com/play-button.png',
// Center position is the default
},
- ];
+ ],
}
```
@@ -155,6 +153,6 @@ image: {
{ url: 'https://example.com/watermark.png', repeat: true, opacity: 0.2 },
{ url: 'https://example.com/play-button.png' },
{ url: 'https://example.com/by-me.png', bottom: 5, right: 5 },
- ];
+ ],
}
```
diff --git a/src/content/docs/images/transform-images/transform-via-workers.mdx b/src/content/docs/images/transform-images/transform-via-workers.mdx
index 5a90a560c4e879..0a5bef52bc2512 100644
--- a/src/content/docs/images/transform-images/transform-via-workers.mdx
+++ b/src/content/docs/images/transform-images/transform-via-workers.mdx
@@ -158,15 +158,17 @@ To perform resizing and optimizations, the Worker must be able to fetch the orig
You must detect which requests must go directly to the origin server. When the `image-resizing` string is present in the `Via` header, it means that it is a request coming from another Worker and should be directed to the origin server:
```js
-addEventListener("fetch", event => {
- // If this request is coming from image resizing worker,
- // avoid causing an infinite loop by resizing it again:
- if (/image-resizing/.test(event.request.headers.get("via"))) {
- return fetch(event.request)
- }
-
- // Now you can safely use image resizing here
-}
+export default {
+ async fetch(request) {
+ // If this request is coming from image resizing worker,
+ // avoid causing an infinite loop by resizing it again:
+ if (/image-resizing/.test(request.headers.get("via"))) {
+ return fetch(request);
+ }
+
+ // Now you can safely use image resizing here
+ },
+};
```
## Lack of preview in the dashboard
@@ -192,7 +194,7 @@ if (response.ok || response.redirected) {
// fetch() may respond with status 304
return response;
} else {
- return response.redirect(imageURL, 307);
+ return Response.redirect(imageURL, 307);
}
```
@@ -232,11 +234,11 @@ export default {
if (url.searchParams.has("fit"))
options.cf.image.fit = url.searchParams.get("fit");
if (url.searchParams.has("width"))
- options.cf.image.width = url.searchParams.get("width");
+ options.cf.image.width = parseInt(url.searchParams.get("width"), 10);
if (url.searchParams.has("height"))
- options.cf.image.height = url.searchParams.get("height");
+ options.cf.image.height = parseInt(url.searchParams.get("height"), 10);
if (url.searchParams.has("quality"))
- options.cf.image.quality = url.searchParams.get("quality");
+ options.cf.image.quality = parseInt(url.searchParams.get("quality"), 10);
// Your Worker is responsible for automatic format negotiation. Check the Accept header.
const accept = request.headers.get("Accept");
@@ -290,4 +292,4 @@ When testing image resizing, please deploy the script first. Resizing will not b
Resized images are always cached. They are cached as additional variants under a cache entry for the URL of the full-size source image in the `fetch` subrequest. Do not worry about using many different Workers or many external URLs — they do not influence caching of resized images, and you do not need to do anything for resized images to be cached correctly.
-If you use the `cacheKey` fetch option to unify the caches of multiple source URLs, do not include any resizing options in the `cacheKey`. Doing so will fragment the cache and hurt caching performance. The `cacheKey` should reference only the full-size source image URL, not any of its resized versions.
\ No newline at end of file
+If you use the `cacheKey` fetch option to unify the caches of multiple source URLs, do not include any resizing options in the `cacheKey`. Doing so will fragment the cache and hurt caching performance. The `cacheKey` should reference only the full-size source image URL, not any of its resized versions.
diff --git a/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx b/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx
index 016df446c86121..0681476f7bfdb0 100644
--- a/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx
+++ b/src/content/docs/images/tutorials/optimize-user-uploaded-image.mdx
@@ -7,8 +7,7 @@ sidebar:
reviewed: 2025-04-03
---
-import { WranglerConfig, Render } from "~/components"
-
+import { WranglerConfig, Render, TypeScriptExample } from "~/components";
In this guide, you will build an app that accepts image uploads, overlays the image with a visual watermark, then stores the transformed image in your R2 bucket.
@@ -24,21 +23,19 @@ You will learn how to connect Developer Platform services to your Worker through
Before you begin, you will need to do the following:
-* Add an [Images Paid](/images/pricing/#images-paid) subscription to your account. This allows you to bind the Images API to your Worker.
-* Create an [R2 bucket](/r2/get-started/#2-create-a-bucket), where the transformed images will be uploaded.
-* Create a new Worker project.
+- Add an [Images Paid](/images/pricing/#images-paid) subscription to your account. This allows you to bind the Images API to your Worker.
+- Create an [R2 bucket](/r2/get-started/#2-create-a-bucket), where the transformed images will be uploaded.
+- Create a new Worker project.
If you are new, review how to [create your first Worker](/workers/get-started/guide/).
-
## 1: Set up your Worker project
To start, you will need to set up your project to use the following resources on the Developer Platform:
-* [Images](/images/transform-images/bindings/) to transform, resize, and encode images directly from your Worker.
-* [R2](/r2/api/workers/workers-api-usage/) to connect the bucket for storing transformed images.
-* [Assets](/workers/static-assets/binding/) to access a static image that will be used as the visual watermark.
-
+- [Images](/images/transform-images/bindings/) to transform, resize, and encode images directly from your Worker.
+- [R2](/r2/api/workers/workers-api-usage/) to connect the bucket for storing transformed images.
+- [Assets](/workers/static-assets/binding/) to access a static image that will be used as the visual watermark.
### Add the bindings to your Wrangler configuration
@@ -76,7 +73,6 @@ The assets directory of your project lets you upload static assets as part of yo
After you configure your Wrangler file, upload the overlay image to the specified directory. In our example app, the directory `./assets` contains the overlay image.
-
## 2: Build your frontend
You will need to build the interface for the app that lets users upload images.
@@ -85,7 +81,9 @@ In this example, the frontend is rendered directly from the Worker script.
To do this, make a new `html` variable, which contains a `form` element for accepting uploads. In `fetch`, construct a new `Response` with a `Content-Type: text/html` header to serve your static HTML site to the client:
-```js
+<TypeScriptExample>
+
+```ts
const html = `
@@ -103,51 +101,63 @@ const html = `
`;
+interface Env {
+ IMAGES: ImagesBinding;
+ R2: R2Bucket;
+ ASSETS: Fetcher;
+}
+
export default {
- async fetch(request, env) {
- if (request.method === "GET") {
- return new Response(html, {headers:{'Content-Type':'text/html'},})
- }
- if (request.method ==="POST") {
- // This is called when the user submits the form
- }
- }
-};
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.method === "GET") {
+ return new Response(html, { headers: { "Content-Type": "text/html" } });
+ }
+ if (request.method === "POST") {
+ // This is called when the user submits the form
+ }
+ },
+} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
## 3: Read the uploaded image
After you have a `form`, you need to make sure you can transform the uploaded images.
Because the `form` lets users upload directly from their disk, you cannot use `fetch()` to get an image from a URL. Instead, you will operate on the body of the image as a stream of bytes.
-To do this, parse the data from the `form` as an array buffer:
+To do this, parse the uploaded file from the `form` and get its stream:
+<TypeScriptExample>
+
-```js
+```ts
export default {
- async fetch(request, env) {
- if (request.method === "GET") {
- return new Response(html, {headers:{'Content-Type':'text/html'},})
- }
- if (request.method === "POST") {
- try {
- // Parse form data
- const formData = await request.formData();
- const file = formData.get("image");
- if (!file || typeof file.arrayBuffer !== "function") {
- return new Response("No image file provided", { status: 400 });
- }
-
- // Read uploaded image as array buffer
- const fileBuffer = await file.arrayBuffer();
- } catch (err) {
- console.log(err.message)
- }
- }
- }
-};
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.method === "GET") {
+ return new Response(html, { headers: { "Content-Type": "text/html" } });
+ }
+ if (request.method === "POST") {
+ try {
+ // Parse form data
+ const formData = await request.formData();
+ const file = formData.get("image");
+ if (!file || typeof file.stream !== "function") {
+ return new Response("No image file provided", { status: 400 });
+ }
+
+ // Get uploaded image as a readable stream
+ const fileStream = file.stream();
+ } catch (err) {
+ console.log((err as Error).message);
+ }
+ }
+ },
+} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
## 4: Transform the image
@@ -162,101 +172,112 @@ For every uploaded image, you want to perform the following actions:
To fetch the overlay image from the assets directory, create a function `assetUrl` then use `env.ASSETS` to retrieve the `watermark.png` image:
-```js
-var __defProp = Object.defineProperty;
-var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+<TypeScriptExample>
-function assetUrl(request, path) {
+```ts
+function assetUrl(request: Request, path: string): URL {
const url = new URL(request.url);
url.pathname = path;
return url;
}
-__name(assetUrl, "assetUrl");
export default {
- async fetch(request, env) {
- if (request.method === "GET") {
- return new Response(html, {headers:{'Content-Type':'text/html'},})
- }
- if (request.method === "POST") {
- try {
- // Parse form data
- const formData = await request.formData();
- const file = formData.get("image");
- if (!file || typeof file.arrayBuffer !== "function") {
- return new Response("No image file provided", { status: 400 });
- }
-
- // Read uploaded image as array buffer
- const fileBuffer = await file.arrayBuffer();
-
- // Fetch image as watermark
- let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
- } catch (err) {
- console.log(err.message)
- }
- }
- }
-};
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.method === "GET") {
+ return new Response(html, { headers: { "Content-Type": "text/html" } });
+ }
+ if (request.method === "POST") {
+ try {
+ // Parse form data
+ const formData = await request.formData();
+ const file = formData.get("image");
+ if (!file || typeof file.stream !== "function") {
+ return new Response("No image file provided", { status: 400 });
+ }
+
+ // Get uploaded image as a readable stream
+ const fileStream = file.stream();
+
+ // Fetch image as watermark
+ const watermarkResponse = await env.ASSETS.fetch(
+ assetUrl(request, "watermark.png"),
+ );
+ const watermarkStream = watermarkResponse.body;
+ } catch (err) {
+ console.log((err as Error).message);
+ }
+ }
+ },
+} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
### Watermark and transcode the image
You can interact with the Images binding through `env.IMAGES`.
This is where you will put all of the optimization operations you want to perform on the image. Here, you will use the `.draw()` function to apply a visual watermark over the uploaded image, then use `.output()` to encode the image as AVIF:
-```js
-var __defProp = Object.defineProperty;
-var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+<TypeScriptExample>
-function assetUrl(request, path) {
+```ts
+function assetUrl(request: Request, path: string): URL {
const url = new URL(request.url);
url.pathname = path;
return url;
}
-__name(assetUrl, "assetUrl");
export default {
- async fetch(request, env) {
- if (request.method === "GET") {
- return new Response(html, {headers:{'Content-Type':'text/html'},})
- }
- if (request.method === "POST") {
- try {
- // Parse form data
- const formData = await request.formData();
- const file = formData.get("image");
- if (!file || typeof file.arrayBuffer !== "function") {
- return new Response("No image file provided", { status: 400 });
- }
-
- // Read uploaded image as array buffer
- const fileBuffer = await file.arrayBuffer();
-
- // Fetch image as watermark
- let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
-
- // Apply watermark and convert to AVIF
- const imageResponse = (
- await env.IMAGES.input(fileBuffer)
- // Draw the watermark on top of the image
- .draw(
- env.IMAGES.input(watermarkStream)
- .transform({ width: 100, height: 100 }),
- { bottom: 10, right: 10, opacity: 0.75 }
- )
- // Output the final image as AVIF
- .output({ format: "image/avif" })
- ).response();
- } catch (err) {
- console.log(err.message)
- }
- }
- }
-};
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.method === "GET") {
+ return new Response(html, { headers: { "Content-Type": "text/html" } });
+ }
+ if (request.method === "POST") {
+ try {
+ // Parse form data
+ const formData = await request.formData();
+ const file = formData.get("image");
+ if (!file || typeof file.stream !== "function") {
+ return new Response("No image file provided", { status: 400 });
+ }
+
+ // Get uploaded image as a readable stream
+ const fileStream = file.stream();
+
+ // Fetch image as watermark
+ const watermarkResponse = await env.ASSETS.fetch(
+ assetUrl(request, "watermark.png"),
+ );
+ const watermarkStream = watermarkResponse.body;
+ if (!watermarkStream) {
+ return new Response("Failed to fetch watermark", { status: 500 });
+ }
+
+ // Apply watermark and convert to AVIF
+ const imageResponse = (
+ await env.IMAGES.input(fileStream)
+ // Draw the watermark on top of the image
+ .draw(
+ env.IMAGES.input(watermarkStream).transform({
+ width: 100,
+ height: 100,
+ }),
+ { bottom: 10, right: 10, opacity: 0.75 },
+ )
+ // Output the final image as AVIF
+ .output({ format: "image/avif" })
+ ).response();
+ } catch (err) {
+ console.log((err as Error).message);
+ }
+ }
+ },
+} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
## 5: Upload to R2
Upload the transformed image to R2.
@@ -265,65 +286,100 @@ By creating a `fileName` variable, you can specify the name of the transformed i
Here is the full code for the example:
-```js
-var __defProp = Object.defineProperty;
-var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+<TypeScriptExample>
-function assetUrl(request, path) {
+```ts
+interface Env {
+ IMAGES: ImagesBinding;
+ R2: R2Bucket;
+ ASSETS: Fetcher;
+}
+
+const html = `
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<title>Upload Image</title>
+</head>
+<body>
+<h1>Upload an image</h1>
+<form method="post" enctype="multipart/form-data">
+<input type="file" name="image" accept="image/*" required />
+<input type="submit" value="Upload" /></form></body></html>
+`;
+
+function assetUrl(request: Request, path: string): URL {
const url = new URL(request.url);
url.pathname = path;
return url;
}
-__name(assetUrl, "assetUrl");
export default {
- async fetch(request, env) {
- if (request.method === "GET") {
- return new Response(html, {headers:{'Content-Type':'text/html'},})
- }
- if (request.method === "POST") {
- try {
- // Parse form data
- const formData = await request.formData();
- const file = formData.get("image");
- if (!file || typeof file.arrayBuffer !== "function") {
- return new Response("No image file provided", { status: 400 });
- }
-
- // Read uploaded image as array buffer
- const fileBuffer = await file.arrayBuffer();
-
- // Fetch image as watermark
- let watermarkStream = (await env.ASSETS.fetch(assetUrl(request, "watermark.png"))).body;
-
- // Apply watermark and convert to AVIF
- const imageResponse = (
- await env.IMAGES.input(fileBuffer)
- // Draw the watermark on top of the image
- .draw(
- env.IMAGES.input(watermarkStream)
- .transform({ width: 100, height: 100 }),
- { bottom: 10, right: 10, opacity: 0.75 }
- )
- // Output the final image as AVIF
- .output({ format: "image/avif" })
- ).response();
-
- // Add timestamp to file name
- const fileName = `image-${Date.now()}.avif`;
-
- // Upload to R2
- await env.R2.put(fileName, imageResponse.body)
-
- return new Response(`Image uploaded successfully as ${fileName}`, { status: 200 });
- } catch (err) {
- console.log(err.message)
- }
- }
- }
-};
+ async fetch(request: Request, env: Env): Promise<Response> {
+ if (request.method === "GET") {
+ return new Response(html, { headers: { "Content-Type": "text/html" } });
+ }
+ if (request.method === "POST") {
+ try {
+ // Parse form data
+ const formData = await request.formData();
+ const file = formData.get("image");
+ if (!file || typeof file.stream !== "function") {
+ return new Response("No image file provided", { status: 400 });
+ }
+
+ // Get uploaded image as a readable stream
+ const fileStream = file.stream();
+
+ // Fetch image as watermark
+ const watermarkResponse = await env.ASSETS.fetch(
+ assetUrl(request, "watermark.png"),
+ );
+ const watermarkStream = watermarkResponse.body;
+ if (!watermarkStream) {
+ return new Response("Failed to fetch watermark", { status: 500 });
+ }
+
+ // Apply watermark and convert to AVIF
+ const imageResponse = (
+ await env.IMAGES.input(fileStream)
+ // Draw the watermark on top of the image
+ .draw(
+ env.IMAGES.input(watermarkStream).transform({
+ width: 100,
+ height: 100,
+ }),
+ { bottom: 10, right: 10, opacity: 0.75 },
+ )
+ // Output the final image as AVIF
+ .output({ format: "image/avif" })
+ ).response();
+
+ // Add timestamp to file name
+ const fileName = `image-${Date.now()}.avif`;
+
+ // Upload to R2
+ await env.R2.put(fileName, imageResponse.body);
+
+ return new Response(`Image uploaded successfully as ${fileName}`, {
+ status: 200,
+ });
+ } catch (err) {
+ console.log((err as Error).message);
+ return new Response("Internal error", { status: 500 });
+ }
+ }
+ return new Response("Method not allowed", { status: 405 });
+ },
+} satisfies ExportedHandler<Env>;
```
+
+</TypeScriptExample>
## Next steps
In this tutorial, you learned how to connect your Worker to various resources on the Developer Platform to build an app that accepts image uploads, transform images, and uploads the output to R2.
diff --git a/src/content/docs/images/upload-images/upload-file-worker.mdx b/src/content/docs/images/upload-images/upload-file-worker.mdx
index 14635dc16fe578..bbc041e6b4644a 100644
--- a/src/content/docs/images/upload-images/upload-file-worker.mdx
+++ b/src/content/docs/images/upload-images/upload-file-worker.mdx
@@ -2,55 +2,65 @@
pcx_content_type: how-to
title: Upload via a Worker
description: Learn how to upload images to Cloudflare using Workers. This guide provides code examples for uploading both standard and AI-generated images efficiently.
-
---
+import { TypeScriptExample } from "~/components";
+
You can use a Worker to upload your image to Cloudflare Images.
Refer to the example below or refer to the [Workers documentation](/workers/) for more information.
+<TypeScriptExample>
+
```ts
-const API_URL = "https://api.cloudflare.com/client/v4/accounts/<ACCOUNT_ID>/images/v1";
+const API_URL =
+ "https://api.cloudflare.com/client/v4/accounts/<ACCOUNT_ID>/images/v1";
const TOKEN = "<API_TOKEN>";
const image = await fetch("https://example.com/image.png");
const bytes = await image.bytes();
const formData = new FormData();
-formData.append('file', new File([bytes], 'image.png'));
+formData.append("file", new File([bytes], "image.png"));
const response = await fetch(API_URL, {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${TOKEN}`,
- },
- body: formData,
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${TOKEN}`,
+ },
+ body: formData,
});
```
+
+</TypeScriptExample>
+
## Upload from AI generated images
You can use an AI Worker to generate an image and then upload that image to store it in Cloudflare Images. For more information about using Workers AI to generate an image, refer to the [SDXL-Lightning Model](/workers-ai/models/stable-diffusion-xl-lightning).
+<TypeScriptExample>
+
```ts
-const API_URL = "https://api.cloudflare.com/client/v4/accounts/<ACCOUNT_ID>/images/v1";
+const API_URL =
+ "https://api.cloudflare.com/client/v4/accounts/<ACCOUNT_ID>/images/v1";
const TOKEN = "YOUR_TOKEN_HERE";
-const stream = await env.AI.run(
- "@cf/bytedance/stable-diffusion-xl-lightning",
- {
- prompt: YOUR_PROMPT_HERE
- }
-);
-const bytes = await (new Response(stream)).bytes();
+const stream = await env.AI.run("@cf/bytedance/stable-diffusion-xl-lightning", {
+ prompt: YOUR_PROMPT_HERE,
+});
+const bytes = await new Response(stream).bytes();
const formData = new FormData();
-formData.append('file', new File([bytes], 'image.jpg');
+formData.append("file", new File([bytes], "image.jpg"));
const response = await fetch(API_URL, {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${TOKEN}`,
- },
- body: formData,
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${TOKEN}`,
+ },
+ body: formData,
});
```
+
+</TypeScriptExample>
+