Skip to content

Commit 2a8a3cd

Browse files
committed
[WebGPU] Add backend build system
CMake integration: backend library target, Vulkan FlatBuffer schema dependency, root build flags, and glslc guard fix.
1 parent cdb38bf commit 2a8a3cd

6 files changed

Lines changed: 179 additions & 1 deletion

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ cmake-android-out/
1616
cmake-ios-out/
1717
cmake-out*
1818
cmake-out-android/
19+
backends/webgpu/third-party/
1920
build-android/
2021
build-x86/
2122
build-hexagon/

CMakeLists.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1154,6 +1154,11 @@ if(EXECUTORCH_BUILD_VULKAN)
11541154
list(APPEND _executorch_backends vulkan_backend vulkan_schema)
11551155
endif()
11561156

1157+
if(EXECUTORCH_BUILD_WEBGPU)
1158+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/webgpu)
1159+
list(APPEND _executorch_backends webgpu_backend)
1160+
endif()
1161+
11571162
if(EXECUTORCH_BUILD_VGF)
11581163
list(APPEND _executorch_backends vgf_backend)
11591164
endif()

backends/vulkan/cmake/ShaderLibrary.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ endif()
2626

2727
find_program(GLSLC_PATH glslc PATHS $ENV{PATH})
2828

29-
if(NOT GLSLC_PATH)
29+
if(NOT GLSLC_PATH AND EXECUTORCH_BUILD_VULKAN)
3030
message(
3131
FATAL_ERROR
3232
"glslc from the Vulkan SDK must be installed to build the Vulkan backend. "

backends/webgpu/CMakeLists.txt

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

cmake_minimum_required(VERSION 3.19)

if(NOT EXECUTORCH_ROOT)
  set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()

include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)

# Ensure vulkan_schema is available even when EXECUTORCH_BUILD_VULKAN is OFF.
# The WebGPU backend reuses the Vulkan FlatBuffer serialization format.
if(NOT TARGET vulkan_schema)
  # We only need the schema generation from the Vulkan backend. Including the
  # Vulkan CMakeLists.txt defines vulkan_schema unconditionally; the full
  # vulkan_backend target remains gated behind EXECUTORCH_BUILD_VULKAN.
  add_subdirectory(
    ${CMAKE_CURRENT_SOURCE_DIR}/../vulkan
    ${CMAKE_CURRENT_BINARY_DIR}/_vulkan_schema
  )
endif()

set(WEBGPU_SRCS
    runtime/WebGPUBackend.cpp
    runtime/WebGPUGraph.cpp
    runtime/WebGPUDelegateHeader.cpp
    runtime/ops/OperatorRegistry.cpp
    runtime/ops/add/BinaryOp.cpp
)

add_library(webgpu_backend ${WEBGPU_SRCS})

target_include_directories(
  webgpu_backend PRIVATE $<BUILD_INTERFACE:${EXECUTORCH_ROOT}/..>
)

target_link_libraries(webgpu_backend PRIVATE vulkan_schema executorch_core)

# -fexceptions is a GCC/Clang flag; gate it so MSVC builds do not receive an
# unknown option (MSVC enables exception handling via its own defaults).
target_compile_options(
  webgpu_backend PRIVATE $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-fexceptions>
)

# Link with --whole-archive for static registration of backend + ops
executorch_target_link_options_shared_lib(webgpu_backend)

# Require C++17 exactly — without CXX_STANDARD_REQUIRED, CMake may silently
# fall back to an older standard when the compiler lacks C++17 support.
set_target_properties(
  webgpu_backend PROPERTIES CXX_STANDARD 17 CXX_STANDARD_REQUIRED ON
)

# CMAKE_INSTALL_LIBDIR is normally provided by the top-level build; define it
# via GNUInstallDirs when this directory is configured standalone so the
# install destination below is never empty.
if(NOT DEFINED CMAKE_INSTALL_LIBDIR)
  include(GNUInstallDirs)
endif()

install(
  TARGETS webgpu_backend
  EXPORT ExecuTorchTargets
  DESTINATION ${CMAKE_INSTALL_LIBDIR}
)

backends/webgpu/README.md

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,113 @@
1+
# WebGPU Backend
2+
3+
Run ExecuTorch models on the GPU via [WebGPU](https://www.w3.org/TR/webgpu/). The backend compiles delegated subgraphs into WGSL compute shaders executed natively through [wgpu-native](https://github.com/gfx-rs/wgpu-native) (Metal on macOS, Vulkan on Linux/Windows).
4+
5+
> **Status: Prototype.** The backend supports a single operator today and is under active development. See [TODO.md](TODO.md) for the roadmap.
6+
7+
## Architecture
8+
9+
```
10+
PyTorch model
11+
│ torch.export
12+
13+
Exported Program
14+
│ VulkanPartitioner (tags supported fp32 ops)
15+
16+
Edge Dialect IR
17+
│ VulkanBackend.preprocess (builds Vulkan FlatBuffer, buffer-only storage)
18+
19+
.pte file (with VH00/VK00 delegate blob)
20+
21+
22+
Native runtime (wgpu-native → Metal / Vulkan)
23+
│ WebGPUGraph::build → creates GPU buffers, pipelines, bind groups
24+
│ WebGPUGraph::execute → encodes + submits compute passes
25+
26+
GPU output (mapped back to CPU via wgpuDevicePoll)
27+
```
28+
29+
Key design choices:
30+
- **Reuses Vulkan serialization** — the delegate blob is a Vulkan FlatBuffer (`VK00`) with a `VH00` header. All tensor storage is forced to `BUFFER` (WebGPU has no 3D storage textures).
31+
- **Built-in WGSL shaders** — shader source is compiled as C++ string constants. Future work will embed fused shaders in the FlatBuffer for compile-time mega-kernel fusion.
32+
- **No Python AOT code** — directly consumes .pte files exported via `VulkanPartitioner`.
33+
34+
## Operator Support
35+
36+
| Operator | WGSL Shader | Notes |
37+
|---|---|---|
38+
| `aten.add.Tensor` | `binary_add.wgsl` | Element-wise with alpha: `out = in1 + alpha * in2` |
39+
40+
**Planned:** `sub`, `mul`, `relu`, `linear` (matmul), `softmax`, `layer_norm`
41+
42+
## Quick Start
43+
44+
### 1. Setup
45+
46+
```bash
47+
bash backends/webgpu/scripts/setup-wgpu-native.sh
48+
```
49+
50+
This downloads prebuilt wgpu-native binaries for your platform.
51+
52+
### 2. Export a model
53+
54+
```python
55+
import torch
56+
from executorch.backends.vulkan import VulkanPartitioner
57+
from executorch.exir import to_edge_transform_and_lower
58+
59+
class AddModule(torch.nn.Module):
60+
def forward(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
61+
return a + b
62+
63+
ep = torch.export.export(AddModule(), (torch.randn(4, 4), torch.randn(4, 4)))
64+
et_program = to_edge_transform_and_lower(
65+
ep, partitioner=[VulkanPartitioner()]
66+
).to_executorch()
67+
68+
with open("add.pte", "wb") as f:
69+
f.write(et_program.buffer)
70+
```
71+
72+
### 3. Build and run
73+
74+
```bash
75+
bash backends/webgpu/test/test_build_webgpu.sh
76+
```
77+
78+
This runs Python export tests, exports a .pte, builds the native runtime, and validates GPU output.
79+
80+
## Directory Structure
81+
82+
```
83+
backends/webgpu/
84+
├── CMakeLists.txt
85+
├── README.md
86+
├── TODO.md
87+
├── runtime/
88+
│ ├── WebGPUBackend.h/cpp # BackendInterface (init/execute)
89+
│ ├── WebGPUGraph.h/cpp # GPU graph: buffers, pipelines, dispatch
90+
│ ├── WebGPUDelegateHeader.h/cpp # VH00 header parser
91+
│ ├── WebGPUDevice.h/cpp # wgpu-native device abstraction
92+
│ └── ops/
93+
│ ├── OperatorRegistry.h/cpp # Op dispatch table
94+
│ └── add/
95+
│ ├── BinaryOp.cpp # aten.add.Tensor implementation
96+
│ ├── binary_add.wgsl # WGSL shader source
97+
│ └── binary_add_wgsl.h # Shader as C++ string constant
98+
├── scripts/
99+
│ └── setup-wgpu-native.sh # Download wgpu-native binaries
100+
└── test/
101+
├── conftest.py
102+
├── test_build_webgpu.sh # End-to-end build + test
103+
├── test_webgpu_native.cpp # C++ native test runner
104+
└── ops/
105+
└── add/
106+
└── test_add.py # Python export tests
107+
```
108+
109+
## Requirements
110+
111+
- **macOS**: Metal-capable GPU
112+
- **Linux**: Vulkan-capable GPU + drivers
113+
- **Build**: CMake 3.19+, conda environment with ExecuTorch installed

tools/cmake/preset/default.cmake

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,9 @@ define_overridable_option(
168168
define_overridable_option(
169169
EXECUTORCH_BUILD_VULKAN "Build the Vulkan backend" BOOL OFF
170170
)
171+
define_overridable_option(
172+
EXECUTORCH_BUILD_WEBGPU "Build the WebGPU backend" BOOL OFF
173+
)
171174
define_overridable_option(
172175
EXECUTORCH_BUILD_PORTABLE_OPS "Build portable_ops library" BOOL ON
173176
)

0 commit comments

Comments
 (0)