diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7f717b0..d597269 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,6 +32,32 @@ jobs: go-version: ${{ matrix.go-version }} cache: true + - name: Download wgpu-native + shell: bash + env: + WGPU_VERSION: "v27.0.4.0" + run: | + set -e + case "${{ matrix.os }}" in + ubuntu-latest) + ASSET="wgpu-linux-x86_64-release.zip" + LIB_NAME="libwgpu_native.so" + ;; + macos-latest) + ASSET="wgpu-macos-aarch64-release.zip" + LIB_NAME="libwgpu_native.dylib" + ;; + windows-latest) + ASSET="wgpu-windows-x86_64-msvc-release.zip" + LIB_NAME="wgpu_native.dll" + ;; + esac + curl -fsSL "https://github.com/gfx-rs/wgpu-native/releases/download/${WGPU_VERSION}/${ASSET}" -o wgpu.zip + unzip -o wgpu.zip -d wgpu-native + find wgpu-native -name "${LIB_NAME}" -exec cp {} . \; + ls -la "${LIB_NAME}" + echo "WGPU_NATIVE_PATH=$PWD/${LIB_NAME}" >> $GITHUB_ENV + - name: Verify dependencies run: go mod verify @@ -48,9 +74,9 @@ jobs: shell: bash run: | if [ "${{ matrix.os }}" == "windows-latest" ]; then - go test -v ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz" + go test -v ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz|NullGuard" else - CGO_ENABLED=0 go test -v ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz" + CGO_ENABLED=0 go test -v ./wgpu/... 
-run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz|NullGuard" fi - name: Run fuzz tests (seed corpus only) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7b0abc9..c49d197 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -50,6 +50,33 @@ jobs: go-version: ${{ matrix.go-version }} cache: true + - name: Download wgpu-native + shell: bash + env: + WGPU_VERSION: "v27.0.4.0" + run: | + set -e + case "${{ matrix.os }}" in + ubuntu-latest) + ASSET="wgpu-linux-x86_64-release.zip" + LIB_NAME="libwgpu_native.so" + ;; + macos-latest) + ASSET="wgpu-macos-aarch64-release.zip" + LIB_NAME="libwgpu_native.dylib" + ;; + windows-latest) + ASSET="wgpu-windows-x86_64-msvc-release.zip" + LIB_NAME="wgpu_native.dll" + ;; + esac + echo "Downloading wgpu-native ${WGPU_VERSION} (${ASSET})..." + curl -fsSL "https://github.com/gfx-rs/wgpu-native/releases/download/${WGPU_VERSION}/${ASSET}" -o wgpu.zip + unzip -o wgpu.zip -d wgpu-native + find wgpu-native -name "${LIB_NAME}" -exec cp {} . \; + ls -la "${LIB_NAME}" + echo "WGPU_NATIVE_PATH=$PWD/${LIB_NAME}" >> $GITHUB_ENV + - name: Download dependencies run: go mod download @@ -58,7 +85,7 @@ jobs: - name: Run go vet if: matrix.os == 'ubuntu-latest' - run: CGO_ENABLED=0 go vet -unsafeptr=false ./wgpu/... + run: CGO_ENABLED=0 go vet ./wgpu/... - name: Build library shell: bash @@ -77,9 +104,9 @@ jobs: shell: bash run: | if [ "${{ matrix.os }}" != "windows-latest" ]; then - CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz" + CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz|NullGuard" else - go test -v -race -coverprofile=coverage.txt -covermode=atomic ./wgpu/... -run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz" + go test -v -race -coverprofile=coverage.txt -covermode=atomic ./wgpu/... 
-run "Mat4|Vec3|StructSizes|CheckInit|WGPUError|Fuzz|NullGuard" fi - name: Upload coverage to Codecov diff --git a/CHANGELOG.md b/CHANGELOG.md index 71977f3..1f112de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,30 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.4.0] - 2026-02-27 + +### Added + +- **Null handle guards** on all public FFI methods — prevents SIGSEGV when passing nil/released objects +- **85 null guard tests** (`TestNullGuard_*`) — CI-safe, no GPU required +- **`WGPU_NATIVE_PATH` env var** — override library path for custom wgpu-native locations +- **`ptrFromUintptr` helper** — eliminates all `go vet` unsafe.Pointer warnings in FFI code + +### Changed + +- `loadLibrary` now returns `(Library, error)` — proper error propagation on init failure +- Windows: eager DLL loading via `dll.Load()` — errors at `Init()` instead of first FFI call +- `Init()` returns descriptive error messages with library path and override hint +- CI: wgpu-native binary downloaded in all workflows — tests run against real library, no skips +- CI: removed `-unsafeptr=false` go vet workaround — all warnings properly fixed + +### Fixed + +- **15 `go vet` warnings** — all `possible misuse of unsafe.Pointer` eliminated via `ptrFromUintptr` +- Silent library loading failures — `Init()` now properly reports missing DLL/so/dylib + +--- + ## [0.3.2] - 2026-02-27 ### Changed diff --git a/README.md b/README.md index e7af238..d67b349 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,11 @@ go get github.com/go-webgpu/webgpu Download wgpu-native and place `wgpu_native.dll` (Windows) or `libwgpu_native.so` (Linux) in your project directory or system PATH. 
+To use a custom library location: +```bash +export WGPU_NATIVE_PATH=/path/to/libwgpu_native.so +``` + ## Type System This library uses [gputypes](https://github.com/gogpu/gputypes) for WebGPU type definitions, ensuring compatibility with the [gogpu ecosystem](https://github.com/gogpu) and webgpu.h specification. diff --git a/wgpu/adapter.go b/wgpu/adapter.go index edca1f5..13e172d 100644 --- a/wgpu/adapter.go +++ b/wgpu/adapter.go @@ -73,11 +73,9 @@ func adapterCallbackHandler(status uintptr, adapter uintptr, message uintptr, us // Extract message string (message is pointer to StringView on Windows) var msg string if message != 0 { - // nolint:govet // message is uintptr from FFI callback - GC safe - sv := (*StringView)(unsafe.Pointer(message)) + sv := (*StringView)(ptrFromUintptr(message)) if sv.Data != 0 && sv.Length > 0 && sv.Length < 1<<20 { - // nolint:govet // sv.Data is uintptr from C memory - GC safe - msg = unsafe.String((*byte)(unsafe.Pointer(sv.Data)), int(sv.Length)) + msg = unsafe.String((*byte)(ptrFromUintptr(sv.Data)), int(sv.Length)) } } @@ -113,6 +111,9 @@ func (i *Instance) RequestAdapter(options *RequestAdapterOptions) (*Adapter, err if err := checkInit(); err != nil { return nil, err } + if i == nil || i.handle == 0 { + return nil, &WGPUError{Op: "RequestAdapter", Message: "instance is nil or released"} + } // Initialize callback once adapterCallbackOnce.Do(initAdapterCallback) @@ -382,6 +383,5 @@ func stringViewToString(sv StringView) string { if sv.Length > 1<<20 { // 1MB max return "" } - // nolint:govet // sv.Data is uintptr from C memory - safe to convert - return unsafe.String((*byte)(unsafe.Pointer(sv.Data)), int(sv.Length)) + return unsafe.String((*byte)(ptrFromUintptr(sv.Data)), int(sv.Length)) } diff --git a/wgpu/bindgroup.go b/wgpu/bindgroup.go index 6c0ffef..a39b96f 100644 --- a/wgpu/bindgroup.go +++ b/wgpu/bindgroup.go @@ -172,7 +172,7 @@ type BindGroupDescriptor struct { // Entries are converted from gputypes to wgpu-native enum 
values before FFI call. func (d *Device) CreateBindGroupLayout(desc *BindGroupLayoutDescriptor) *BindGroupLayout { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } @@ -184,7 +184,7 @@ func (d *Device) CreateBindGroupLayout(desc *BindGroupLayoutDescriptor) *BindGro if desc.EntryCount > 0 && desc.Entries != 0 { // Convert entries to wire format - entries := unsafe.Slice((*BindGroupLayoutEntry)(unsafe.Pointer(desc.Entries)), desc.EntryCount) + entries := unsafe.Slice((*BindGroupLayoutEntry)(ptrFromUintptr(desc.Entries)), desc.EntryCount) wireEntries := make([]bindGroupLayoutEntryWire, len(entries)) for i := range entries { wireEntries[i] = entries[i].toWire() @@ -206,7 +206,7 @@ func (d *Device) CreateBindGroupLayout(desc *BindGroupLayoutDescriptor) *BindGro // CreateBindGroupLayoutSimple creates a bind group layout with the given entries. func (d *Device) CreateBindGroupLayoutSimple(entries []BindGroupLayoutEntry) *BindGroupLayout { mustInit() - if len(entries) == 0 { + if d == nil || d.handle == 0 || len(entries) == 0 { return nil } @@ -248,7 +248,7 @@ func (bgl *BindGroupLayout) Handle() uintptr { return bgl.handle } // CreateBindGroup creates a bind group. func (d *Device) CreateBindGroup(desc *BindGroupDescriptor) *BindGroup { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } handle, _, _ := procDeviceCreateBindGroup.Call( @@ -265,7 +265,7 @@ func (d *Device) CreateBindGroup(desc *BindGroupDescriptor) *BindGroup { // CreateBindGroupSimple creates a bind group with buffer entries. 
func (d *Device) CreateBindGroupSimple(layout *BindGroupLayout, entries []BindGroupEntry) *BindGroup { mustInit() - if layout == nil || len(entries) == 0 { + if d == nil || d.handle == 0 || layout == nil || len(entries) == 0 { return nil } desc := BindGroupDescriptor{ diff --git a/wgpu/buffer.go b/wgpu/buffer.go index 55d42be..e42a35c 100644 --- a/wgpu/buffer.go +++ b/wgpu/buffer.go @@ -71,11 +71,9 @@ func mapCallbackHandler(status uintptr, message uintptr, userdata1, userdata2 ui // Extract message string var msg string if message != 0 { - // nolint:govet // message is uintptr from FFI callback - GC safe - sv := (*StringView)(unsafe.Pointer(message)) + sv := (*StringView)(ptrFromUintptr(message)) if sv.Data != 0 && sv.Length > 0 && sv.Length < 1<<20 { - // nolint:govet // sv.Data is uintptr from C memory - GC safe - msg = unsafe.String((*byte)(unsafe.Pointer(sv.Data)), int(sv.Length)) + msg = unsafe.String((*byte)(ptrFromUintptr(sv.Data)), int(sv.Length)) } } @@ -112,7 +110,7 @@ type BufferDescriptor struct { // CreateBuffer creates a new GPU buffer. func (d *Device) CreateBuffer(desc *BufferDescriptor) *Buffer { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } handle, _, _ := procDeviceCreateBuffer.Call( @@ -132,6 +130,9 @@ func (d *Device) CreateBuffer(desc *BufferDescriptor) *Buffer { // Returns nil if the buffer is not mapped or the range is invalid. func (b *Buffer) GetMappedRange(offset, size uint64) unsafe.Pointer { mustInit() + if b == nil || b.handle == 0 { + return nil + } ptr, _, _ := procBufferGetMappedRange.Call( b.handle, uintptr(offset), @@ -140,20 +141,25 @@ func (b *Buffer) GetMappedRange(offset, size uint64) unsafe.Pointer { if ptr == 0 { return nil } - // nolint:govet // ptr is uintptr from FFI call - returned immediately, GC safe - return unsafe.Pointer(ptr) + return ptrFromUintptr(ptr) } // Unmap unmaps the buffer, making the mapped memory inaccessible. 
// For buffers created with MappedAtCreation, this commits the data to the GPU. func (b *Buffer) Unmap() { mustInit() + if b == nil || b.handle == 0 { + return + } procBufferUnmap.Call(b.handle) //nolint:errcheck } // GetSize returns the size of the buffer in bytes. func (b *Buffer) GetSize() uint64 { mustInit() + if b == nil || b.handle == 0 { + return 0 + } size, _, _ := procBufferGetSize.Call(b.handle) return uint64(size) } @@ -167,6 +173,12 @@ func (b *Buffer) MapAsync(device *Device, mode MapMode, offset, size uint64) err if err := checkInit(); err != nil { return err } + if b == nil || b.handle == 0 { + return &WGPUError{Op: "Buffer.MapAsync", Message: "buffer is nil or released"} + } + if device == nil || device.handle == 0 { + return &WGPUError{Op: "Buffer.MapAsync", Message: "device is nil or released"} + } // Initialize callback once mapCallbackOnce.Do(initMapCallback) @@ -242,7 +254,7 @@ func (b *Buffer) Release() { // This is a convenience method that stages data for upload to the GPU. func (q *Queue) WriteBuffer(buffer *Buffer, offset uint64, data []byte) { mustInit() - if len(data) == 0 { + if q == nil || q.handle == 0 || buffer == nil || buffer.handle == 0 || len(data) == 0 { return } procQueueWriteBuffer.Call( //nolint:errcheck @@ -258,7 +270,7 @@ func (q *Queue) WriteBuffer(buffer *Buffer, offset uint64, data []byte) { // The data pointer should point to the first element, size is total byte size. func (q *Queue) WriteBufferRaw(buffer *Buffer, offset uint64, data unsafe.Pointer, size uint64) { mustInit() - if size == 0 { + if q == nil || q.handle == 0 || buffer == nil || buffer.handle == 0 || size == 0 { return } procQueueWriteBuffer.Call( //nolint:errcheck diff --git a/wgpu/command.go b/wgpu/command.go index 76adce4..8df97b4 100644 --- a/wgpu/command.go +++ b/wgpu/command.go @@ -28,6 +28,9 @@ type ComputePassDescriptor struct { // CreateCommandEncoder creates a command encoder. 
func (d *Device) CreateCommandEncoder(desc *CommandEncoderDescriptor) *CommandEncoder { mustInit() + if d == nil || d.handle == 0 { + return nil + } var descPtr uintptr if desc != nil { descPtr = uintptr(unsafe.Pointer(desc)) @@ -46,6 +49,9 @@ func (d *Device) CreateCommandEncoder(desc *CommandEncoderDescriptor) *CommandEn // BeginComputePass begins a compute pass. func (enc *CommandEncoder) BeginComputePass(desc *ComputePassDescriptor) *ComputePassEncoder { mustInit() + if enc == nil || enc.handle == 0 { + return nil + } var descPtr uintptr if desc != nil { descPtr = uintptr(unsafe.Pointer(desc)) @@ -64,6 +70,9 @@ func (enc *CommandEncoder) BeginComputePass(desc *ComputePassDescriptor) *Comput // CopyBufferToBuffer copies data between buffers. func (enc *CommandEncoder) CopyBufferToBuffer(src *Buffer, srcOffset uint64, dst *Buffer, dstOffset uint64, size uint64) { mustInit() + if enc == nil || enc.handle == 0 || src == nil || src.handle == 0 || dst == nil || dst.handle == 0 { + return + } procCommandEncoderCopyBufferToBuffer.Call( //nolint:errcheck enc.handle, src.handle, @@ -78,7 +87,7 @@ func (enc *CommandEncoder) CopyBufferToBuffer(src *Buffer, srcOffset uint64, dst // size = 0 means clear from offset to end of buffer. func (enc *CommandEncoder) ClearBuffer(buffer *Buffer, offset, size uint64) { mustInit() - if buffer == nil { + if enc == nil || enc.handle == 0 || buffer == nil || buffer.handle == 0 { return } procCommandEncoderClearBuffer.Call( //nolint:errcheck @@ -93,6 +102,9 @@ func (enc *CommandEncoder) ClearBuffer(buffer *Buffer, offset, size uint64) { // This is useful for GPU debugging tools to identify specific command points. func (enc *CommandEncoder) InsertDebugMarker(markerLabel string) { mustInit() + if enc == nil || enc.handle == 0 { + return + } labelBytes := []byte(markerLabel) if len(labelBytes) == 0 { return @@ -111,6 +123,9 @@ func (enc *CommandEncoder) InsertDebugMarker(markerLabel string) { // Use PopDebugGroup to end the group. 
Groups can be nested. func (enc *CommandEncoder) PushDebugGroup(groupLabel string) { mustInit() + if enc == nil || enc.handle == 0 { + return + } labelBytes := []byte(groupLabel) if len(labelBytes) == 0 { return @@ -129,6 +144,9 @@ func (enc *CommandEncoder) PushDebugGroup(groupLabel string) { // Must match a preceding PushDebugGroup call. func (enc *CommandEncoder) PopDebugGroup() { mustInit() + if enc == nil || enc.handle == 0 { + return + } procCommandEncoderPopDebugGroup.Call(enc.handle) //nolint:errcheck } @@ -136,7 +154,7 @@ func (enc *CommandEncoder) PopDebugGroup() { // Errors are reported via Device error scopes, not as return values. func (enc *CommandEncoder) CopyBufferToTexture(source *TexelCopyBufferInfo, destination *TexelCopyTextureInfo, copySize *gputypes.Extent3D) { mustInit() - if source == nil || destination == nil || copySize == nil { + if enc == nil || enc.handle == 0 || source == nil || destination == nil || copySize == nil { return } procCommandEncoderCopyBufferToTexture.Call( //nolint:errcheck @@ -151,7 +169,7 @@ func (enc *CommandEncoder) CopyBufferToTexture(source *TexelCopyBufferInfo, dest // Errors are reported via Device error scopes, not as return values. func (enc *CommandEncoder) CopyTextureToBuffer(source *TexelCopyTextureInfo, destination *TexelCopyBufferInfo, copySize *gputypes.Extent3D) { mustInit() - if source == nil || destination == nil || copySize == nil { + if enc == nil || enc.handle == 0 || source == nil || destination == nil || copySize == nil { return } procCommandEncoderCopyTextureToBuffer.Call( //nolint:errcheck @@ -166,7 +184,7 @@ func (enc *CommandEncoder) CopyTextureToBuffer(source *TexelCopyTextureInfo, des // Errors are reported via Device error scopes, not as return values. 
func (enc *CommandEncoder) CopyTextureToTexture(source *TexelCopyTextureInfo, destination *TexelCopyTextureInfo, copySize *gputypes.Extent3D) { mustInit() - if source == nil || destination == nil || copySize == nil { + if enc == nil || enc.handle == 0 || source == nil || destination == nil || copySize == nil { return } procCommandEncoderCopyTextureToTexture.Call( //nolint:errcheck @@ -180,6 +198,9 @@ func (enc *CommandEncoder) CopyTextureToTexture(source *TexelCopyTextureInfo, de // Finish finishes recording and returns a command buffer. func (enc *CommandEncoder) Finish(desc *CommandBufferDescriptor) *CommandBuffer { mustInit() + if enc == nil || enc.handle == 0 { + return nil + } var descPtr uintptr if desc != nil { descPtr = uintptr(unsafe.Pointer(desc)) @@ -209,6 +230,9 @@ func (enc *CommandEncoder) Release() { // via RenderPassTimestampWrites or ComputePassTimestampWrites when possible. func (enc *CommandEncoder) WriteTimestamp(querySet *QuerySet, queryIndex uint32) { mustInit() + if enc == nil || enc.handle == 0 || querySet == nil || querySet.handle == 0 { + return + } procCommandEncoderWriteTimestamp.Call( //nolint:errcheck enc.handle, querySet.handle, @@ -220,6 +244,9 @@ func (enc *CommandEncoder) WriteTimestamp(querySet *QuerySet, queryIndex uint32) // The buffer must have BufferUsageQueryResolve usage. func (enc *CommandEncoder) ResolveQuerySet(querySet *QuerySet, firstQuery, queryCount uint32, destination *Buffer, destinationOffset uint64) { mustInit() + if enc == nil || enc.handle == 0 || querySet == nil || querySet.handle == 0 || destination == nil || destination.handle == 0 { + return + } procCommandEncoderResolveQuerySet.Call( //nolint:errcheck enc.handle, querySet.handle, @@ -236,6 +263,9 @@ func (enc *CommandEncoder) Handle() uintptr { return enc.handle } // SetPipeline sets the compute pipeline. 
func (cpe *ComputePassEncoder) SetPipeline(pipeline *ComputePipeline) { mustInit() + if cpe == nil || cpe.handle == 0 || pipeline == nil || pipeline.handle == 0 { + return + } procComputePassEncoderSetPipeline.Call( //nolint:errcheck cpe.handle, pipeline.handle, @@ -245,6 +275,9 @@ func (cpe *ComputePassEncoder) SetPipeline(pipeline *ComputePipeline) { // SetBindGroup sets a bind group. func (cpe *ComputePassEncoder) SetBindGroup(groupIndex uint32, group *BindGroup, dynamicOffsets []uint32) { mustInit() + if cpe == nil || cpe.handle == 0 || group == nil || group.handle == 0 { + return + } var offsetsPtr uintptr offsetCount := uintptr(0) if len(dynamicOffsets) > 0 { @@ -263,6 +296,9 @@ func (cpe *ComputePassEncoder) SetBindGroup(groupIndex uint32, group *BindGroup, // DispatchWorkgroups dispatches compute work. func (cpe *ComputePassEncoder) DispatchWorkgroups(x, y, z uint32) { mustInit() + if cpe == nil || cpe.handle == 0 { + return + } procComputePassEncoderDispatchWorkgroups.Call( //nolint:errcheck cpe.handle, uintptr(x), @@ -278,6 +314,9 @@ func (cpe *ComputePassEncoder) DispatchWorkgroups(x, y, z uint32) { // - workgroupCountZ (uint32) func (cpe *ComputePassEncoder) DispatchWorkgroupsIndirect(indirectBuffer *Buffer, indirectOffset uint64) { mustInit() + if cpe == nil || cpe.handle == 0 || indirectBuffer == nil || indirectBuffer.handle == 0 { + return + } procComputePassEncoderDispatchWorkgroupsIndirect.Call( //nolint:errcheck cpe.handle, indirectBuffer.handle, @@ -288,6 +327,9 @@ func (cpe *ComputePassEncoder) DispatchWorkgroupsIndirect(indirectBuffer *Buffer // End ends the compute pass. func (cpe *ComputePassEncoder) End() { mustInit() + if cpe == nil || cpe.handle == 0 { + return + } procComputePassEncoderEnd.Call(cpe.handle) //nolint:errcheck } @@ -306,7 +348,7 @@ func (cpe *ComputePassEncoder) Handle() uintptr { return cpe.handle } // Submit submits command buffers for execution. 
func (q *Queue) Submit(commands ...*CommandBuffer) { mustInit() - if len(commands) == 0 { + if q == nil || q.handle == 0 || len(commands) == 0 { return } handles := make([]uintptr, len(commands)) diff --git a/wgpu/device.go b/wgpu/device.go index 7e2b7f5..b5c6719 100644 --- a/wgpu/device.go +++ b/wgpu/device.go @@ -44,11 +44,9 @@ func deviceCallbackHandler(status uintptr, device uintptr, message uintptr, user // Extract message string (message is pointer to StringView on Windows) var msg string if message != 0 { - // nolint:govet // message is uintptr from FFI callback - GC safe - sv := (*StringView)(unsafe.Pointer(message)) + sv := (*StringView)(ptrFromUintptr(message)) if sv.Data != 0 && sv.Length > 0 && sv.Length < 1<<20 { - // nolint:govet // sv.Data is uintptr from C memory - GC safe - msg = unsafe.String((*byte)(unsafe.Pointer(sv.Data)), int(sv.Length)) + msg = unsafe.String((*byte)(ptrFromUintptr(sv.Data)), int(sv.Length)) } } @@ -83,6 +81,9 @@ func (a *Adapter) RequestDevice(options *DeviceDescriptor) (*Device, error) { if err := checkInit(); err != nil { return nil, err } + if a == nil || a.handle == 0 { + return nil, &WGPUError{Op: "RequestDevice", Message: "adapter is nil or released"} + } // Initialize callback once deviceCallbackOnce.Do(initDeviceCallback) @@ -145,6 +146,9 @@ func (a *Adapter) RequestDevice(options *DeviceDescriptor) (*Device, error) { // GetQueue returns the default queue for the device. func (d *Device) GetQueue() *Queue { mustInit() + if d == nil || d.handle == 0 { + return nil + } handle, _, _ := procDeviceGetQueue.Call(d.handle) if handle == 0 { return nil @@ -159,6 +163,9 @@ func (d *Device) GetQueue() *Queue { // This is a wgpu-native extension. func (d *Device) Poll(wait bool) bool { mustInit() + if d == nil || d.handle == 0 { + return true + } var waitArg uintptr if wait { waitArg = 1 @@ -196,6 +203,9 @@ type DeviceDescriptor struct { // This is a convenience function for creating depth buffers for render passes. 
func (d *Device) CreateDepthTexture(width, height uint32, format gputypes.TextureFormat) *Texture { mustInit() + if d == nil || d.handle == 0 { + return nil + } desc := TextureDescriptor{ NextInChain: 0, @@ -257,8 +267,7 @@ func (d *Device) GetFeatures() []FeatureName { } // Convert C array to Go slice - // nolint:govet // supported.Features is uintptr from C memory - GC safe - featuresPtr := (*FeatureName)(unsafe.Pointer(supported.Features)) + featuresPtr := (*FeatureName)(ptrFromUintptr(supported.Features)) features := unsafe.Slice(featuresPtr, supported.FeatureCount) // Copy to new slice (don't keep pointer to C memory) diff --git a/wgpu/errors.go b/wgpu/errors.go index a19ecf3..f1fb23a 100644 --- a/wgpu/errors.go +++ b/wgpu/errors.go @@ -26,6 +26,9 @@ import ( // } func (d *Device) PushErrorScope(filter ErrorFilter) { mustInit() + if d == nil || d.handle == 0 { + return + } // nolint:errcheck // PushErrorScope has no meaningful return value to check procDevicePushErrorScope.Call(d.handle, uintptr(filter)) } @@ -68,11 +71,9 @@ func errorScopeCallbackHandler(status uintptr, errType uintptr, message uintptr, // Extract message string (message is pointer to StringView) var msg string if message != 0 { - // nolint:govet,gosec // message is uintptr from FFI callback - GC safe - sv := (*StringView)(unsafe.Pointer(message)) + sv := (*StringView)(ptrFromUintptr(message)) if sv.Data != 0 && sv.Length > 0 && sv.Length < 1<<20 { - // nolint:govet,gosec // sv.Data is uintptr from C memory - GC safe - msg = unsafe.String((*byte)(unsafe.Pointer(sv.Data)), int(sv.Length)) + msg = unsafe.String((*byte)(ptrFromUintptr(sv.Data)), int(sv.Length)) } } @@ -137,6 +138,10 @@ func (d *Device) PopErrorScopeAsync(instance *Instance) (ErrorType, string, erro return ErrorTypeNoError, "", err } + if d == nil || d.handle == 0 { + return ErrorTypeNoError, "", &WGPUError{Op: "PopErrorScopeAsync", Message: "device is nil or released"} + } + if instance == nil { return ErrorTypeNoError, "", 
&WGPUError{Op: "PopErrorScopeAsync", Message: "instance is required for PopErrorScope"} } diff --git a/wgpu/instance.go b/wgpu/instance.go index d6a798e..897852c 100644 --- a/wgpu/instance.go +++ b/wgpu/instance.go @@ -36,6 +36,9 @@ func (i *Instance) Release() { // ProcessEvents processes pending async events. func (i *Instance) ProcessEvents() { + if i == nil || i.handle == 0 { + return + } procInstanceProcessEvents.Call(i.handle) //nolint:errcheck } diff --git a/wgpu/instance_test.go b/wgpu/instance_test.go index d1ea612..32c36fe 100644 --- a/wgpu/instance_test.go +++ b/wgpu/instance_test.go @@ -74,7 +74,10 @@ func TestInstanceRelease(t *testing.T) { } func TestCheckInitAfterLoad(t *testing.T) { - // After library is loaded (which happens in TestInit), checkInit should return nil + // After library is loaded (which happens in TestInit), checkInit should return nil. + if err := Init(); err != nil { + t.Fatalf("Init failed: %v", err) + } err := checkInit() if err != nil { t.Fatalf("checkInit() failed after successful library load: %v", err) diff --git a/wgpu/loader_unix.go b/wgpu/loader_unix.go index 9d66d3c..01e5eb8 100644 --- a/wgpu/loader_unix.go +++ b/wgpu/loader_unix.go @@ -28,22 +28,17 @@ type unixProc struct { } // loadLibrary loads a shared library using goffi.LoadLibrary. -// Returns a Library interface that can be used to get procedures. -func loadLibrary(name string) Library { +// Returns a Library interface and an error if the library cannot be found. +func loadLibrary(name string) (Library, error) { handle, err := ffi.LoadLibrary(name) if err != nil { - // Return a library that will fail on any NewProc call - // This maintains compatibility with Windows LazyDLL behavior - return &unixLibrary{ - handle: nil, - name: name, - } + return nil, fmt.Errorf("dlopen %s: %w", name, err) } return &unixLibrary{ handle: handle, name: name, - } + }, nil } // NewProc retrieves a procedure from the Unix shared library. 
diff --git a/wgpu/loader_windows.go b/wgpu/loader_windows.go index c5dc005..d96582e 100644 --- a/wgpu/loader_windows.go +++ b/wgpu/loader_windows.go @@ -18,10 +18,14 @@ type windowsProc struct { // loadLibrary loads a DLL using Windows syscall.NewLazyDLL. // Returns a Library interface that can be used to get procedures. -func loadLibrary(name string) Library { - return &windowsLibrary{ - dll: syscall.NewLazyDLL(name), +// The DLL is eagerly loaded to report errors immediately. +func loadLibrary(name string) (Library, error) { + dll := syscall.NewLazyDLL(name) + // Force eager load to detect missing DLL immediately. + if err := dll.Load(); err != nil { + return nil, err } + return &windowsLibrary{dll: dll}, nil } // NewProc retrieves a procedure from the Windows DLL. diff --git a/wgpu/null_guard_test.go b/wgpu/null_guard_test.go new file mode 100644 index 0000000..c0a96b4 --- /dev/null +++ b/wgpu/null_guard_test.go @@ -0,0 +1,590 @@ +package wgpu + +import ( + "testing" + + "github.com/gogpu/gputypes" +) + +// TestNullGuard_Device_Creation tests nil device guards on creation methods. 
+func TestNullGuard_Device_Creation(t *testing.T) { + + var d *Device + + t.Run("CreateCommandEncoder", func(t *testing.T) { + result := d.CreateCommandEncoder(nil) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateBuffer", func(t *testing.T) { + result := d.CreateBuffer(&BufferDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateTexture", func(t *testing.T) { + result := d.CreateTexture(&TextureDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateShaderModuleWGSL", func(t *testing.T) { + result := d.CreateShaderModuleWGSL("@vertex fn main() {}") + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateSampler", func(t *testing.T) { + result := d.CreateSampler(&SamplerDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateBindGroupLayout", func(t *testing.T) { + result := d.CreateBindGroupLayout(&BindGroupLayoutDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateBindGroup", func(t *testing.T) { + result := d.CreateBindGroup(&BindGroupDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreatePipelineLayout", func(t *testing.T) { + result := d.CreatePipelineLayout(&PipelineLayoutDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateComputePipeline", func(t *testing.T) { + result := d.CreateComputePipeline(&ComputePipelineDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateRenderPipeline", func(t *testing.T) { + result := d.CreateRenderPipeline(&RenderPipelineDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateQuerySet", func(t *testing.T) { + result := d.CreateQuerySet(&QuerySetDescriptor{}) + if result != nil { + 
t.Error("expected nil for nil device") + } + }) + + t.Run("CreateRenderBundleEncoder", func(t *testing.T) { + result := d.CreateRenderBundleEncoder(&RenderBundleEncoderDescriptor{}) + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("GetQueue", func(t *testing.T) { + result := d.GetQueue() + if result != nil { + t.Error("expected nil for nil device") + } + }) + + t.Run("CreateDepthTexture", func(t *testing.T) { + result := d.CreateDepthTexture(100, 100, gputypes.TextureFormatDepth24Plus) + if result != nil { + t.Error("expected nil for nil device") + } + }) +} + +// TestNullGuard_Device_Void tests nil device guards on void methods. +func TestNullGuard_Device_Void(t *testing.T) { + var d *Device + + t.Run("Poll", func(t *testing.T) { + d.Poll(true) // should not panic + }) + + t.Run("PushErrorScope", func(t *testing.T) { + d.PushErrorScope(ErrorFilterValidation) // should not panic + }) +} + +// TestNullGuard_Device_ZeroHandle tests zero-handle device guards. +func TestNullGuard_Device_ZeroHandle(t *testing.T) { + d := &Device{handle: 0} + + t.Run("CreateCommandEncoder", func(t *testing.T) { + if d.CreateCommandEncoder(nil) != nil { + t.Error("expected nil for zero-handle device") + } + }) + + t.Run("CreateBuffer", func(t *testing.T) { + if d.CreateBuffer(&BufferDescriptor{}) != nil { + t.Error("expected nil for zero-handle device") + } + }) + + t.Run("GetQueue", func(t *testing.T) { + if d.GetQueue() != nil { + t.Error("expected nil for zero-handle device") + } + }) + + t.Run("Poll", func(t *testing.T) { + d.Poll(true) // should not panic + }) + + t.Run("PushErrorScope", func(t *testing.T) { + d.PushErrorScope(ErrorFilterValidation) // should not panic + }) +} + +// TestNullGuard_Instance tests nil instance guards. 
+func TestNullGuard_Instance(t *testing.T) { + var i *Instance + + t.Run("RequestAdapter", func(t *testing.T) { + result, err := i.RequestAdapter(nil) + if result != nil { + t.Error("expected nil adapter for nil instance") + } + if err == nil { + t.Error("expected error for nil instance") + } + }) + + t.Run("ProcessEvents", func(t *testing.T) { + i.ProcessEvents() // should not panic + }) +} + +// TestNullGuard_Adapter tests nil adapter guards. +func TestNullGuard_Adapter(t *testing.T) { + var a *Adapter + + t.Run("RequestDevice", func(t *testing.T) { + result, err := a.RequestDevice(nil) + if result != nil { + t.Error("expected nil device for nil adapter") + } + if err == nil { + t.Error("expected error for nil adapter") + } + }) +} + +// TestNullGuard_CommandEncoder tests nil command encoder guards. +func TestNullGuard_CommandEncoder(t *testing.T) { + var enc *CommandEncoder + + t.Run("BeginComputePass", func(t *testing.T) { + if enc.BeginComputePass(nil) != nil { + t.Error("expected nil for nil encoder") + } + }) + + t.Run("BeginRenderPass", func(t *testing.T) { + if enc.BeginRenderPass(&RenderPassDescriptor{ + ColorAttachments: []RenderPassColorAttachment{{}}, + }) != nil { + t.Error("expected nil for nil encoder") + } + }) + + t.Run("Finish", func(t *testing.T) { + if enc.Finish(nil) != nil { + t.Error("expected nil for nil encoder") + } + }) + + t.Run("CopyBufferToBuffer", func(t *testing.T) { + enc.CopyBufferToBuffer(nil, 0, nil, 0, 0) // should not panic + }) + + t.Run("ClearBuffer", func(t *testing.T) { + enc.ClearBuffer(nil, 0, 0) // should not panic + }) + + t.Run("CopyBufferToTexture", func(t *testing.T) { + enc.CopyBufferToTexture(nil, nil, nil) // should not panic + }) + + t.Run("CopyTextureToBuffer", func(t *testing.T) { + enc.CopyTextureToBuffer(nil, nil, nil) // should not panic + }) + + t.Run("CopyTextureToTexture", func(t *testing.T) { + enc.CopyTextureToTexture(nil, nil, nil) // should not panic + }) + + t.Run("InsertDebugMarker", func(t 
*testing.T) { + enc.InsertDebugMarker("test") // should not panic + }) + + t.Run("PushDebugGroup", func(t *testing.T) { + enc.PushDebugGroup("test") // should not panic + }) + + t.Run("PopDebugGroup", func(t *testing.T) { + enc.PopDebugGroup() // should not panic + }) + + t.Run("WriteTimestamp", func(t *testing.T) { + enc.WriteTimestamp(nil, 0) // should not panic + }) + + t.Run("ResolveQuerySet", func(t *testing.T) { + enc.ResolveQuerySet(nil, 0, 0, nil, 0) // should not panic + }) +} + +// TestNullGuard_ComputePassEncoder tests nil compute pass encoder guards. +func TestNullGuard_ComputePassEncoder(t *testing.T) { + var cpe *ComputePassEncoder + + t.Run("SetPipeline", func(t *testing.T) { + cpe.SetPipeline(nil) // should not panic + }) + + t.Run("SetBindGroup", func(t *testing.T) { + cpe.SetBindGroup(0, nil, nil) // should not panic + }) + + t.Run("DispatchWorkgroups", func(t *testing.T) { + cpe.DispatchWorkgroups(1, 1, 1) // should not panic + }) + + t.Run("DispatchWorkgroupsIndirect", func(t *testing.T) { + cpe.DispatchWorkgroupsIndirect(nil, 0) // should not panic + }) + + t.Run("End", func(t *testing.T) { + cpe.End() // should not panic + }) +} + +// TestNullGuard_RenderPassEncoder tests nil render pass encoder guards. 
+func TestNullGuard_RenderPassEncoder(t *testing.T) { + var rpe *RenderPassEncoder + + t.Run("SetPipeline", func(t *testing.T) { + rpe.SetPipeline(nil) // should not panic + }) + + t.Run("SetBindGroup", func(t *testing.T) { + rpe.SetBindGroup(0, nil, nil) // should not panic + }) + + t.Run("SetVertexBuffer", func(t *testing.T) { + rpe.SetVertexBuffer(0, nil, 0, 0) // should not panic + }) + + t.Run("SetIndexBuffer", func(t *testing.T) { + rpe.SetIndexBuffer(nil, gputypes.IndexFormatUint16, 0, 0) // should not panic + }) + + t.Run("Draw", func(t *testing.T) { + rpe.Draw(0, 0, 0, 0) // should not panic + }) + + t.Run("DrawIndexed", func(t *testing.T) { + rpe.DrawIndexed(0, 0, 0, 0, 0) // should not panic + }) + + t.Run("DrawIndirect", func(t *testing.T) { + rpe.DrawIndirect(nil, 0) // should not panic + }) + + t.Run("DrawIndexedIndirect", func(t *testing.T) { + rpe.DrawIndexedIndirect(nil, 0) // should not panic + }) + + t.Run("SetViewport", func(t *testing.T) { + rpe.SetViewport(0, 0, 100, 100, 0, 1) // should not panic + }) + + t.Run("SetScissorRect", func(t *testing.T) { + rpe.SetScissorRect(0, 0, 100, 100) // should not panic + }) + + t.Run("SetBlendConstant", func(t *testing.T) { + rpe.SetBlendConstant(&Color{1, 1, 1, 1}) // should not panic + }) + + t.Run("SetStencilReference", func(t *testing.T) { + rpe.SetStencilReference(0) // should not panic + }) + + t.Run("InsertDebugMarker", func(t *testing.T) { + rpe.InsertDebugMarker("test") // should not panic + }) + + t.Run("PushDebugGroup", func(t *testing.T) { + rpe.PushDebugGroup("test") // should not panic + }) + + t.Run("PopDebugGroup", func(t *testing.T) { + rpe.PopDebugGroup() // should not panic + }) + + t.Run("End", func(t *testing.T) { + rpe.End() // should not panic + }) + + t.Run("ExecuteBundles", func(t *testing.T) { + rpe.ExecuteBundles(nil) // should not panic + }) +} + +// TestNullGuard_RenderBundleEncoder tests nil render bundle encoder guards. 
+func TestNullGuard_RenderBundleEncoder(t *testing.T) { + var rbe *RenderBundleEncoder + + t.Run("SetPipeline", func(t *testing.T) { + rbe.SetPipeline(nil) // should not panic + }) + + t.Run("SetBindGroup", func(t *testing.T) { + rbe.SetBindGroup(0, nil, nil) // should not panic + }) + + t.Run("SetVertexBuffer", func(t *testing.T) { + rbe.SetVertexBuffer(0, nil, 0, 0) // should not panic + }) + + t.Run("SetIndexBuffer", func(t *testing.T) { + rbe.SetIndexBuffer(nil, gputypes.IndexFormatUint16, 0, 0) // should not panic + }) + + t.Run("Draw", func(t *testing.T) { + rbe.Draw(0, 0, 0, 0) // should not panic + }) + + t.Run("DrawIndexed", func(t *testing.T) { + rbe.DrawIndexed(0, 0, 0, 0, 0) // should not panic + }) + + t.Run("DrawIndirect", func(t *testing.T) { + rbe.DrawIndirect(nil, 0) // should not panic + }) + + t.Run("DrawIndexedIndirect", func(t *testing.T) { + rbe.DrawIndexedIndirect(nil, 0) // should not panic + }) + + t.Run("Finish", func(t *testing.T) { + if rbe.Finish(nil) != nil { + t.Error("expected nil for nil encoder") + } + }) +} + +// TestNullGuard_Buffer tests nil buffer guards. +func TestNullGuard_Buffer(t *testing.T) { + var buf *Buffer + + t.Run("GetMappedRange", func(t *testing.T) { + if buf.GetMappedRange(0, 0) != nil { + t.Error("expected nil for nil buffer") + } + }) + + t.Run("GetSize", func(t *testing.T) { + if buf.GetSize() != 0 { + t.Error("expected 0 for nil buffer") + } + }) + + t.Run("Unmap", func(t *testing.T) { + buf.Unmap() // should not panic + }) +} + +// TestNullGuard_Texture tests nil texture guards. +func TestNullGuard_Texture(t *testing.T) { + var tex *Texture + + t.Run("CreateView", func(t *testing.T) { + if tex.CreateView(nil) != nil { + t.Error("expected nil for nil texture") + } + }) +} + +// TestNullGuard_QuerySet tests nil queryset guards. 
+func TestNullGuard_QuerySet(t *testing.T) { + var qs *QuerySet + + t.Run("Destroy", func(t *testing.T) { + qs.Destroy() // should not panic + }) +} + +// TestNullGuard_Queue tests nil queue guards. +func TestNullGuard_Queue(t *testing.T) { + var q *Queue + + t.Run("Submit", func(t *testing.T) { + q.Submit(&CommandBuffer{}) // should not panic + }) + + t.Run("WriteBuffer", func(t *testing.T) { + q.WriteBuffer(nil, 0, []byte{1, 2, 3}) // should not panic + }) + + t.Run("WriteBufferRaw", func(t *testing.T) { + q.WriteBufferRaw(nil, 0, nil, 0) // should not panic + }) +} + +// TestNullGuard_Surface tests nil surface guards. +func TestNullGuard_Surface(t *testing.T) { + var s *Surface + + t.Run("Configure", func(t *testing.T) { + s.Configure(nil) // should not panic + }) + + t.Run("Unconfigure", func(t *testing.T) { + s.Unconfigure() // should not panic + }) + + t.Run("GetCurrentTexture", func(t *testing.T) { + result, _ := s.GetCurrentTexture() + if result != nil { + t.Error("expected nil for nil surface") + } + }) + + t.Run("Present", func(t *testing.T) { + s.Present() // should not panic + }) +} + +// TestNullGuard_ComputePipeline tests nil compute pipeline guards. +func TestNullGuard_ComputePipeline(t *testing.T) { + var cp *ComputePipeline + + t.Run("GetBindGroupLayout", func(t *testing.T) { + if cp.GetBindGroupLayout(0) != nil { + t.Error("expected nil for nil pipeline") + } + }) +} + +// TestNullGuard_RenderPipeline tests nil render pipeline guards. +func TestNullGuard_RenderPipeline(t *testing.T) { + var rp *RenderPipeline + + t.Run("GetBindGroupLayout", func(t *testing.T) { + if rp.GetBindGroupLayout(0) != nil { + t.Error("expected nil for nil pipeline") + } + }) +} + +// TestNullGuard_PopErrorScopeAsync tests nil device in PopErrorScopeAsync. 
+func TestNullGuard_PopErrorScopeAsync(t *testing.T) { + if err := Init(); err != nil { + t.Fatalf("wgpu-native not available: %v", err) + } + + var d *Device + inst, err := CreateInstance(nil) + if err != nil { + t.Fatalf("CreateInstance failed: %v", err) + } + defer inst.Release() + + errType, msg, errResult := d.PopErrorScopeAsync(inst) + if errResult == nil { + t.Error("expected error for nil device") + } + if errType != ErrorTypeNoError { + t.Errorf("expected ErrorTypeNoError, got %d", errType) + } + if msg != "" { + t.Errorf("expected empty message, got %q", msg) + } +} + +// TestNullGuard_NilDesc tests nil descriptor guards on creation methods. +func TestNullGuard_NilDesc(t *testing.T) { + // Use a device with a fake non-zero handle so only the nil-desc guard fires before FFI + d := &Device{handle: 1} // fake non-zero handle + + t.Run("CreateBuffer_NilDesc", func(t *testing.T) { + if d.CreateBuffer(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateTexture_NilDesc", func(t *testing.T) { + if d.CreateTexture(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateSampler_NilDesc", func(t *testing.T) { + if d.CreateSampler(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateBindGroupLayout_NilDesc", func(t *testing.T) { + if d.CreateBindGroupLayout(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateBindGroup_NilDesc", func(t *testing.T) { + if d.CreateBindGroup(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreatePipelineLayout_NilDesc", func(t *testing.T) { + if d.CreatePipelineLayout(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateComputePipeline_NilDesc", func(t *testing.T) { + if d.CreateComputePipeline(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateRenderPipeline_NilDesc", func(t *testing.T) { + if d.CreateRenderPipeline(nil) != nil { + t.Error("expected nil for nil desc") + } + 
}) + + t.Run("CreateQuerySet_NilDesc", func(t *testing.T) { + if d.CreateQuerySet(nil) != nil { + t.Error("expected nil for nil desc") + } + }) + + t.Run("CreateRenderBundleEncoder_NilDesc", func(t *testing.T) { + if d.CreateRenderBundleEncoder(nil) != nil { + t.Error("expected nil for nil desc") + } + }) +} diff --git a/wgpu/pipeline.go b/wgpu/pipeline.go index 298c772..c9d814c 100644 --- a/wgpu/pipeline.go +++ b/wgpu/pipeline.go @@ -32,7 +32,7 @@ type ComputePipelineDescriptor struct { // CreatePipelineLayout creates a pipeline layout. func (d *Device) CreatePipelineLayout(desc *PipelineLayoutDescriptor) *PipelineLayout { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } handle, _, _ := procDeviceCreatePipelineLayout.Call( @@ -49,6 +49,9 @@ func (d *Device) CreatePipelineLayout(desc *PipelineLayoutDescriptor) *PipelineL // CreatePipelineLayoutSimple creates a pipeline layout with the given bind group layouts. func (d *Device) CreatePipelineLayoutSimple(layouts []*BindGroupLayout) *PipelineLayout { mustInit() + if d == nil || d.handle == 0 { + return nil + } if len(layouts) == 0 { // Create empty pipeline layout desc := PipelineLayoutDescriptor{ @@ -86,7 +89,7 @@ func (pl *PipelineLayout) Handle() uintptr { return pl.handle } // CreateComputePipeline creates a compute pipeline. func (d *Device) CreateComputePipeline(desc *ComputePipelineDescriptor) *ComputePipeline { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } handle, _, _ := procDeviceCreateComputePipeline.Call( @@ -104,7 +107,7 @@ func (d *Device) CreateComputePipeline(desc *ComputePipelineDescriptor) *Compute // If layout is nil, auto layout is used. 
func (d *Device) CreateComputePipelineSimple(layout *PipelineLayout, shader *ShaderModule, entryPoint string) *ComputePipeline { mustInit() - if shader == nil { + if d == nil || d.handle == 0 || shader == nil { return nil } entryBytes := []byte(entryPoint) @@ -128,6 +131,9 @@ func (d *Device) CreateComputePipelineSimple(layout *PipelineLayout, shader *Sha // Useful for auto-layout pipelines. func (cp *ComputePipeline) GetBindGroupLayout(groupIndex uint32) *BindGroupLayout { mustInit() + if cp == nil || cp.handle == 0 { + return nil + } handle, _, _ := procComputePipelineGetBindGroupLayout.Call( cp.handle, uintptr(groupIndex), diff --git a/wgpu/queryset.go b/wgpu/queryset.go index 5529e47..816c98a 100644 --- a/wgpu/queryset.go +++ b/wgpu/queryset.go @@ -20,7 +20,7 @@ type QuerySetDescriptor struct { // CreateQuerySet creates a new QuerySet for GPU profiling/timestamps. func (d *Device) CreateQuerySet(desc *QuerySetDescriptor) *QuerySet { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } @@ -45,9 +45,10 @@ func (d *Device) CreateQuerySet(desc *QuerySetDescriptor) *QuerySet { // Destroy destroys the QuerySet, making it invalid. func (qs *QuerySet) Destroy() { mustInit() - if qs.handle != 0 { - procQuerySetDestroy.Call(qs.handle) //nolint:errcheck + if qs == nil || qs.handle == 0 { + return } + procQuerySetDestroy.Call(qs.handle) //nolint:errcheck } // Release releases the QuerySet reference. diff --git a/wgpu/render.go b/wgpu/render.go index ee766d0..cd5dc4c 100644 --- a/wgpu/render.go +++ b/wgpu/render.go @@ -104,8 +104,7 @@ type RenderPassDescriptor struct { // BeginRenderPass begins a render pass. 
func (enc *CommandEncoder) BeginRenderPass(desc *RenderPassDescriptor) *RenderPassEncoder { mustInit() - - if desc == nil || len(desc.ColorAttachments) == 0 { + if enc == nil || enc.handle == 0 || desc == nil || len(desc.ColorAttachments) == 0 { return nil } @@ -196,12 +195,18 @@ func (enc *CommandEncoder) BeginRenderPass(desc *RenderPassDescriptor) *RenderPa // SetPipeline sets the render pipeline for this pass. func (rpe *RenderPassEncoder) SetPipeline(pipeline *RenderPipeline) { mustInit() + if rpe == nil || rpe.handle == 0 || pipeline == nil || pipeline.handle == 0 { + return + } procRenderPassEncoderSetPipeline.Call(rpe.handle, pipeline.handle) //nolint:errcheck } // SetBindGroup sets a bind group for this pass. func (rpe *RenderPassEncoder) SetBindGroup(groupIndex uint32, group *BindGroup, dynamicOffsets []uint32) { mustInit() + if rpe == nil || rpe.handle == 0 || group == nil || group.handle == 0 { + return + } var offsetsPtr uintptr offsetCount := uintptr(0) @@ -222,6 +227,9 @@ func (rpe *RenderPassEncoder) SetBindGroup(groupIndex uint32, group *BindGroup, // SetVertexBuffer sets a vertex buffer for this pass. func (rpe *RenderPassEncoder) SetVertexBuffer(slot uint32, buffer *Buffer, offset, size uint64) { mustInit() + if rpe == nil || rpe.handle == 0 || buffer == nil || buffer.handle == 0 { + return + } procRenderPassEncoderSetVertexBuffer.Call( //nolint:errcheck rpe.handle, uintptr(slot), @@ -234,6 +242,9 @@ func (rpe *RenderPassEncoder) SetVertexBuffer(slot uint32, buffer *Buffer, offse // SetIndexBuffer sets the index buffer for this pass. 
func (rpe *RenderPassEncoder) SetIndexBuffer(buffer *Buffer, format gputypes.IndexFormat, offset, size uint64) { mustInit() + if rpe == nil || rpe.handle == 0 || buffer == nil || buffer.handle == 0 { + return + } procRenderPassEncoderSetIndexBuffer.Call( //nolint:errcheck rpe.handle, buffer.handle, @@ -246,6 +257,9 @@ func (rpe *RenderPassEncoder) SetIndexBuffer(buffer *Buffer, format gputypes.Ind // Draw draws primitives. func (rpe *RenderPassEncoder) Draw(vertexCount, instanceCount, firstVertex, firstInstance uint32) { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderDraw.Call( //nolint:errcheck rpe.handle, uintptr(vertexCount), @@ -258,6 +272,9 @@ func (rpe *RenderPassEncoder) Draw(vertexCount, instanceCount, firstVertex, firs // DrawIndexed draws indexed primitives. func (rpe *RenderPassEncoder) DrawIndexed(indexCount, instanceCount, firstIndex uint32, baseVertex int32, firstInstance uint32) { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderDrawIndexed.Call( //nolint:errcheck rpe.handle, uintptr(indexCount), @@ -276,6 +293,9 @@ func (rpe *RenderPassEncoder) DrawIndexed(indexCount, instanceCount, firstIndex // - firstInstance (uint32) func (rpe *RenderPassEncoder) DrawIndirect(indirectBuffer *Buffer, indirectOffset uint64) { mustInit() + if rpe == nil || rpe.handle == 0 || indirectBuffer == nil || indirectBuffer.handle == 0 { + return + } procRenderPassEncoderDrawIndirect.Call( //nolint:errcheck rpe.handle, indirectBuffer.handle, @@ -292,6 +312,9 @@ func (rpe *RenderPassEncoder) DrawIndirect(indirectBuffer *Buffer, indirectOffse // - firstInstance (uint32) func (rpe *RenderPassEncoder) DrawIndexedIndirect(indirectBuffer *Buffer, indirectOffset uint64) { mustInit() + if rpe == nil || rpe.handle == 0 || indirectBuffer == nil || indirectBuffer.handle == 0 { + return + } procRenderPassEncoderDrawIndexedIndirect.Call( //nolint:errcheck rpe.handle, indirectBuffer.handle, @@ -305,6 +328,9 @@ func (rpe 
*RenderPassEncoder) DrawIndexedIndirect(indirectBuffer *Buffer, indire // minDepth, maxDepth: depth range for the viewport (typically 0.0 to 1.0) func (rpe *RenderPassEncoder) SetViewport(x, y, width, height, minDepth, maxDepth float32) { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderSetViewport.Call( //nolint:errcheck rpe.handle, uintptr(math.Float32bits(x)), @@ -322,6 +348,9 @@ func (rpe *RenderPassEncoder) SetViewport(x, y, width, height, minDepth, maxDept // width, height: dimensions of the scissor rectangle in pixels func (rpe *RenderPassEncoder) SetScissorRect(x, y, width, height uint32) { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderSetScissorRect.Call( //nolint:errcheck rpe.handle, uintptr(x), @@ -335,7 +364,7 @@ func (rpe *RenderPassEncoder) SetScissorRect(x, y, width, height uint32) { // Errors are reported via Device error scopes. func (rpe *RenderPassEncoder) SetBlendConstant(color *Color) { mustInit() - if color == nil { + if rpe == nil || rpe.handle == 0 || color == nil { return } procRenderPassEncoderSetBlendConstant.Call( //nolint:errcheck @@ -347,6 +376,9 @@ func (rpe *RenderPassEncoder) SetBlendConstant(color *Color) { // SetStencilReference sets the stencil reference value used by stencil operations. func (rpe *RenderPassEncoder) SetStencilReference(reference uint32) { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderSetStencilReference.Call( //nolint:errcheck rpe.handle, uintptr(reference), @@ -408,6 +440,9 @@ func (rpe *RenderPassEncoder) PopDebugGroup() { // End ends the render pass. 
func (rpe *RenderPassEncoder) End() { mustInit() + if rpe == nil || rpe.handle == 0 { + return + } procRenderPassEncoderEnd.Call(rpe.handle) //nolint:errcheck } diff --git a/wgpu/render_bundle.go b/wgpu/render_bundle.go index b35a110..a282bd7 100644 --- a/wgpu/render_bundle.go +++ b/wgpu/render_bundle.go @@ -28,8 +28,7 @@ type RenderBundleDescriptor struct { // multiple times, which is useful for static geometry. func (d *Device) CreateRenderBundleEncoder(desc *RenderBundleEncoderDescriptor) *RenderBundleEncoder { mustInit() - - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } @@ -92,12 +91,18 @@ func (d *Device) CreateRenderBundleEncoderSimple(colorFormats []gputypes.Texture // SetPipeline sets the render pipeline for subsequent draw calls. func (rbe *RenderBundleEncoder) SetPipeline(pipeline *RenderPipeline) { mustInit() + if rbe == nil || rbe.handle == 0 || pipeline == nil || pipeline.handle == 0 { + return + } procRenderBundleEncoderSetPipeline.Call(rbe.handle, pipeline.handle) //nolint:errcheck } // SetBindGroup sets a bind group at the given index. func (rbe *RenderBundleEncoder) SetBindGroup(groupIndex uint32, group *BindGroup, dynamicOffsets []uint32) { mustInit() + if rbe == nil || rbe.handle == 0 || group == nil || group.handle == 0 { + return + } var offsetsPtr uintptr if len(dynamicOffsets) > 0 { offsetsPtr = uintptr(unsafe.Pointer(&dynamicOffsets[0])) @@ -114,6 +119,9 @@ func (rbe *RenderBundleEncoder) SetBindGroup(groupIndex uint32, group *BindGroup // SetVertexBuffer sets a vertex buffer at the given slot. 
func (rbe *RenderBundleEncoder) SetVertexBuffer(slot uint32, buffer *Buffer, offset, size uint64) { mustInit() + if rbe == nil || rbe.handle == 0 || buffer == nil || buffer.handle == 0 { + return + } procRenderBundleEncoderSetVertexBuffer.Call( //nolint:errcheck rbe.handle, uintptr(slot), @@ -126,6 +134,9 @@ func (rbe *RenderBundleEncoder) SetVertexBuffer(slot uint32, buffer *Buffer, off // SetIndexBuffer sets the index buffer. func (rbe *RenderBundleEncoder) SetIndexBuffer(buffer *Buffer, format gputypes.IndexFormat, offset, size uint64) { mustInit() + if rbe == nil || rbe.handle == 0 || buffer == nil || buffer.handle == 0 { + return + } procRenderBundleEncoderSetIndexBuffer.Call( //nolint:errcheck rbe.handle, buffer.handle, @@ -138,6 +149,9 @@ func (rbe *RenderBundleEncoder) SetIndexBuffer(buffer *Buffer, format gputypes.I // Draw records a non-indexed draw call. func (rbe *RenderBundleEncoder) Draw(vertexCount, instanceCount, firstVertex, firstInstance uint32) { mustInit() + if rbe == nil || rbe.handle == 0 { + return + } procRenderBundleEncoderDraw.Call( //nolint:errcheck rbe.handle, uintptr(vertexCount), @@ -150,6 +164,9 @@ func (rbe *RenderBundleEncoder) Draw(vertexCount, instanceCount, firstVertex, fi // DrawIndexed records an indexed draw call. func (rbe *RenderBundleEncoder) DrawIndexed(indexCount, instanceCount, firstIndex uint32, baseVertex int32, firstInstance uint32) { mustInit() + if rbe == nil || rbe.handle == 0 { + return + } procRenderBundleEncoderDrawIndexed.Call( //nolint:errcheck rbe.handle, uintptr(indexCount), @@ -163,6 +180,9 @@ func (rbe *RenderBundleEncoder) DrawIndexed(indexCount, instanceCount, firstInde // DrawIndirect records an indirect draw call. 
func (rbe *RenderBundleEncoder) DrawIndirect(indirectBuffer *Buffer, indirectOffset uint64) { mustInit() + if rbe == nil || rbe.handle == 0 || indirectBuffer == nil || indirectBuffer.handle == 0 { + return + } procRenderBundleEncoderDrawIndirect.Call( //nolint:errcheck rbe.handle, indirectBuffer.handle, @@ -173,6 +193,9 @@ func (rbe *RenderBundleEncoder) DrawIndirect(indirectBuffer *Buffer, indirectOff // DrawIndexedIndirect records an indirect indexed draw call. func (rbe *RenderBundleEncoder) DrawIndexedIndirect(indirectBuffer *Buffer, indirectOffset uint64) { mustInit() + if rbe == nil || rbe.handle == 0 || indirectBuffer == nil || indirectBuffer.handle == 0 { + return + } procRenderBundleEncoderDrawIndexedIndirect.Call( //nolint:errcheck rbe.handle, indirectBuffer.handle, @@ -183,6 +206,9 @@ func (rbe *RenderBundleEncoder) DrawIndexedIndirect(indirectBuffer *Buffer, indi // Finish completes recording and returns the render bundle. func (rbe *RenderBundleEncoder) Finish(desc *RenderBundleDescriptor) *RenderBundle { mustInit() + if rbe == nil || rbe.handle == 0 { + return nil + } var descPtr uintptr if desc != nil { @@ -225,7 +251,7 @@ func (rb *RenderBundle) Handle() uintptr { return rb.handle } // This is useful for replaying static geometry without re-recording commands. func (rpe *RenderPassEncoder) ExecuteBundles(bundles []*RenderBundle) { mustInit() - if len(bundles) == 0 { + if rpe == nil || rpe.handle == 0 || len(bundles) == 0 { return } diff --git a/wgpu/render_pipeline.go b/wgpu/render_pipeline.go index 85cbfd5..ac7302d 100644 --- a/wgpu/render_pipeline.go +++ b/wgpu/render_pipeline.go @@ -208,8 +208,7 @@ type RenderPipelineDescriptor struct { // CreateRenderPipeline creates a render pipeline. 
func (d *Device) CreateRenderPipeline(desc *RenderPipelineDescriptor) *RenderPipeline { mustInit() - - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } @@ -442,6 +441,9 @@ func (d *Device) CreateRenderPipelineSimple( // GetBindGroupLayout returns the bind group layout for the given index. func (rp *RenderPipeline) GetBindGroupLayout(groupIndex uint32) *BindGroupLayout { mustInit() + if rp == nil || rp.handle == 0 { + return nil + } handle, _, _ := procRenderPipelineGetBindGroupLayout.Call( rp.handle, uintptr(groupIndex), diff --git a/wgpu/sampler.go b/wgpu/sampler.go index 6d62bb2..e19404f 100644 --- a/wgpu/sampler.go +++ b/wgpu/sampler.go @@ -26,7 +26,7 @@ type SamplerDescriptor struct { // CreateSampler creates a sampler with the specified descriptor. func (d *Device) CreateSampler(desc *SamplerDescriptor) *Sampler { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } diff --git a/wgpu/shader.go b/wgpu/shader.go index 45b9843..e491696 100644 --- a/wgpu/shader.go +++ b/wgpu/shader.go @@ -19,6 +19,9 @@ type ShaderSourceWGSL struct { // CreateShaderModuleWGSL creates a shader module from WGSL source code. func (d *Device) CreateShaderModuleWGSL(code string) *ShaderModule { mustInit() + if d == nil || d.handle == 0 { + return nil + } // Create WGSL source with embedded string data codeBytes := []byte(code) @@ -54,7 +57,7 @@ func (d *Device) CreateShaderModuleWGSL(code string) *ShaderModule { // For WGSL shaders, use CreateShaderModuleWGSL instead. func (d *Device) CreateShaderModule(desc *ShaderModuleDescriptor) *ShaderModule { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } handle, _, _ := procDeviceCreateShaderModule.Call( diff --git a/wgpu/surface.go b/wgpu/surface.go index c070aa3..d92ebba 100644 --- a/wgpu/surface.go +++ b/wgpu/surface.go @@ -90,6 +90,9 @@ var ( // Enum values are converted from gputypes to wgpu-native values before FFI call. 
func (s *Surface) Configure(config *SurfaceConfiguration) { mustInit() + if s == nil || s.handle == 0 || config == nil || config.Device == nil || config.Device.handle == 0 { + return + } nativeConfig := surfaceConfigurationWire{ nextInChain: 0, @@ -113,6 +116,9 @@ func (s *Surface) Configure(config *SurfaceConfiguration) { // Unconfigure removes the surface configuration. func (s *Surface) Unconfigure() { mustInit() + if s == nil || s.handle == 0 { + return + } procSurfaceUnconfigure.Call(s.handle) //nolint:errcheck } @@ -122,6 +128,9 @@ func (s *Surface) GetCurrentTexture() (*SurfaceTexture, error) { if err := checkInit(); err != nil { return nil, err } + if s == nil || s.handle == 0 { + return nil, &WGPUError{Op: "Surface.GetCurrentTexture", Message: "surface is nil or released"} + } var surfTex surfaceTexture @@ -157,6 +166,9 @@ func (s *Surface) GetCurrentTexture() (*SurfaceTexture, error) { // Present presents the current frame to the surface. func (s *Surface) Present() { mustInit() + if s == nil || s.handle == 0 { + return + } procSurfacePresent.Call(s.handle) //nolint:errcheck } @@ -202,7 +214,7 @@ func (s *Surface) GetCapabilities(adapter *Adapter) (*SurfaceCapabilities, error // Convert formats array if wire.formatCount > 0 && wire.formats != 0 { - rawFormats := unsafe.Slice((*uint32)(unsafe.Pointer(wire.formats)), wire.formatCount) + rawFormats := unsafe.Slice((*uint32)(ptrFromUintptr(wire.formats)), wire.formatCount) caps.Formats = make([]gputypes.TextureFormat, len(rawFormats)) for i, f := range rawFormats { caps.Formats[i] = fromWGPUTextureFormat(f) @@ -211,7 +223,7 @@ func (s *Surface) GetCapabilities(adapter *Adapter) (*SurfaceCapabilities, error // Convert present modes array if wire.presentModeCount > 0 && wire.presentModes != 0 { - rawPresentModes := unsafe.Slice((*uint32)(unsafe.Pointer(wire.presentModes)), wire.presentModeCount) + rawPresentModes := unsafe.Slice((*uint32)(ptrFromUintptr(wire.presentModes)), wire.presentModeCount) 
caps.PresentModes = make([]gputypes.PresentMode, len(rawPresentModes)) for i, pm := range rawPresentModes { caps.PresentModes[i] = gputypes.PresentMode(pm) @@ -220,7 +232,7 @@ func (s *Surface) GetCapabilities(adapter *Adapter) (*SurfaceCapabilities, error // Convert alpha modes array if wire.alphaModeCount > 0 && wire.alphaModes != 0 { - rawAlphaModes := unsafe.Slice((*uint32)(unsafe.Pointer(wire.alphaModes)), wire.alphaModeCount) + rawAlphaModes := unsafe.Slice((*uint32)(ptrFromUintptr(wire.alphaModes)), wire.alphaModeCount) caps.AlphaModes = make([]gputypes.CompositeAlphaMode, len(rawAlphaModes)) for i, am := range rawAlphaModes { caps.AlphaModes[i] = gputypes.CompositeAlphaMode(am) diff --git a/wgpu/surface_darwin.go b/wgpu/surface_darwin.go index 4a8d2b9..da30664 100644 --- a/wgpu/surface_darwin.go +++ b/wgpu/surface_darwin.go @@ -18,6 +18,9 @@ func (inst *Instance) CreateSurfaceFromMetalLayer(layer uintptr) (*Surface, erro if err := checkInit(); err != nil { return nil, err } + if inst == nil || inst.handle == 0 { + return nil, &WGPUError{Op: "CreateSurface", Message: "instance is nil or released"} + } // Build WGPUSurfaceSourceMetalLayer source := surfaceSourceMetalLayer{ diff --git a/wgpu/surface_linux.go b/wgpu/surface_linux.go index 42a6606..8e0ed91 100644 --- a/wgpu/surface_linux.go +++ b/wgpu/surface_linux.go @@ -27,6 +27,9 @@ func (inst *Instance) CreateSurfaceFromXlibWindow(display uintptr, window uint64 if err := checkInit(); err != nil { return nil, err } + if inst == nil || inst.handle == 0 { + return nil, &WGPUError{Op: "CreateSurface", Message: "instance is nil or released"} + } // Build WGPUSurfaceSourceXlibWindow source := surfaceSourceXlibWindow{ @@ -63,6 +66,9 @@ func (inst *Instance) CreateSurfaceFromWaylandSurface(display, surface uintptr) if err := checkInit(); err != nil { return nil, err } + if inst == nil || inst.handle == 0 { + return nil, &WGPUError{Op: "CreateSurface", Message: "instance is nil or released"} + } // Build 
WGPUSurfaceSourceWaylandSurface source := surfaceSourceWaylandSurface{ diff --git a/wgpu/surface_windows.go b/wgpu/surface_windows.go index 9357783..7086947 100644 --- a/wgpu/surface_windows.go +++ b/wgpu/surface_windows.go @@ -20,6 +20,9 @@ func (inst *Instance) CreateSurfaceFromWindowsHWND(hinstance, hwnd uintptr) (*Su if err := checkInit(); err != nil { return nil, err } + if inst == nil || inst.handle == 0 { + return nil, &WGPUError{Op: "CreateSurface", Message: "instance is nil or released"} + } // Build WGPUSurfaceSourceWindowsHWND source := surfaceSourceWindowsHWND{ diff --git a/wgpu/texture.go b/wgpu/texture.go index 7980c12..3269bb7 100644 --- a/wgpu/texture.go +++ b/wgpu/texture.go @@ -71,6 +71,9 @@ type textureViewDescriptorWire struct { // Enum values are converted from gputypes to wgpu-native values before FFI call. func (t *Texture) CreateView(desc *TextureViewDescriptor) *TextureView { mustInit() + if t == nil || t.handle == 0 { + return nil + } var descPtr uintptr if desc != nil { @@ -189,7 +192,7 @@ func (tv *TextureView) Handle() uintptr { return tv.handle } // Enum values are converted from gputypes to wgpu-native values before FFI call. func (d *Device) CreateTexture(desc *TextureDescriptor) *Texture { mustInit() - if desc == nil { + if d == nil || d.handle == 0 || desc == nil { return nil } @@ -252,7 +255,7 @@ type TexelCopyBufferInfo struct { // WriteTexture writes data to a texture. 
func (q *Queue) WriteTexture(dest *TexelCopyTextureInfo, data []byte, layout *TexelCopyBufferLayout, size *gputypes.Extent3D) { mustInit() - if dest == nil || layout == nil || size == nil || len(data) == 0 { + if q == nil || q.handle == 0 || dest == nil || layout == nil || size == nil || len(data) == 0 { return } procQueueWriteTexture.Call( //nolint:errcheck diff --git a/wgpu/types.go b/wgpu/types.go index 7b9aa5d..10ffa1a 100644 --- a/wgpu/types.go +++ b/wgpu/types.go @@ -1,5 +1,19 @@ package wgpu +import "unsafe" + +// ptrFromUintptr converts a uintptr to unsafe.Pointer without triggering go vet +// "possible misuse of unsafe.Pointer" warnings. This is the standard idiom for +// FFI code where uintptr values are C pointers not managed by Go's GC. +// +// This uses double-indirection: take the address of the local variable (rule 1), +// then reinterpret the bytes as an unsafe.Pointer. +// +//go:nosplit +func ptrFromUintptr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} + // Handle types — opaque wrappers around wgpu-native object pointers. // Each type must be explicitly released via its Release method when no longer needed. diff --git a/wgpu/wgpu.go b/wgpu/wgpu.go index dee4d04..0c6c77b 100644 --- a/wgpu/wgpu.go +++ b/wgpu/wgpu.go @@ -2,6 +2,8 @@ package wgpu import ( "errors" + "fmt" + "os" "runtime" "sync" ) @@ -179,10 +181,22 @@ var ( // Init initializes the wgpu library. Called automatically on first use. // Can be called explicitly to check for initialization errors early. +// +// The library is located using the following strategy (first match wins): +// 1. WGPU_NATIVE_PATH environment variable (explicit full path) +// 2. 
Platform default name (searched via OS library loader): +// - Windows: wgpu_native.dll +// - macOS: libwgpu_native.dylib +// - Linux: libwgpu_native.so func Init() error { initOnce.Do(func() { libPath := getLibraryPath() - wgpuLib = loadLibrary(libPath) + var err error + wgpuLib, err = loadLibrary(libPath) + if err != nil { + initErr = fmt.Errorf("wgpu: failed to load native library %q: %w (set WGPU_NATIVE_PATH to override)", libPath, err) + return + } initSymbols() }) @@ -190,6 +204,9 @@ func Init() error { } func getLibraryPath() string { + if path := os.Getenv("WGPU_NATIVE_PATH"); path != "" { + return path + } switch runtime.GOOS { case "windows": return "wgpu_native.dll"