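# Imports assumed by the snippets below; the original excerpts do not show them.
# Names such as texture_size, compute_shader, vertex_shader, fragment_shader,
# can_use_vulkan_sdk, buf1, canvas, size and texture_data come from the surrounding
# test/example helpers and are intentionally left undefined here.
import ctypes
import random

import pyshader.dev  # assumed module path for pyshader.dev.validate
import wgpu
import wgpu.backends.rs  # noqa: F401 - selects the wgpu-native backend
import wgpu.utils
from pytest import raises
from wgpu.utils import get_default_device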
nx, ny, nz, nc = texture_size
nbytes = ctypes.sizeof(data1)
bpp = nbytes // (nx * ny * nz) # bytes per pixel
if can_use_vulkan_sdk:
pyshader.dev.validate(compute_shader)
device = wgpu.utils.get_default_device()
nx, ny, nz = 100, 1, 1
data0 = (ctypes.c_float * 100)(*[random.random() for i in range(nx * ny * nz)])
data1 = (ctypes.c_float * 100)()
nbytes = ctypes.sizeof(data1)
bpp = nbytes // (nx * ny * nz)
texture_format = wgpu.TextureFormat.r32float
texture_dim = wgpu.TextureDimension.d1
# Create buffers and textures
tex3 = device.create_texture(
size=(nx, ny, nz),
dimension=texture_dim,
format=texture_format,
usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST,
)
buf4 = device.create_buffer(
size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ
)
for i in range(len(data1)):
data1[i] = data0[i]
# Upload from CPU to texture
command_encoder = device.create_command_encoder()
device.default_queue.write_texture(
{"texture": tex3},
data1,
{"bytes_per_row": bpp * nx, "rows_per_image": ny},
(nx, ny, nz),
)
vbos = vbos or []
vbo_views = vbo_views or []
# Select texture format. The _srgb norm formats map to the sRGB colorspace, which
# appears to be the default for render pipelines: https://en.wikipedia.org/wiki/SRGB
texture_format = wgpu.TextureFormat.rgba8unorm # rgba8unorm or bgra8unorm_srgb
# Create texture to render to
nx, ny, bpp = size[0], size[1], 4
nbytes = nx * ny * bpp
texture = device.create_texture(
size=(nx, ny, 1),
dimension=wgpu.TextureDimension.d2,
format=texture_format,
usage=wgpu.TextureUsage.OUTPUT_ATTACHMENT | wgpu.TextureUsage.COPY_SRC,
)
current_texture_view = texture.create_view()
# Also a buffer to read the data to CPU
buffer = device.create_buffer(
size=nbytes, usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST
)
vshader = device.create_shader_module(code=vertex_shader)
fshader = device.create_shader_module(code=fragment_shader)
render_pipeline = device.create_render_pipeline(
layout=pipeline_layout,
vertex_stage={"module": vshader, "entry_point": "main"},
fragment_stage={"module": fshader, "entry_point": "main"},
primitive_topology=topology,
color_states=[
    {
        "format": texture_format,
        "alpha_blend": (
            wgpu.BlendFactor.one,
            wgpu.BlendFactor.zero,
            wgpu.BlendOperation.add,
        ),
        "color_blend": (
            wgpu.BlendFactor.one,
            wgpu.BlendFactor.zero,
            wgpu.BlendOperation.add,
        ),
        "write_mask": wgpu.ColorWrite.ALL,
    }
],
vertex_state={"index_format": wgpu.IndexFormat.uint32, "vertex_buffers": [],},
sample_count=1,
sample_mask=0xFFFFFFFF,
alpha_to_coverage_enabled=False,
)
swap_chain = device.configure_swap_chain(
canvas,
device.get_swap_chain_preferred_format(canvas),
wgpu.TextureUsage.OUTPUT_ATTACHMENT,
)
def draw_frame():
with swap_chain as current_texture_view:
command_encoder = device.create_command_encoder()
ca = {
"attachment": current_texture_view,
"resolve_target": None,
"load_value": (0, 0, 0, 0),
"store_op": wgpu.StoreOp.store,
}
render_pass = command_encoder.begin_render_pass(color_attachments=[ca],)
render_pass.set_pipeline(render_pipeline)
render_pass.set_bind_group(0, bind_group, [], 0, 999999)
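        # Sketch, not in the original excerpt: issue the draw call, end the pass and
        # submit. The vertex count (4) is an assumption for a triangle-strip quad.
        render_pass.draw(4, 1, 0, 0)
        render_pass.end_pass()
        device.default_queue.submit([command_encoder.finish()])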
# Create a texture and staging buffers for the copy round-trip
tex2 = device.create_texture(
    size=(nx, ny, nz),
    dimension=texture_dim,
    format=texture_format,
    usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST,
)
buf4 = device.create_buffer(
size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST
)
buf5 = device.create_buffer(
size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ
)
# Check texture stats
assert tex2.texture_size == (nx, ny, nz)
assert tex2.mip_level_count == 1
assert tex2.sample_count == 1
assert tex2.dimension == wgpu.TextureDimension.d1
assert tex2.format == texture_format
assert tex2.texture_usage == wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST
assert tex2.create_view().texture is tex2
# Upload from CPU to buffer
# assert buf1.state == "unmapped"
# mapped_data = buf1.map(wgpu.MapMode.WRITE)
# assert buf1.state == "mapped"
# mapped_data.cast("f")[:] = data1
# buf1.unmap()
# assert buf1.state == "unmapped"
buf1.write_data(data1)
# Copy from buffer to texture
command_encoder = device.create_command_encoder()
command_encoder.copy_buffer_to_texture(
{"buffer": buf1, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny},
{"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)},
"""... gradient on R and B, zeros on G and ones on A."""
nx, ny, nz = texture_size
device = get_default_device()
if can_use_vulkan_sdk:
pyshader.dev.validate(vertex_shader)
pyshader.dev.validate(fragment_shader)
# Create texture
texture = device.create_texture(
size=(nx, ny, nz),
dimension=wgpu.TextureDimension.d2,
format=texture_format,
usage=wgpu.TextureUsage.SAMPLED | wgpu.TextureUsage.COPY_DST,
)
upload_to_texture(device, texture, texture_data, nx, ny, nz)
# texture_view = texture.create_view()
# or:
texture_view = texture.create_view(
format=texture_format, dimension=wgpu.TextureDimension.d2,
)
# But not like these ...
with raises(ValueError):
texture_view = texture.create_view(dimension=wgpu.TextureDimension.d2,)
with raises(ValueError):
texture_view = texture.create_view(mip_level_count=1,)
sampler = device.create_sampler(mag_filter="linear", min_filter="linear")
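# Sketch, not in the original excerpt: one way to expose the sampler and texture view
# to the fragment shader in this (older) wgpu-py API. The binding slots, variable
# names and BindingType usage are assumptions, not taken from the original code.
tex_bind_group_layout = device.create_bind_group_layout(
    entries=[
        {
            "binding": 0,
            "visibility": wgpu.ShaderStage.FRAGMENT,
            "type": wgpu.BindingType.sampler,
        },
        {
            "binding": 1,
            "visibility": wgpu.ShaderStage.FRAGMENT,
            "type": wgpu.BindingType.sampled_texture,
        },
    ]
)
tex_bind_group = device.create_bind_group(
    layout=tex_bind_group_layout,
    entries=[
        {"binding": 0, "resource": sampler},
        {"binding": 1, "resource": texture_view},
    ],
)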
renderpass.set_stencil_reference(42)
# Bindings and layout
bind_group_layout = device.create_bind_group_layout(entries=[]) # zero bindings
bind_group = device.create_bind_group(layout=bind_group_layout, entries=[])
pipeline_layout = device.create_pipeline_layout(
bind_group_layouts=[bind_group_layout]
)
# Create depth-stencil texture
depth_stencil_texture = device.create_texture(
size=(64, 64, 1), # when rendering to texture
# size=(640, 480, 1), # when rendering to screen
dimension=wgpu.TextureDimension.d2,
format=wgpu.TextureFormat.depth24plus_stencil8,
usage=wgpu.TextureUsage.OUTPUT_ATTACHMENT,
)
depth_stencil_state = dict(
format=wgpu.TextureFormat.depth24plus_stencil8,
depth_write_enabled=True,
depth_compare=wgpu.CompareFunction.less_equal,
stencil_front={
"compare": wgpu.CompareFunction.equal,
"fail_op": wgpu.StencilOperation.keep,
"depth_fail_op": wgpu.StencilOperation.keep,
"pass_op": wgpu.StencilOperation.keep,
},
stencil_back={
"compare": wgpu.CompareFunction.equal,
"fail_op": wgpu.StencilOperation.keep,
"depth_fail_op": wgpu.StencilOperation.keep,
device = get_default_device()
cshader = device.create_shader_module(code=compute_shader)
# Create textures and views
texture1 = device.create_texture(
size=(nx, ny, nz),
dimension=texture_dim,
format=texture_format,
usage=wgpu.TextureUsage.STORAGE | wgpu.TextureUsage.COPY_DST,
)
texture2 = device.create_texture(
size=(nx, ny, nz),
dimension=texture_dim,
format=texture_format,
usage=wgpu.TextureUsage.STORAGE | wgpu.TextureUsage.COPY_SRC,
)
texture_view1 = texture1.create_view()
texture_view2 = texture2.create_view()
# Determine texture component type from the format
if texture_format.endswith(("norm", "float")):
texture_component_type = wgpu.TextureComponentType.float
elif "uint" in texture_format:
texture_component_type = wgpu.TextureComponentType.uint
else:
texture_component_type = wgpu.TextureComponentType.sint
# Create buffer that we need to upload the data
buffer_usage = (
    wgpu.BufferUsage.MAP_READ
    | wgpu.BufferUsage.COPY_SRC
    | wgpu.BufferUsage.COPY_DST
)
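# Sketch, not in the original excerpt: create the compute pipeline and dispatch it over
# the texture. pipeline_layout and bind_group (binding the two texture views and the
# upload buffer) are assumed to exist; they are not part of the excerpt above.
compute_pipeline = device.create_compute_pipeline(
    layout=pipeline_layout,
    compute_stage={"module": cshader, "entry_point": "main"},
)
command_encoder = device.create_command_encoder()
compute_pass = command_encoder.begin_compute_pass()
compute_pass.set_pipeline(compute_pipeline)
compute_pass.set_bind_group(0, bind_group, [], 0, 999999)
compute_pass.dispatch(nx, ny, nz)
compute_pass.end_pass()
device.default_queue.submit([command_encoder.finish()])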
# Create index buffer, and upload data
index_buffer = device.create_buffer_with_data(
data=index_data, usage=wgpu.BufferUsage.INDEX
)
# Create uniform buffer - data is uploaded each frame
uniform_buffer = device.create_buffer(
size=uniform_data.nbytes, usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST
)
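# Sketch, not in the original excerpt: the per-frame upload mentioned above, assuming
# this wgpu-py version's GPUQueue.write_buffer and a buffer-protocol uniform_data object.
device.default_queue.write_buffer(uniform_buffer, 0, uniform_data, 0, uniform_data.nbytes)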
# Create texture, and upload data
texture = device.create_texture(
size=texture_size,
usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.SAMPLED,
dimension=wgpu.TextureDimension.d2,
format=wgpu.TextureFormat.r8uint,
mip_level_count=1,
sample_count=1,
)
texture_view = texture.create_view()
tmp_buffer = device.create_buffer_with_data(
data=texture_data, usage=wgpu.BufferUsage.COPY_SRC
)
command_encoder = device.create_command_encoder()
command_encoder.copy_buffer_to_texture(
{
"buffer": tmp_buffer,
"offset": 0,
"bytes_per_row": texture_data.strides[0],
"rows_per_image": 0,
wgpu.BlendFactor.one,
wgpu.BlendFactor.zero,
wgpu.BlendOperation.add,
),
}
],
vertex_state={"index_format": wgpu.IndexFormat.uint32, "vertex_buffers": []},
sample_count=1,
sample_mask=0xFFFFFFFF,
alpha_to_coverage_enabled=False,
)
swap_chain = device.configure_swap_chain(
canvas,
device.get_swap_chain_preferred_format(canvas),
wgpu.TextureUsage.OUTPUT_ATTACHMENT,
)
def draw_frame():
with swap_chain as current_texture_view:
command_encoder = device.create_command_encoder()
render_pass = command_encoder.begin_render_pass(
color_attachments=[
{
"attachment": current_texture_view,
"resolve_target": None,
"load_value": (0, 0, 0, 1), # LoadOp.load or color
"store_op": wgpu.StoreOp.store,
}
],
)