# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_linear_nested():
    """Place two i32 fields under a two-level (n//16 x 16) dense layout
    and verify that element-wise writes read back unchanged."""
    x = ti.var(ti.i32)
    y = ti.var(ti.i32)
    n = 128

    @ti.layout
    def place():
        # Nested dense blocks: outer n//16 chunks, each holding 16 elements.
        ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(x)
        ti.root.dense(ti.i, n // 16).dense(ti.i, 16).place(y)

    for idx in range(n):
        x[idx] = idx
        y[idx] = idx + 123

    for idx in range(n):
        assert x[idx] == idx
        assert y[idx] == idx + 123
    # NOTE(review): headless fragment — the enclosing `def` (presumably a point
    # classifier like `inside_taichi(p)` that initializes `ret = -1`) is not
    # visible in this chunk; indentation reconstructed, pairing of the final
    # `else` with `if p[0] < 0.5` is an inference — confirm against the original.
    # First matching region wins: `ret` is only assigned while still -1.
    if inside(p, Vector2(0.50, 0.25), 0.25):
        if ret == -1:
            ret = 0
    if inside(p, Vector2(0.50, 0.75), 0.25):
        if ret == -1:
            ret = 1
    if p[0] < 0.5:
        if ret == -1:
            ret = 1
    else:
        if ret == -1:
            ret = 0
    return ret
# NOTE(review): tail fragment of a rendering test — the opening `def` and the
# `inside_taichi` / `Vector2` helpers it calls are not visible in this chunk;
# indentation reconstructed.
x = ti.var(ti.f32)
n = 512
ti.cfg.use_llvm = True  # force the LLVM backend for this test

@ti.layout
def layout():
    # n x n 2D dense grid holding the rendered image.
    ti.root.dense(ti.ij, n).place(x)

@ti.kernel
def paint():
    # 4x4 supersampling: each pixel accumulates 16 sub-samples, each
    # weighted 1/16; sample coordinates are normalized to [0, 1).
    for i in range(n * 4):
        for j in range(n * 4):
            ret = 1.0 - inside_taichi(Vector2(1.0 * i / n / 4, 1.0 * j / n / 4))
            x[i // 4, j // 4] += ret / 16
def test_ad_reduce():
    # NOTE(review): truncated — the original test presumably goes on to call
    # func(), run the backward pass, and compare loss/gradients against
    # total_loss; that tail is missing from this chunk.
    x = ti.var(ti.f32)
    loss = ti.var(ti.f32)
    N = 16

    @ti.layout
    def place():
        # Scalar loss (and its grad) at the root; x (and its grad) in a
        # dense vector of N elements.
        ti.root.place(loss, loss.grad).dense(ti.i, N).place(x, x.grad)

    @ti.kernel
    def func():
        # loss = sum_i x[i]^2, accumulated atomically across iterations.
        for i in x:
            loss.atomic_add(ti.sqr(x[i]))

    total_loss = 0
    for i in range(N):
        x[i] = i
        total_loss += i * i  # expected loss, computed on the host for comparison
def test_simple():
    """Write one element of a dense i32 field from a kernel, then read it
    back on the host and check the stored value."""
    x = ti.var(ti.i32)
    n = 128

    @ti.layout
    def place():
        ti.root.dense(ti.i, n).place(x)

    @ti.kernel
    def func():
        x[7] = 120

    func()
    for idx in range(n):
        if idx == 7:
            assert x[idx] == 120
def __init__(self):
    """Fix the problem size and allocate the per-instance Taichi fields."""
    self.n = 128
    self.x = ti.var(ti.f32)
    self.total = ti.var(ti.f32)
def test_arg_load():
    # Scalar (0-D) fields placed directly at the root; indexed with [None].
    x = ti.var(ti.i32)
    y = ti.var(ti.f32)

    @ti.layout
    def layout():
        ti.root.place(x, y)

    # Kernels taking explicitly typed scalar arguments, exercising argument
    # loading for each primitive type.
    @ti.kernel
    def set_i32(v: ti.i32):
        x[None] = v

    @ti.kernel
    def set_f32(v: ti.f32):
        y[None] = v

    @ti.kernel
    def set_f64(v: ti.f64):
        # NOTE(review): truncated — the body of set_f64 and the test's
        # call/assert section are missing from this chunk.
def test_kernel_template_basic():
    # Two fields (i32 and f32) sharing one dense layout of length n.
    x = ti.var(ti.i32)
    y = ti.var(ti.f32)
    n = 16

    @ti.layout
    def layout():
        ti.root.dense(ti.i, n).place(x, y)

    # Template kernel: `a` binds to a field, `b` to a value, both resolved
    # per instantiation — so one definition serves both x/1 and y/2.
    @ti.kernel
    def inc(a: ti.template(), b: ti.template()):
        for i in a:
            a[i] += b

    inc(x, 1)
    inc(y, 2)
    for i in range(n):
        # NOTE(review): truncated — the verification loop body (presumably
        # asserts on x[i] and y[i]) is missing from this chunk.
import numpy as np
import random
import taichi as ti
import pickle
# ti.runtime.print_preprocessed = True
# ti.cfg.print_ir = True
# MNIST-style MLP field declarations: 28x28 inputs, one hidden layer of 500
# units, 10 output classes.
input = ti.var(ti.f32)  # NOTE(review): shadows the builtin `input`
weight1 = ti.var(ti.f32)  # input -> hidden weights
output1 = ti.var(ti.f32)  # hidden pre-activation
output1_nonlinear = ti.var(ti.f32)  # hidden post-activation
weight2 = ti.var(ti.f32)  # hidden -> output weights
output = ti.var(ti.f32)  # output logits
output_exp = ti.var(ti.f32)  # exp(logits), softmax numerator
output_softmax = ti.var(ti.f32)  # normalized softmax probabilities
softmax_sum = ti.var(ti.f32)  # softmax denominator
gt = ti.var(ti.f32)  # ground-truth labels
loss = ti.var(ti.f32)
learning_rate = ti.var(ti.f32)
n_input = 28**2
n_hidden = 500
n_output = 10
# Field factories; `real` is presumably a dtype defined elsewhere in the
# file — confirm before relying on these.
scalar = lambda: ti.var(dt=real)
vec = lambda: ti.Vector(2, dt=real)