def unicode_to_bytes_cast(context, builder, fromty, toty, val):
    uni_str = cgutils.create_struct_proxy(fromty)(context, builder, value=val)
    src1 = builder.bitcast(uni_str.data, ir.IntType(8).as_pointer())
    notkind1 = builder.icmp_unsigned('!=', uni_str.kind,
                                     ir.Constant(uni_str.kind.type, 1))
    src_length = uni_str.length

    # Only 1-byte (kind-1, latin-1 range) strings can be viewed as bytes directly.
    with builder.if_then(notkind1):
        context.call_conv.return_user_exc(
            builder, ValueError,
            ("cannot cast higher than 8-bit unicode_type to bytes",))

    bstr = _make_constant_bytes(context, builder, src_length)
    cgutils.memcpy(builder, bstr.data, src1, bstr.nitems)
    return bstr
entry = main_func.append_basic_block(name="entry")
builder = ir.IRBuilder(entry)

# Declare the external C routines the generated program calls.
putchar_type = ir.FunctionType(int32, (int32,))
putchar = ir.Function(module, putchar_type, name="putchar")
getchar_type = ir.FunctionType(int32, ())
getchar = ir.Function(module, getchar_type, name="getchar")
bzero_type = ir.FunctionType(void, (byte.as_pointer(), size_t))
bzero = ir.Function(module, bzero_type, name="bzero")

# The tape index: an integer of INDEX_BIT_SIZE bits, initialized to 0.
index_type = ir.IntType(INDEX_BIT_SIZE)
index = builder.alloca(index_type)
builder.store(ir.Constant(index_type, 0), index)

# The tape itself: 2 ** INDEX_BIT_SIZE bytes, zeroed via bzero.
tape_type = byte
tape = builder.alloca(tape_type, size=2 ** INDEX_BIT_SIZE)
builder.call(bzero, (tape, size_t(2 ** INDEX_BIT_SIZE)))

zero8 = byte(0)
one8 = byte(1)
eof = int32(-1)


def get_tape_location():
    # Load the current index, widen it, and compute a pointer into the tape.
    index_value = builder.load(index)
    index_value = builder.zext(index_value, int32)
    location = builder.gep(tape, (index_value,), inbounds=True)
    return location
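The block above uses several names that the excerpt never defines (main_func, module, int32, byte, size_t, void, INDEX_BIT_SIZE). A minimal sketch of how they might be set up with llvmlite, offered only as an assumption about the surrounding code:

import llvmlite.ir as ir

# Assumed definitions -- not part of the excerpt above.
INDEX_BIT_SIZE = 16                    # hypothetical width of the tape index
int32 = ir.IntType(32)
byte = ir.IntType(8)
size_t = ir.IntType(64)
void = ir.VoidType()

module = ir.Module(name="bf_module")
main_func = ir.Function(module, ir.FunctionType(void, ()), name="main")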
def unicode_to_unicode_charseq(context, builder, fromty, toty, val):
    uni_str = cgutils.create_struct_proxy(fromty)(context, builder, value=val)
    src1 = builder.bitcast(uni_str.data, ir.IntType(8).as_pointer())
    src2 = builder.bitcast(uni_str.data, ir.IntType(16).as_pointer())
    src4 = builder.bitcast(uni_str.data, ir.IntType(32).as_pointer())
    kind1 = builder.icmp_unsigned('==', uni_str.kind,
                                  ir.Constant(uni_str.kind.type, 1))
    kind2 = builder.icmp_unsigned('==', uni_str.kind,
                                  ir.Constant(uni_str.kind.type, 2))
    kind4 = builder.icmp_unsigned('==', uni_str.kind,
                                  ir.Constant(uni_str.kind.type, 4))
    src_length = uni_str.length

    lty = context.get_value_type(toty)
    dstint_t = ir.IntType(8 * unicode_byte_width)
    dst_ptr = cgutils.alloca_once(builder, lty)
    dst = builder.bitcast(dst_ptr, dstint_t.as_pointer())

    dst_length = ir.Constant(src_length.type, toty.count)
    is_shorter_value = builder.icmp_unsigned('<', src_length, dst_length)
    count = builder.select(is_shorter_value, src_length, dst_length)

    # If the source is shorter than the fixed-size destination, zero-fill the
    # destination first so the trailing characters are well defined.
    with builder.if_then(is_shorter_value):
        cgutils.memset(builder,
                       dst,
                       ir.Constant(src_length.type,
                                   toty.count * unicode_byte_width), 0)
def reflect_list(typ, val, c):
    """
    Reflect the native list's contents into the Python object.
    """
    if not typ.reflected:
        return
    if typ.dtype.reflected:
        msg = "cannot reflect element of reflected container: {}\n".format(typ)
        raise TypeError(msg)

    list = listobj.ListInstance(c.context, c.builder, typ, val)
    with c.builder.if_then(list.dirty, likely=False):
        obj = list.parent
        size = c.pyapi.list_size(obj)
        new_size = list.size
        diff = c.builder.sub(new_size, size)
        diff_gt_0 = c.builder.icmp_signed('>=', diff,
                                          ir.Constant(diff.type, 0))
        with c.builder.if_else(diff_gt_0) as (if_grow, if_shrink):
            # XXX no error checking below
            with if_grow:
                # First overwrite existing items
                with cgutils.for_range(c.builder, size) as loop:
                    item = list.getitem(loop.index)
                    list.incref_value(item)
                    itemobj = c.box(typ.dtype, item)
                    c.pyapi.list_setitem(obj, loop.index, itemobj)
                # Then add missing items
                with cgutils.for_range(c.builder, diff) as loop:
                    idx = c.builder.add(size, loop.index)
                    item = list.getitem(idx)
                    list.incref_value(item)
                    itemobj = c.box(typ.dtype, item)
                    c.pyapi.list_append(obj, itemobj)
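At the user level, this reflection is what makes mutations of a reflected list visible again in the interpreter once a jitted function returns. A small illustration of that behaviour, shown only as an assumption about how the helper above is exercised (Numba reports reflected lists as deprecated, but the mechanism works as sketched):

from numba import njit

@njit
def grow(lst):
    # The append happens on the native (unboxed) list inside nopython mode.
    lst.append(10)

xs = [1, 2, 3]
grow(xs)      # on return, the dirty native list is reflected back into xs
print(xs)     # [1, 2, 3, 10]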
def _define_atomic_inc_dec(module, op, ordering):
    """Define an LLVM function for atomic increment/decrement in the given
    module.

    Argument ``op`` is the operation ("add"/"sub").  Argument ``ordering`` is
    the memory ordering.  The generated function returns the new value.
    """
    ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])
    fn_atomic = ir.Function(module, ftype, name="nrt_atomic_{0}".format(op))

    [ptr] = fn_atomic.args
    bb = fn_atomic.append_basic_block()
    builder = ir.IRBuilder(bb)
    ONE = ir.Constant(_word_type, 1)
    if not _disable_atomicity:
        oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)
        # atomic_rmw yields the old value; redo the operation on it so the
        # function can return the "new" value, as documented.
        res = getattr(builder, op)(oldval, ONE)
        builder.ret(res)
    else:
        # Non-atomic fallback: load, update, store, and return the new value
        # so the contract matches the atomic path.
        oldval = builder.load(ptr)
        newval = getattr(builder, op)(oldval, ONE)
        builder.store(newval, ptr)
        builder.ret(newval)

    return fn_atomic
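A hedged usage sketch for the helper above; `_word_type` and `_disable_atomicity` are not shown in the excerpt, so their definitions here are assumptions:

import llvmlite.ir as ir

_word_type = ir.IntType(64)   # assumed: one machine word
_disable_atomicity = 0        # assumed: atomic instructions enabled

module = ir.Module(name="nrt_atomics")
atomic_incref = _define_atomic_inc_dec(module, "add", ordering="monotonic")
atomic_decref = _define_atomic_inc_dec(module, "sub", ordering="monotonic")
print(module)                 # dump the generated LLVM IR for inspection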
# BODY
equalszero = builder.icmp_signed(EQUALS, func.args[1], ir.Constant(type_map[BOOL], 0))
dyn_array_append = self.module.get_global('i64.array.append')
with builder.if_else(equalszero) as (then, otherwise):
    with then:
        # Append the character codes for "false": f a l s e
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 102)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 97)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 108)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 115)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 101)])
    with otherwise:
        # Append the character codes for "true": t r u e
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 116)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 114)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 117)])
        builder.call(dyn_array_append, [builder.load(array_ptr), ir.Constant(type_map[INT], 101)])
builder.branch(exit_block)

# CLOSE
builder.position_at_end(exit_block)
builder.ret_void()
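For reference, the two branches above append the character codes spelling "false" and "true". The same mapping in plain Python, as a hypothetical illustration only (not part of the compiler):

def bool_to_chars(flag):
    # 102 97 108 115 101 -> "false";  116 114 117 101 -> "true"
    return [ord(ch) for ch in ("false" if flag == 0 else "true")]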
def as_return(self, builder, value):
    # Rebuild a single struct value from the data-model elements for returning.
    elems = self._as("as_data", builder, value)
    struct = ir.Constant(self.get_data_type(), ir.Undefined)
    for i, el in enumerate(elems):
        struct = builder.insert_value(struct, el, [i])
    return struct
def constant_to_typed_llvm_value(module, builder, c):
    # Map a native_ast constant onto the corresponding llvmlite constant,
    # carrying the native type alongside the IR value.
    if c.matches.Float and c.bits == 64:
        return TypedLLVMValue(
            llvmlite.ir.Constant(llvmlite.ir.DoubleType(), c.val),
            native_ast.Type.Float(bits=64)
        )
    if c.matches.Float and c.bits == 32:
        return TypedLLVMValue(
            llvmlite.ir.Constant(llvmlite.ir.FloatType(), c.val),
            native_ast.Type.Float(bits=32)
        )
    if c.matches.Int:
        return TypedLLVMValue(
            llvmlite.ir.Constant(llvmlite.ir.IntType(c.bits), c.val),
            native_ast.Type.Int(bits=c.bits, signed=c.signed)
        )
    if c.matches.NullPointer:
        nt = native_ast.Type.Pointer(value_type=c.value_type)
        t = type_to_llvm_type(nt)
        llvm_c = llvmlite.ir.Constant(t, None)
        return TypedLLVMValue(llvm_c, nt)
    if c.matches.Struct:
        vals = [constant_to_typed_llvm_value(module, builder, t) for _, t in c.elements]
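TypedLLVMValue and the native_ast type constructors come from the surrounding project and are not shown in this excerpt. A minimal stand-in for the wrapper, purely as an assumption about its shape, so the return values above are easier to read:

class TypedLLVMValue:
    """Assumed shape: pairs an llvmlite IR value with its native_ast type."""
    def __init__(self, llvm_value, native_type):
        self.llvm_value = llvm_value
        self.native_type = native_type

    def __repr__(self):
        return "TypedLLVMValue(%s)" % (self.native_type,)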
def _allocate_payload(self, nentries, realloc=False):
    """
    Allocate and initialize payload for the given number of entries.
    If *realloc* is True, the existing meminfo is reused.

    CAUTION: *nentries* must be a power of 2!
    """
    context = self._context
    builder = self._builder

    ok = cgutils.alloca_once_value(builder, cgutils.true_bit)

    intp_t = context.get_value_type(types.intp)
    zero = ir.Constant(intp_t, 0)
    one = ir.Constant(intp_t, 1)

    payload_type = context.get_data_type(types.SetPayload(self._ty))
    payload_size = context.get_abi_sizeof(payload_type)
    entry_size = self._entrysize
    # Account for the fact that the payload struct already contains an entry
    payload_size -= entry_size

    # Total allocation size = payload_size + nentries * entry_size
    allocsize, ovf = cgutils.muladd_with_overflow(builder, nentries,
                                                  ir.Constant(intp_t, entry_size),
                                                  ir.Constant(intp_t, payload_size))
    with builder.if_then(ovf, likely=False):
        builder.store(cgutils.false_bit, ok)

    with builder.if_then(builder.load(ok), likely=True):
def make_uninitialized(self, kind='value'):
    # Produce an undefined constant of the model's value or data type.
    self._define()
    if kind == 'value':
        ty = self.get_value_type()
    else:
        ty = self.get_data_type()
    return ir.Constant(ty, ir.Undefined)