# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# --- Incomplete fragment: the enclosing loop over candidate functions (which
# binds `func`) and the initial `the_func = None` are above this view. ---
# Select the function whose transition graph calls pthread_mutex_lock from
# exactly 3 edges, then rebuild a key string from its `cmp cl/al, imm`
# instructions.
the_node = [ n for n in func.transition_graph.nodes() if isinstance(n, angr.knowledge.Function) and n.addr == pthread_mutex_lock.addr ]
in_edges = func.transition_graph.in_edges(the_node[0])
if len(in_edges) == 3:
the_func = func
break
assert the_func is not None
key = ""
# Walk blocks in address order so extracted characters come out in program order.
for block in sorted(the_func.blocks, key=lambda x: x.addr):
insns = block.capstone.insns
for insn in insns:
# capstone operand types: 1 == X86_OP_REG, 2 == X86_OP_IMM.
if insn.mnemonic == 'cmp' and \
insn.operands[0].type == 1 and \
insn.operands[0].reg in (capstone.x86_const.X86_REG_CL, capstone.x86_const.X86_REG_AL) and \
insn.operands[1].type == 2:
char = chr(insn.operands[1].imm)
if char in string.printable:
key += char
# at most one printable cmp-immediate is taken per block
break
return key
def repair_alloca_ins(self, state: SimState, state_block: Block) -> None:
    """Concretize a symbolic dynamic stack allocation (alloca pattern).

    Typical usage ``alloca(strlen(x))`` compiles to ``sub rsp, <reg>`` where
    <reg> holds a symbolic length. If the first instruction of the block
    subtracts a register from rsp/rbp and that register's value is symbolic,
    pin it to ``state.libc.max_str_len`` so the stack adjustment is concrete.

    :param state:       simulation state whose register is patched in place.
    :param state_block: lifted block whose first instruction is inspected.
    :return: ``None`` — this only mutates ``state``.
    """
    # Use a distinct local name: the original shadowed the `capstone` module.
    disasm = state_block.capstone
    first_ins = disasm.insns[0].insn
    if first_ins.mnemonic != "sub":
        return
    ops = first_ins.operands
    # Operand type 1 is X86_OP_REG. Check operand 0's type too before
    # reading `.reg` (the original read the union field unconditionally).
    if (
        ops[0].type == 1
        and ops[0].reg in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
        and ops[1].type == 1
    ):
        reg_name = first_ins.reg_name(ops[1].reg)
        reg_v = getattr(state.regs, reg_name)
        if state.solver.symbolic(reg_v):
            # Clamp the symbolic size to the configured maximum string length.
            setattr(state.regs, reg_name, state.libc.max_str_len)
# --- Incomplete fragment: the enclosing method signature and the seed
# `function_calls` worklist / `func_table` / margin bounds are above this view. ---
# Recursive-traversal disassembly sweep: pops entry addresses off
# `function_calls`, follows direct jumps and calls whose targets fall inside
# [inf_margin, sup_margin), and appends each function's instruction list to
# `func_table`.
processed_functions = []
processed_addrs = []
while len(function_calls) > 0:
addr = function_calls.pop()
processed_functions.append(addr)
function = []
func_table.append(function)
inst = self.locate_by_original_address(addr)
jmp_table = set()  # pending (not yet visited) conditional-jump targets
# processed_jumps = []
cont = True
while cont:
function.append(inst)
processed_addrs.append(inst.original_addr)
if x86_const.X86_GRP_JUMP in inst.original_inst.groups:
if inst.original_inst.operands[0].type == x86_const.X86_OP_IMM:
jump_address = inst.original_inst.operands[0].imm
# NOTE(review): if a direct jump's target falls outside the margins,
# neither `inst` nor `cont` is updated — confirm this cannot loop.
if inf_margin <= jump_address < sup_margin:
if x86_const.X86_INS_JMP == inst.original_inst.id:
if jump_address not in processed_addrs:
# Unconditional jmp to unseen code: NOP it and resume at target.
inst.new_bytes = str(bytearray([0x90]))  # Python 2 str(bytearray)
inst = self.locate_by_original_address(jump_address)
else:
# Target already visited: drain a pending conditional target.
cont = (len(jmp_table) > 0)
if cont:
jump_address = jmp_table.pop()
inst = self.locate_by_original_address(jump_address)
else:
# Conditional jump: remember taken target, keep on fall-through.
if jump_address not in jmp_table \
and jump_address not in processed_addrs:
jmp_table.add(jump_address)
cont = (inst.next_instruction is not None)
# NOTE(review): duplicated assignment — this repeats the line above.
cont = (inst.next_instruction is not None)
inst = inst.next_instruction
else:
# Indirect jump (register/memory operand): cannot follow statically.
cont = (len(jmp_table) > 0)
if cont:
jump_address = jmp_table.pop()
inst = self.locate_by_original_address(jump_address)
elif x86_const.X86_GRP_CALL in inst.original_inst.groups \
and inst.original_inst.operands[0].type == x86_const.X86_OP_IMM:
call_address = inst.original_inst.operands[0].imm
if inf_margin <= call_address < sup_margin \
and call_address not in processed_addrs:
# Queue the direct callee as a new function to sweep.
function_calls.add(call_address)
cont = (inst.next_instruction is not None)
inst = inst.next_instruction
elif x86_const.X86_GRP_RET in inst.original_inst.groups:
# End of this path; resume from any pending conditional-jump target.
cont = (len(jmp_table) > 0)
if cont:
jump_address = jmp_table.pop()
inst = self.locate_by_original_address(jump_address)
else:
# Ordinary instruction: fall through to the next one.
cont = (inst.next_instruction is not None)
inst = inst.next_instruction
return func_table
# --- Incomplete fragment (Python 2: `print` statement, builtin `cmp`): the
# enclosing loop and `each_mif_f` / `each_mif_vm` / `k_header` /
# `get_mem_op_offset` / `get_mem_op_reg` come from above this view. ---
#print each_mif_f, hex(each_mif_f)
cs_handler = Cs(CS_ARCH_X86, CS_MODE_LITTLE_ENDIAN)
cs_handler.detail = True # this is very important
# Read up to 0x3ff bytes at the file offset and disassemble at the VM address.
code = k_header.memcpy(each_mif_f, 0x3ff)
cs_insn = cs_handler.disasm(code, each_mif_vm)
xr_m = x_reg_manager()
for insn in cs_insn:
address = insn.address
mnemonic = insn.mnemonic
op_str = insn.op_str
# print("0x%x:\t%s\t%s" % (address, mnemonic, op_str))
# Track RIP as the address of the *next* instruction (x86-64 RIP-relative base).
xr_m.set_actual_value_by_regN(x86_const.X86_REG_RIP, address + insn.size)
# Python 2 cmp() returns 0 on equality, so this means mnemonic == "lea".
if not cmp(mnemonic, "lea"):
seg_num = insn.op_count(CS_OP_REG)
if seg_num > 2:
print "Extract: too many regs!"
imem_num = insn.op_count(CS_OP_MEM)
if imem_num:
mem_offset = get_mem_op_offset(insn)
s_reg = get_mem_op_reg(insn)
if s_reg == x86_const.X86_REG_RIP:
# Resolve `lea reg, [rip+disp]` to an absolute VM address.
s_reg_v = xr_m.get_actual_value_by_regN(x86_const.X86_REG_RIP)
mem_addr = mem_offset + s_reg_v
index = insn.op_find(CS_OP_REG, 1)
# --- Incomplete fragment: `our_fn` and the initial `not_crap` list are
# defined above this view. ---
# Collect blocks ending in the movzx / cmp / j(n)e pattern — candidate
# per-character comparison sites.
for b in our_fn.blocks:
t = b.capstone
if len(t.insns) < 3:
continue
if t.insns[-3].insn.mnemonic == u"movzx" and \
t.insns[-2].insn.mnemonic == u"cmp" and \
(t.insns[-1].insn.mnemonic == u"jne" or \
t.insns[-1].insn.mnemonic == u"je"
):
not_crap.append(t)
good = []
# Keep only blocks whose movzx loads memory based on rax/ecx.
for t in not_crap:
if t.insns[-3].insn.operands[1].mem.base in \
(capstone.x86_const.X86_REG_RAX, capstone.x86_const.X86_REG_ECX):
good.append(t)
# Sort by the displacement of the loaded byte, i.e. by character position
# within the compared buffer.
really_good = sorted(good, key=lambda x: x.insns[-3].insn.operands[1].mem.disp)
# print(map(str, really_good))
# The cmp immediates, taken in displacement order, spell out the answer.
solution = ''.join(chr(k.insns[-2].insn.operands[1].imm) for k in really_good)
return solution
# --- Incomplete fragment (Python 2: `cmp`, dict.iteritems): `cs_handler`,
# `meta_class`, `k_header`, `EXT_RELOCATIONS`, `STRING_TAB`,
# `get_first_reg` / `get_second_reg` / `get_single_IMM` come from outside
# this view; a `try:` matching the bare `except:` below appears to have
# been lost in extraction, so this span is syntactically incomplete. ---
cs_handler.detail = True # this is very important
code = k_header.memcpy(meta_class.newUserClient_f, 0xfff)
cs_insn = cs_handler.disasm(code, meta_class.newUserClient_vm)
xr_m = x_reg_manager()
# all substitutes of arg4: rcx/ecx
subs_rcx = [x86_const.X86_REG_RCX, x86_const.X86_REG_ECX]
open_Type = -1
couple_switch = 0
for insn in cs_insn:
address = insn.address
mnemonic = insn.mnemonic
op_str = insn.op_str
# print("0x%x:\t%s\t%s" % (address, mnemonic, op_str))
# RIP tracks the address of the next instruction.
xr_m.set_actual_value_by_regN(x86_const.X86_REG_RIP, address + insn.size)
if mnemonic in ["mov"]:
seg_num = insn.op_count(CS_OP_REG)
if seg_num == 2:
f_reg = get_first_reg(insn)
s_reg = get_second_reg(insn)
# `mov <reg>, rcx/ecx`: <reg> now also aliases the 4th argument.
if s_reg in [x86_const.X86_REG_RCX, x86_const.X86_REG_ECX]:
subs_rcx.append(f_reg)
if mnemonic in ["cmp"]:
seg_num = insn.op_count(CS_OP_REG)
imm_num = insn.op_count(CS_OP_IMM)
if seg_num == 1 and imm_num == 1:
f_reg = get_first_reg(insn)
# `cmp <arg4-alias>, imm`: the immediate is the tested open type.
if f_reg in subs_rcx:
open_Type = get_single_IMM(insn)
f_reg = get_first_reg(insn)
s_reg = get_second_reg(insn)
# Emulate a reg-to-reg move by propagating the tracked value.
xr_m.set_actual_value_by_regN(f_reg, xr_m.get_actual_value_by_regN(s_reg))
# NOTE(review): bare `except:` with no visible `try:` — besides the lost
# context, swallowing every exception silently hides real bugs.
except:
pass
continue
# Python 2 cmp() returns 0 on equality, so this means mnemonic == "call".
if not cmp(mnemonic, "call"):
imm_num = insn.op_count(CS_OP_IMM)
if imm_num == 1:
#address_f = k_header.get_f_from_vm(mod_init_fileaddr, mod_init_vmaddr, address)
# Address of the call's imm32 operand (opcode byte + 1) — this is where
# the external relocation entry points.
cf_addr = address + 1
if cf_addr in EXT_RELOCATIONS:
# Mangled name of the OSMetaClass constructor
# OSMetaClass::OSMetaClass(char const*, OSMetaClass const*, unsigned int):
# the tracked argument registers hold its arguments at the call site.
if EXT_RELOCATIONS[cf_addr] == "__ZN11OSMetaClassC2EPKcPKS_j":
meta_class = OSMetaClass()
meta_class.class_self_addr = xr_m.get_actual_value_by_regN(x86_const.X86_REG_RDI)
meta_class.class_name_addr = xr_m.get_actual_value_by_regN(x86_const.X86_REG_RSI)
meta_class.class_super_addr = xr_m.get_actual_value_by_regN(x86_const.X86_REG_RDX)
meta_class.class_size = xr_m.get_actual_value_by_regN(x86_const.X86_REG_ECX)
if meta_class.class_name_addr:
# get meta class name
meta_class.class_name = k_header.get_memStr_from_vmaddr(each_mif_f, each_mif_vm,
meta_class.class_name_addr)
# get vtable for AppleClass*
# Itanium-mangled vtable symbol: __ZTV<name-length><name>.
object_name = "__ZTV%d%s" % (len(meta_class.class_name), meta_class.class_name)
for k, v in STRING_TAB.iteritems():
if not cmp(v, object_name):
meta_class.object_vt_vm = k
meta_class.object_vt_f = k_header.get_f_from_vm(const_fileaddr, const_vmaddr, k)
break
# --- Incomplete fragment: `cfg`, `caller_addr`, `count_calls` are defined
# above this view, and the `for o, check_func ...` loop body continues
# below it. ---
# Collect caller-of-caller functions that invoke `caller_addr` exactly 32
# times and contain a register-sized stack allocation (`sub rsp, <reg>`).
all_checkers = [ ]
for caller_caller in cfg.functions.callgraph.predecessors(caller_addr):
func = cfg.functions[caller_caller]
call_times = count_calls(func, caller_addr)
if call_times != 32:
continue
# make sure it has sub rsp, r15
has_alloca = False
for block in func.blocks:
for insn in block.capstone.insns:
# capstone operand type 1 == X86_OP_REG: match `sub rsp, <reg>`.
if insn.mnemonic == 'sub' and \
insn.operands[0].type == 1 and \
insn.operands[0].reg == capstone.x86_const.X86_REG_RSP and \
insn.operands[1].type == 1:
has_alloca = True
break
if has_alloca:
break
if not has_alloca:
continue
all_checkers.append(func)
chars = {}
for o, check_func in enumerate(all_checkers):
print(o, len(all_checkers))