How to use the nose.tools module in nose

To help you get started, we've selected a few nose.tools examples based on popular ways the module is used in public projects.

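Most of the snippets below use the pep8-style assertion helpers that nose.tools generates from the methods of unittest.TestCase, such as assert_true, assert_equal, assert_in and assert_raises. The following is a minimal, self-contained sketch of those helpers; the test name and values are made up purely for illustration.

import nose.tools

def test_basic_assertions():
    # made-up values, just to show the call signatures
    values = [1, 2, 3]
    nose.tools.assert_true(len(values) == 3)
    nose.tools.assert_equal(sum(values), 6)
    nose.tools.assert_in(2, values)
    # assert_raises takes the exception type, a callable, then any arguments for it
    nose.tools.assert_raises(ZeroDivisionError, lambda: 1 / 0)

Run the file with the nose test runner (nosetests) and each assert_* call behaves like the corresponding unittest assertion.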

angr / rex / tests / test_rex.py
    path = os.path.join(bin_location, binary)

    with archr.targets.LocalTarget([path], target_os='cgc') as target:
        crash = rex.Crash(target, inp, fast_mode=True, rop_cache_path=os.path.join(cache_location, os.path.basename(binary)))
        zp = crash.state.get_plugin("zen_plugin")
        nose.tools.assert_true(len(zp.controlled_transmits) == 1)

        flag_leaks = list(crash.point_to_flag())

        nose.tools.assert_true(len(flag_leaks) >= 1)

        for ptfi in flag_leaks:
            cg = colorguard.ColorGuard(path, ptfi)
            nose.tools.assert_true(cg.causes_leak())
            pov = cg.attempt_exploit()
            nose.tools.assert_true(pov.test_binary())

        crash.project.loader.close()
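
The excerpt above checks collection sizes with nose.tools.assert_true. Below is a small sketch of the same pattern on stand-in data, next to the equivalent assert_equal call, whose failure message reports the expected and actual values rather than just a failed boolean.

import nose.tools

def test_length_checks():
    controlled_transmits = ["transmit-0"]  # stand-in data for illustration
    # a failure here only reports that the expression was not true
    nose.tools.assert_true(len(controlled_transmits) == 1)
    # the same check, but a failure reports both sides of the comparison
    nose.tools.assert_equal(len(controlled_transmits), 1)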

baidubce / bce-sdk-python / test / media / qa_test / test_list_trandcoding.py
    def setUp(self):
        """create env"""
        time.sleep(1)
        succ = True
        config = {'capacity': self.capacity}
        try:
            resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket,
                   self.targetBucket, pipeline_config=config)
        except Exception as e:
            print(e.message)
            succ = False
        finally:
            nose.tools.assert_true(succ)

        try:
            resp = self.client.create_preset(self.preset_name, self.container, True)
        except Exception as e:
            print(e.message)
            succ = False
        finally:
            nose.tools.assert_true(succ)

        try:
            source = {'sourceKey': self.source_key}
            target = {'targetKey': self.target_key, 'presetName': self.preset_name}
            resp = self.client.create_job(self.pipeline_name, source, target)
        except Exception as e:
            print(e.message)
            succ = False
        finally:
            nose.tools.assert_true(succ)

Blosc / bloscpack / test / test_append.py
    # now get the first and the last chunk and check that the shuffle doesn't
    # match
    bloscpack_header, offsets = reset_read_beginning(orig)[0:4:3]
    orig.seek(offsets[0])
    checksum_impl = CHECKSUMS_LOOKUP[bloscpack_header['checksum']]
    compressed_zero,  blosc_header_zero, digest = \
        _read_compressed_chunk_fp(orig, checksum_impl)
    decompressed_zero = blosc.decompress(compressed_zero)
    orig.seek(offsets[-1])
    compressed_last,  blosc_header_last, digest = \
        _read_compressed_chunk_fp(orig, checksum_impl)
    decompressed_last = blosc.decompress(compressed_last)
    # first chunk has shuffle active
    nt.assert_equal(blosc_header_zero['flags'], 1)
    # last chunk doesn't
    nt.assert_equal(blosc_header_last['flags'], 0)

angr / angr / tests / test_rcr.py
def test_rcr():
    p = angr.Project(os.path.join(os.path.dirname(__file__), '..', '..', 'binaries', 'tests', 'i386', 'rcr_test'))
    result = p.factory.successors(p.factory.entry_state()).successors[0]
    nose.tools.assert_true(claripy.is_true(result.regs.cl == 8))

Blosc / bloscpack / test / test_memory_io.py
print("Creating test array")
    create_array_fp(repeats, in_fp, progress=progress)
    in_fp_size = in_fp.tell()
    if progress:
        print("Compressing")
    in_fp.seek(0)
    nchunks, chunk_size, last_chunk_size = \
            calculate_nchunks(in_fp_size, chunk_size)
    # let us play merry go round
    source = PlainFPSource(in_fp)
    sink = CompressedMemorySink()
    pack(source, sink, nchunks, chunk_size, last_chunk_size, metadata=metadata)
    source = CompressedMemorySource(sink)
    sink = PlainMemorySink()
    unpack(source, sink)
    nt.assert_equal(metadata, source.metadata)
    source = PlainMemorySource(sink.chunks)
    sink = CompressedFPSink(out_fp)
    pack(source, sink, nchunks, chunk_size, last_chunk_size, metadata=metadata)
    out_fp.seek(0)
    source = CompressedFPSource(out_fp)
    sink = PlainFPSink(dcmp_fp)
    unpack(source, sink)
    nt.assert_equal(metadata, source.metadata)
    in_fp.seek(0)
    dcmp_fp.seek(0)
    cmp_fp(in_fp, dcmp_fp)
    return source.metadata

angr / angr / tests / test_strcasecmp.py
def test_i386():
    p = angr.Project(os.path.join(test_location, 'i386', 'test_strcasecmp'), auto_load_libs=False)
    arg1 = claripy.BVS('arg1', 20*8)
    s = p.factory.entry_state(args=("test_strcasecmp", arg1))
    sm = p.factory.simulation_manager(s)
    sm.explore()

    sm.move('deadended', 'found', filter_func=lambda s: b"Welcome" in s.posix.dumps(1))

    nose.tools.assert_equal(len(sm.found), 1)

    f = sm.found[0]
    sol = f.solver.eval(arg1, cast_to=bytes)
    nose.tools.assert_in(b'\x00', sol)
    nose.tools.assert_equal(sol[:sol.index(b'\x00')].lower(), b'letmein')
    nose.tools.assert_in(b'wchar works', f.posix.dumps(1))

craffel / mir_eval / tests / test_multipitch.py
def unit_test_metrics():
    empty_array = np.array([])
    ref_time = np.array([0.0, 0.1])
    ref_freqs = [np.array([201.]), np.array([])]
    est_time = np.array([0.0, 0.1])
    est_freqs = [np.array([200.]), np.array([])]

    # ref sizes unequal
    nose.tools.assert_raises(
        ValueError, mir_eval.multipitch.metrics,
        np.array([0.0]), ref_freqs, est_time, est_freqs)

    # est sizes unequal
    nose.tools.assert_raises(
        ValueError, mir_eval.multipitch.metrics,
        ref_time, ref_freqs, np.array([0.0]), est_freqs)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # Test for warnings on empty values
        actual_score = mir_eval.multipitch.metrics(
            ref_time, [empty_array, empty_array],
            est_time, [empty_array, empty_array])
        assert len(w) == 6
        assert issubclass(w[-1].category, UserWarning)
        assert str(w[-1].message) == "Reference frequencies are all empty."

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # Test for warnings on empty values
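
unit_test_metrics passes the exception type, the callable and its arguments to nose.tools.assert_raises. Because the helper is an alias for unittest's assertRaises, it should also work as a context manager; here is a minimal sketch, where parse_positive is a hypothetical function written only for this illustration.

import nose.tools

def parse_positive(value):
    # hypothetical helper, defined only for this example
    number = int(value)
    if number <= 0:
        raise ValueError("expected a positive integer")
    return number

def test_parse_positive_rejects_bad_input():
    # call form: exception type, the callable, then the callable's arguments
    nose.tools.assert_raises(ValueError, parse_positive, "-3")
    # context-manager form, handy when the failing call needs more than one statement
    with nose.tools.assert_raises(ValueError):
        parse_positive("0")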

NERSC / pytokio / tests / test_connectors_nersc_isdct.py
@nose.tools.with_setup(tokiotest.create_tempfile, tokiotest.delete_tempfile)
def test_serializer():
    """
    NerscIsdct can deserialize its serialization
    """
    # Read from a cache file
    isdct_data = tokio.connectors.nersc_isdct.NerscIsdct(DEFAULT_INPUT)
    # Serialize the object, then re-read it and verify it
    print("Caching to %s" % tokiotest.TEMP_FILE.name)
    isdct_data.save_cache(tokiotest.TEMP_FILE.name)
    # Open a second file handle to this cached file to load it
    isdct_cached = tokio.connectors.nersc_isdct.NerscIsdct(tokiotest.TEMP_FILE.name)
    validate_object(isdct_cached)
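
This example and the next one decorate test functions with nose.tools.with_setup, which tells the nose runner to call a setup function before the test and a teardown function after it. A minimal, self-contained sketch with a made-up fixture:

import nose.tools

fixture = {}

def create_fixture():
    fixture['rows'] = [1, 2, 3]

def destroy_fixture():
    fixture.clear()

@nose.tools.with_setup(create_fixture, destroy_fixture)
def test_fixture_is_populated():
    nose.tools.assert_equal(len(fixture['rows']), 3)

Note that with_setup only takes effect when the test is collected and run by nose; calling the decorated function directly skips the setup and teardown.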

baidu / tera / test / testcase / test_snapshot.py
@nose.tools.with_setup(common.create_kv_table, common.cleanup)
def test_kv_snapshot_relaunch():
    """
    kv cluster relaunch
    1. write data set 1
    2. create snapshot
    3. write data set 2
    4. scan w/snapshot, scan w/o snapshot & compare
    5. kill & launch cluster
    6. repeat 4
    :return: None
    """
    table_name = 'test'
    dump_file1 = 'dump1.out'
    dump_file2 = 'dump2.out'
    scan_file1 = 'scan1.out'
    scan_file2 = 'scan2.out'

jaysw / ipydb / tests / test_completion.py
    def test_table_name(self):
        result = self.completer.table_name(Event(symbol='ba'))
        nt.assert_equal(sorted(result), ['bar', 'baz'])
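
Several of the projects above shorten the import to nt (import nose.tools as nt). nose.tools also provides the even terser eq_ and ok_ helpers; a small sketch of both conventions with throwaway values:

import nose.tools as nt

def test_short_aliases():
    names = sorted(['baz', 'bar'])
    nt.assert_equal(names, ['bar', 'baz'])
    # eq_ and ok_ are nose's shorthand spellings of assert_equal and assert_true
    nt.eq_(len(names), 2)
    nt.ok_('bar' in names)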