from dataclasses import dataclass, field
from typing import List, Optional

from dacite import Config, from_dict


def test_from_dict_with_post_init():
    @dataclass
    class X:
        s: str = field(init=False)

    x = X()
    x.s = 'test'

    result = from_dict(X, {'s': 'test'})

    assert result == x

def test_from_dict_with_nested_prefix():
    @dataclass
    class X:
        i: int

    @dataclass
    class Y:
        x: X

    @dataclass
    class Z:
        y: Y

    result = from_dict(Z, {'y': {'x_i': 1}}, Config(prefixed={'y.x': 'x_'}))

    assert result == Z(y=Y(x=X(i=1)))

def test_from_dict_with_optional_nested_data_class():
    @dataclass
    class X:
        i: int

    @dataclass
    class Y:
        x: Optional[X]

    result = from_dict(Y, {"x": {"i": 1}})

    assert result == Y(x=X(i=1))

def test_from_dict_with_generic_collection():
    @dataclass
    class X:
        l: List[int]

    result = from_dict(X, {"l": [1]})

    assert result == X(l=[1])

def test_from_dict_with_transform_of_missing_optional_field():
    @dataclass
    class X:
        s: Optional[str]

    result = from_dict(X, {}, Config(transform={'s': str.lower}))

    assert result == X(s=None)

def test_from_dict_with_nested_data_class():
    @dataclass
    class X:
        i: int

    @dataclass
    class Y:
        s: str
        x: X

    result = from_dict(Y, {"s": "test", "x": {"i": 1}})

    assert result == Y(s="test", x=X(i=1))

def test_from_dict_with_missing_optional_nested_data_class():
    @dataclass
    class X:
        i: int

    @dataclass
    class Y:
        x: Optional[X]

    result = from_dict(Y, {})

    assert result == Y(x=None)

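Note: the Config(prefixed=...) and Config(transform=...) options in the tests above appear to come from an older dacite release that keyed hooks by field name. A minimal sketch of the same idea against a recent dacite release, where hooks are keyed by type via type_hooks; the User dataclass and its field are illustrative, not part of the snippets above:

from dataclasses import dataclass

from dacite import Config, from_dict


@dataclass
class User:
    name: str


# Every str value in the input dict is passed through str.lower before assignment.
user = from_dict(User, {"name": "JOHN"}, Config(type_hooks={str: str.lower}))
assert user == User(name="john")
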
def _update_or_delete_charge(charges, case_number, edit_action_ambiguous_charge_id, edit) -> OeciCharge:
    # Locate the charge being edited by its ambiguous_charge_id.
    charge = next((charge for charge in charges if charge.ambiguous_charge_id == edit_action_ambiguous_charge_id))
    charge_dict = RecordEditor._parse_charge_edits(edit)
    charge_type_string = charge_dict.pop("charge_type", None)
    # Apply the plain field edits to a copy of the OECI charge.
    edited_oeci_charge = replace(charge, **charge_dict)
    if charge_type_string:
        # A charge-type edit requires rebuilding the richer Charge dataclass from the
        # edited OECI charge's fields plus the new type information.
        charge_type_data = {
            "id": f"{charge.ambiguous_charge_id}-0",
            "case_number": case_number,
            "charge_type": RecordEditor._get_charge_type(charge_type_string),
            **asdict(edited_oeci_charge),
        }
        new_charge = from_dict(data_class=Charge, data=charge_type_data)
        return new_charge
    else:
        return edited_oeci_charge

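The function above rebuilds a Charge from an edited OeciCharge by combining replace(), asdict(), and from_dict(). A minimal, self-contained sketch of that pattern, using hypothetical Animal/Dog dataclasses in place of the project's charge types:

from dataclasses import asdict, dataclass, replace

from dacite import from_dict


@dataclass
class Animal:
    name: str


@dataclass
class Dog(Animal):
    breed: str = "unknown"


animal = Animal(name="rex")
animal = replace(animal, name="Rex")        # apply field edits to a copy
data = {"breed": "lab", **asdict(animal)}   # widen the dict with the extra fields
dog = from_dict(data_class=Dog, data=data)  # rebuild as the richer dataclass
assert dog == Dog(name="Rex", breed="lab")
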
sent = asdict(sentences[si])
sent['labels'] = sent['labels'] if sent['labels'] else dict()
sent['labels']['evidence'] = 1
tokens = sent['tokens']
for t in tokens:
    if t.get('labels', None) is None:
        t['labels'] = dict()
    # Mark a token as evidence if its character span overlaps the evidence characters.
    if len(set(range(t['start_offset'], t['end_offset'])) & ev_chars) > 0:
        t['labels']['evidence'] = 1
    elif 'evidence' not in t['labels']:
        t['labels']['evidence'] = 0
sent['tokens'] = tuple(tokens)
sentences[si] = from_dict(data_class=Sentence, data=sent)
doc_parts = asdict(doc)
doc_parts['sentences'] = tuple(sentences)
# Rebuild the Document dataclass from its edited dict representation.
doc = from_dict(data_class=Document, data=doc_parts)
evidence_vector = torch.LongTensor([t.labels['evidence'] if t.labels is not None else 0 for t in doc.tokens()])
ann = Annotation(doc=doc,
                 prompt_id=str(prompt_id),
                 tokenized_sentences=[torch.IntTensor([t.token_id for t in s.tokens]) for s in doc.sentences],
                 i=i,
                 c=c,
                 o=o,
                 evidence_texts=tuple(set(evidence_texts.values)),
                 evidence_spans=tuple(set(ev_spans)),
                 evidence_vector=evidence_vector,
                 significance_class=label).retokenize(tokenizer)
docid = int(docid)
if docid in train_ids:
    train.append(ann)
if docid in val_ids: