# Cirq testing helper: checks a value's QASM output against its unitary.
# The enclosing function signature and imports are restored so the excerpt
# is runnable; indentation is reconstructed.
import warnings
from typing import Any, Sequence

from cirq import devices, ops, protocols


def assert_qasm_is_consistent_with_unitary(val: Any) -> None:
    # Only test if qiskit is installed.
    try:
        import qiskit
    except ImportError:
        # coverage: ignore
        warnings.warn("Skipped assert_qasm_is_consistent_with_unitary because "
                      "qiskit isn't installed to verify against.")
        return

    unitary = protocols.unitary(val, None)
    if unitary is None:
        # Vacuous consistency.
        return

    if isinstance(val, ops.Operation):
        qubits: Sequence[ops.Qid] = val.qubits
        op = val
    elif isinstance(val, ops.Gate):
        qid_shape = protocols.qid_shape(val)
        remaining_shape = list(qid_shape)
        # Drop axes that are consumed by explicit control qubits, if any.
        controls = getattr(val, 'control_qubits', None)
        if controls is not None:
            for i, q in zip(reversed(range(len(controls))), reversed(controls)):
                if q is not None:
                    remaining_shape.pop(i)
        qubits = devices.LineQid.for_qid_shape(remaining_shape)
        op = val.on(*qubits)
    else:
        raise NotImplementedError("Don't know how to test {!r}".format(val))

    args = protocols.QasmArgs(
        qubit_id_map={q: 'q[{}]'.format(i) for i, q in enumerate(qubits)})
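# A minimal usage sketch (assumes cirq and qiskit are installed; not part of
# the original excerpt). The check is vacuous for values with no unitary and
# otherwise exercises the QASM-generation path above:
import cirq

assert_qasm_is_consistent_with_unitary(cirq.X)
assert_qasm_is_consistent_with_unitary(cirq.CNOT.on(*cirq.LineQubit.range(2)))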
from dataclasses import dataclass
from typing import Sequence

from pcs.common.types import DrRole
from pcs.common.interface.dto import DataTransferObject


@dataclass(frozen=True)
class DrConfigNodeDto(DataTransferObject):
    name: str


@dataclass(frozen=True)
class DrConfigSiteDto(DataTransferObject):
    site_role: DrRole
    node_list: Sequence[DrConfigNodeDto]


@dataclass(frozen=True)
class DrConfigDto(DataTransferObject):
    local_site: DrConfigSiteDto
    remote_site_list: Sequence[DrConfigSiteDto]


@dataclass(frozen=True)
class DrSiteStatusDto(DataTransferObject):
    local_site: bool
    site_role: DrRole
    status_plaintext: str
    status_successfully_obtained: bool
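# A minimal construction sketch (node names are made up; DrRole members
# PRIMARY/RECOVERY are assumed from pcs's disaster-recovery model):
local = DrConfigSiteDto(
    site_role=DrRole.PRIMARY,
    node_list=[DrConfigNodeDto(name="node-1"), DrConfigNodeDto(name="node-2")],
)
remote = DrConfigSiteDto(site_role=DrRole.RECOVERY, node_list=[])
config = DrConfigDto(local_site=local, remote_site_list=[remote])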
# These argument dataclasses rely on helpers from their surrounding project
# (`argument`, `with_slots`, `DocOutputArgs`, `Vary`, and the *_DOC strings);
# only standard-library imports are added here.
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Sequence


@dataclass(frozen=True)
class ReportReasonsArgs:
    bench_file_path: Path = argument(
        name_optional=True,
        doc="""
        Path to a benchfile.
        All traces produced from this will have reasons reported.
        """,
    )
    max_iterations: Optional[int] = argument(default=None, doc=MAX_ITERATIONS_FOR_ANALYZE_DOC)


@with_slots
@dataclass(frozen=True)
class DiffArgs(DocOutputArgs):
    trace_paths: Sequence[Path] = argument(name_optional=True, doc=DIFFABLE_PATHS_DOC)
    vary: Optional[Vary] = argument(default=None, doc=VARY_DOC)
    metrics_as_columns: bool = argument(
        default=False, doc="Show metrics on columns and tests on rows (default is the reverse)"
    )
    sort_by_run_metric: Optional[str] = argument(
        default=None, doc="For --metrics-as-columns, sort rows by this metric"
    )
    machines: Optional[Sequence[str]] = argument(
        default=None,
        doc="Machine the test results are on (if different from the machine the benchfile is on).",
    )
    test_where: Optional[Sequence[str]] = argument(default=None, doc=TEST_WHERE_DOC)
from typing import NamedTuple, Sequence

import pendulum


class InstagramPostThumb(NamedTuple):
    post_num_id: str
    owner_num_id: int
    caption: str
    shortcode: str
    comment_count: int
    like_count: int
    created_at: pendulum.DateTime  # pendulum.datetime is a factory function, not a type
    img_height: int
    img_width: int
    img_url: str
    is_video: bool
    hashtags: Sequence[str]
    mentions: Sequence[str]

    @property
    def simple_str(self) -> str:
        d = self.created_at.to_datetime_string()
        return f"{self.shortcode} {d} {self.caption[:30]}"

    @property
    def engagement(self) -> int:
        # Total interactions: likes plus comments.
        return self.like_count + self.comment_count


class InstagramPost(NamedTuple):
    post_num_id: int
    shortcode: str
    img_height: int
    img_width: int
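# A minimal usage sketch with made-up values (only created_at, shortcode,
# caption, like_count, and comment_count matter for the two properties):
thumb = InstagramPostThumb(
    post_num_id="1", owner_num_id=42, caption="sunset over the bay",
    shortcode="Bxyz123", comment_count=3, like_count=17,
    created_at=pendulum.datetime(2020, 1, 1), img_height=1080, img_width=1080,
    img_url="https://example.com/p.jpg", is_video=False,
    hashtags=["sunset"], mentions=[],
)
print(thumb.simple_str)   # Bxyz123 2020-01-01 00:00:00 sunset over the bay
print(thumb.engagement)   # 20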
from typing import Callable, Sequence, NamedTuple, Optional, Tuple

from func_approx.dnn_spec import DNNSpec
from func_approx.func_approx_base import FuncApproxBase
from func_approx.linear_approx import LinearApprox
from func_approx.dnn import DNN
from utils.generic_typevars import S, A


class FuncApproxSpec(NamedTuple):
    state_feature_funcs: Sequence[Callable[[S], float]]
    sa_feature_funcs: Sequence[Callable[[Tuple[S, A]], float]]
    dnn_spec: Optional[DNNSpec]
    reglr_coeff: float = 0.
    learning_rate: float = 0.1
    adam_params: Tuple[bool, float, float] = (True, 0.9, 0.99)
    add_unit_feature: bool = True

    def get_vf_func_approx_obj(self) -> FuncApproxBase:
        # Fall back to a linear approximator when no DNN spec is given.
        if self.dnn_spec is None:
            ret = LinearApprox(
                feature_funcs=self.state_feature_funcs,
                reglr_coeff=self.reglr_coeff,
                learning_rate=self.learning_rate,
                adam=self.adam_params[0],
                adam_decay1=self.adam_params[1],
                adam_decay2=self.adam_params[2],
                add_unit_feature=self.add_unit_feature
            )
        else:
            # The DNN branch is truncated in the original excerpt.
            raise NotImplementedError("DNN-backed approximator elided here")
        return ret
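# A minimal usage sketch (hypothetical feature functions; dnn_spec=None
# selects the LinearApprox branch above):
spec = FuncApproxSpec(
    state_feature_funcs=[lambda s: float(s), lambda s: float(s) ** 2],
    sa_feature_funcs=[],
    dnn_spec=None,
)
vf = spec.get_vf_func_approx_obj()  # a LinearApprox over the two features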
# `Position` and `DirectoryError` come from this function's surrounding
# module; only typing imports are added here.
from typing import Any, Mapping, Optional, Sequence


def positionFromMapping(mapping: Mapping[str, Any]) -> Position:
    if type(mapping) is not dict:
        raise DirectoryError(f"Position must be mapping: {mapping!r}")
    name: Optional[str] = mapping.get("name", None)
    if name is None:
        raise DirectoryError(f"Position must have name: {mapping!r}")
    elif type(name) is not str:
        raise DirectoryError(f"Position name must be text: {name!r}")
    members: Sequence[str] = mapping.get("members", [])
    if type(members) is not list:
        raise DirectoryError(
            f"Position members must be sequence of text: {members!r}"
        )
    for m in members:
        if type(m) is not str:
            raise DirectoryError(f"Position members must be text: {m!r}")
    return Position(name=name, members=frozenset(members))
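# A minimal usage sketch (field values are made up):
position = positionFromMapping({"name": "maintainer", "members": ["alice", "bob"]})
# positionFromMapping({"name": "maintainer", "members": "alice"}) would raise
# DirectoryError, since members must be a list of strings.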
""" Main tsrc entry point """
import argparse
import functools
import importlib
import os
import sys
import textwrap
from typing import Callable, Optional, Sequence
import colored_traceback
import cli_ui as ui
import tsrc
ArgsList = Optional[Sequence[str]]
MainFunc = Callable[..., None]
def fix_cmd_args_for_foreach(
args: argparse.Namespace, foreach_parser: argparse.ArgumentParser
) -> None:
""" We want to support both:
$ tsrc foreach -c 'shell command'
and
$ tsrc foreach -- some-cmd --some-opts
Due to argparse limitations, args.cmd will always be
a list, but we nee a *string* when using 'shell=True'
So transform the argparse.Namespace object to have
* args.cmd suitable to pass to subprocess later
# Method excerpt from a dependency-parser class; `self` carries the trained
# parser, vocabulary, and configuration, and `evaluate_official_script` and
# `Document` come from the surrounding module.
def evaluate(self, docs: Sequence[Document], **kwargs):
    """
    Evaluate on the test set.

    :param docs: gold test set
    :param kwargs: unused
    :return: (UAS, LAS, speed), where speed is measured in sentences per second
    """
    assert isinstance(docs, Sequence), 'Expect docs to be Sequence of Document'
    with self.context:
        UAS, LAS, speed = evaluate_official_script(self._parser, self._vocab,
                                                   self._config.num_buckets_valid,
                                                   self._config.test_batch_size,
                                                   self._config.test_file,
                                                   None, documents=docs)
    return UAS, LAS, speed
"""
This module contains Google Cloud Language operators.
"""
from typing import Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.language_v1 import enums
from google.cloud.language_v1.types import Document
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.natural_language import CloudNaturalLanguageHook
MetaData = Sequence[Tuple[str, str]]
class CloudLanguageAnalyzeEntitiesOperator(BaseOperator):
"""
Finds named entities in the text along with entity types,
salience, mentions for each entity, and other properties.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudLanguageAnalyzeEntitiesOperator`
:param document: Input document.
If a dict is provided, it must be of the same form as the protobuf message Document
:type document: dict or google.cloud.language_v1.types.Document
:param encoding_type: The encoding type used by the API to calculate offsets.
:type encoding_type: google.cloud.language_v1.enums.EncodingType
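# A hedged usage sketch (the full operator defines more parameters than this
# excerpt shows; task_id comes from BaseOperator, and the dict form mirrors
# the Document protobuf message):
analyze_entities = CloudLanguageAnalyzeEntitiesOperator(
    document={"content": "Airflow is a workflow engine.", "type": "PLAIN_TEXT"},
    encoding_type=enums.EncodingType.UTF8,
    task_id="analyze_entities",
)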
# The scikit-learn estimators and vectorizers referenced below need these
# imports to stand alone.
from typing import (Any, Callable, Dict, List, Optional, Sequence, Tuple,
                    TypeVar, Union)

from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
from sklearn.linear_model import (PassiveAggressiveClassifier,
                                  PassiveAggressiveRegressor,
                                  Perceptron,
                                  SGDClassifier)
from sklearn.naive_bayes import BernoulliNB, MultinomialNB

# Type aliases
BinRanges = List[Tuple[float, float]]
Learner = Union[Perceptron,
                MiniBatchKMeans,
                BernoulliNB,
                MultinomialNB,
                SGDClassifier,
                PassiveAggressiveRegressor,
                PassiveAggressiveClassifier]
ParamGrid = List[Dict[str, List[Any]]]
Vectorizer = Union[DictVectorizer, FeatureHasher]
Numeric = Union[int, float]
Generic = TypeVar('Generic')
ScoringFunction = Callable[[Sequence[Numeric],
                            Sequence[Numeric]],
                           Numeric]
Scorer = Optional[Union[str, ScoringFunction]]

# Seed for random state
SEED = 123456789

# Define default parameter grids
DEFAULT_PARAM_GRIDS = \
    {MiniBatchKMeans: [{'n_clusters': [3, 5, 10],
                        'init': ['k-means++', 'random'],
                        'random_state': [SEED]}],
     BernoulliNB: [{'alpha': [0.1, 0.25, 0.5],
                    'fit_prior': [True, False]}],
     MultinomialNB: [{'alpha': [0.1, 0.25, 0.5],
                      'fit_prior': [True, False]}],
     Perceptron: [{'penalty': [None, 'l2', 'l1', 'elasticnet'],
                   # The original excerpt is truncated here; the grid is
                   # closed minimally (seeding mirrors the grids above).
                   'random_state': [SEED]}]}
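# A minimal usage sketch (hypothetical toy data) pairing a learner with its
# default grid via scikit-learn's GridSearchCV:
from sklearn.model_selection import GridSearchCV

X = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]
y = [0, 1, 1, 0]
search = GridSearchCV(Perceptron(), DEFAULT_PARAM_GRIDS[Perceptron], cv=2)
search.fit(X, y)
print(search.best_params_)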