diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8a1cb631e381..f48aa74b2de1 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -30,7 +30,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit @@ -39,12 +39,12 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5 + uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5 + uses: github/codeql-action/autobuild@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5 + uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml index a9e592d8da88..806f6eff68ac 100644 --- a/.github/workflows/copilot-setup-steps.yml +++ b/.github/workflows/copilot-setup-steps.yml @@ -32,7 +32,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 03d08b62d1a9..f9e9713724fc 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,11 +17,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit - name: 'Checkout Repository' uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: 'Dependency Review' - uses: actions/dependency-review-action@40c09b7dc99638e5ddb0bfd91c1673effc064d8a # v4.8.1 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 # v4.8.2 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 838410795c0e..fbf4afd823d3 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -29,19 +29,21 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest] - python-version: ["3.11", "3.12", "3.13"] + python-version: ["3.11", "3.12", "3.13", "3.14"] exclude: - os: windows-latest python-version: 3.11 - os: windows-latest python-version: 3.13 + - os: windows-latest + python-version: 3.14 env: OS: ${{ matrix.os }} SPHINX_WARNINGS_AS_ERROR: true SPHINX_OPTS: "-v -j 2" steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 @@ -111,7 +113,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: 
step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit @@ -124,7 +126,7 @@ jobs: path: build_docs - name: Deploy to gh pages - uses: JamesIves/github-pages-deploy-action@6c2d9db40f9296374acc17b90404b6e8864128c8 # v4.7.3 + uses: JamesIves/github-pages-deploy-action@4a3abc783e1a24aeb44c16e869ad83caf6b4cc23 # v4.7.4 with: branch: gh-pages folder: ${{ github.workspace }}/build_docs/ diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 72df7edd2aba..8e851084f98b 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index e21eb3ef4b78..4e7b823d23fe 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -30,7 +30,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest] - python-version: ["3.11", "3.12", "3.13"] + python-version: ["3.11", "3.12", "3.13", "3.14"] min-version: [false] include: - os: ubuntu-latest @@ -43,6 +43,8 @@ jobs: python-version: "3.11" - os: windows-latest python-version: "3.13" + - os: windows-latest + python-version: "3.14" env: OS: ${{ matrix.os }} PYTHON: ${{ matrix.python-version }} @@ -50,7 +52,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index b85162f77efd..d1acc09bdb0f 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -31,7 +31,7 @@ jobs: steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit @@ -71,6 +71,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@4e94bd11f71e507f7f87df81788dff88d1dacbfb # v3.29.5 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/upload_to_pypi.yaml b/.github/workflows/upload_to_pypi.yaml index 1e76b69845d1..8148e71f7f9a 100644 --- a/.github/workflows/upload_to_pypi.yaml +++ b/.github/workflows/upload_to_pypi.yaml @@ -17,7 +17,7 @@ jobs: id-token: write steps: - name: Harden Runner - uses: step-security/harden-runner@f4a75cfd619ee5ce8d5b864b0d183aff3c69b55a # v2.13.1 + uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2 with: egress-policy: audit diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index be836b1fbd21..1554b1101e5a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. 
- rev: 'v0.14.1' + rev: 'v0.14.3' hooks: - id: ruff-check types_or: [python, pyi, jupyter, toml] diff --git a/docs/changes/0.54.3.rst b/docs/changes/0.54.3.rst new file mode 100644 index 000000000000..1b13b39d086c --- /dev/null +++ b/docs/changes/0.54.3.rst @@ -0,0 +1,18 @@ +QCoDeS 0.54.3 (2025-11-11) +========================== + +Improved: +--------- + +- - Improved pyvisa-sim YAMLs for Lakeshore Models 335, 336, and 372. + - Updated Lakeshore tests to use pyvisa-sim backend instead of mocked classes. + - Updated lakeshore_base.py to bypass waiting when using blocking_t in sim mode. (:pr:`7606`) +- Fixes a bug in the LinSweeper iterator that caused it to always raise StopIteration after + completing a single sweep. This bug meant LinSweeper could not be used in a nested measurement function. (:pr:`7607`) + +Improved Drivers: +----------------- + +- The Stanford SR86x drivers now statically assign attributes statically + for more member InstrumentModules and parameters enabling better documentation, + type checking and IDE integration. (:pr:`7542`) diff --git a/docs/changes/0.54.4.rst b/docs/changes/0.54.4.rst new file mode 100644 index 000000000000..1fbe0c8856ba --- /dev/null +++ b/docs/changes/0.54.4.rst @@ -0,0 +1,8 @@ +QCoDeS 0.54.4 (2025-12-12) +========================== + +Improved: +--------- + +- The `InterDependencies_` class is now frozen during the performance of a measurement so it cannot be modified. + This enables caching of attributes on the class significantly reducing the overhead of measurements. (:pr:`7712`) diff --git a/docs/changes/index.rst b/docs/changes/index.rst index 1dc364c68f70..42d07dd40d4f 100644 --- a/docs/changes/index.rst +++ b/docs/changes/index.rst @@ -3,6 +3,8 @@ Changelogs .. toctree:: Unreleased + 0.54.4 <0.54.4> + 0.54.3 <0.54.3> 0.54.1 <0.54.1> 0.54.0 <0.54.0> 0.53.0 <0.53.0> diff --git a/docs/changes/newsfragments/7542.improved_driver b/docs/changes/newsfragments/7542.improved_driver deleted file mode 100644 index 60c9f25745d8..000000000000 --- a/docs/changes/newsfragments/7542.improved_driver +++ /dev/null @@ -1,3 +0,0 @@ -The Stanford SR86x drivers now statically assign attributes statically -for more member InstrumentModules and parameters enabling better documentation, -type checking and IDE integration. diff --git a/pyproject.toml b/pyproject.toml index 8f67137a7cd0..53e3fe9eff69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Topic :: Scientific/Engineering", ] license = "MIT" @@ -217,11 +218,10 @@ markers = "serial" filterwarnings = [ 'error', 'ignore:open_binary is deprecated:DeprecationWarning', # pyvisa-sim deprecated in 3.11 un-deprecated in 3.12. Drop filter once we drop support for 3.11 - 'ignore:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning', # jupyter 'ignore:unclosed database in:ResourceWarning', # internal should be fixed 'ignore:unclosed\ None: self.conn = connect(path_to_db, self._debug) def set_interdependencies( - self, interdeps: InterDependencies_, shapes: Shapes | None = None + self, + interdeps: InterDependencies_, + shapes: Shapes | None = None, + override: bool = False, ) -> None: """ Set the interdependencies object (which holds all added @@ -579,7 +582,7 @@ def set_interdependencies( f"Wrong input type. 
Expected InterDepencies_, got {type(interdeps)}" ) - if not self.pristine: + if not self.pristine and not override: mssg = "Can not set interdependencies on a DataSet that has been started." raise RuntimeError(mssg) self._rundescriber = RunDescriber(interdeps, shapes=shapes) diff --git a/src/qcodes/dataset/data_set_in_memory.py b/src/qcodes/dataset/data_set_in_memory.py index bbd8d0bd2ae4..f64c0f40a0c7 100644 --- a/src/qcodes/dataset/data_set_in_memory.py +++ b/src/qcodes/dataset/data_set_in_memory.py @@ -748,7 +748,10 @@ def _set_parent_dataset_links(self, links: list[Link]) -> None: self._parent_dataset_links = links def _set_interdependencies( - self, interdeps: InterDependencies_, shapes: Shapes | None = None + self, + interdeps: InterDependencies_, + shapes: Shapes | None = None, + override: bool = False, ) -> None: """ Set the interdependencies object (which holds all added @@ -761,7 +764,7 @@ def _set_interdependencies( f"Wrong input type. Expected InterDepencies_, got {type(interdeps)}" ) - if not self.pristine: + if not self.pristine and not override: mssg = "Can not set interdependencies on a DataSet that has been started." raise RuntimeError(mssg) self._rundescriber = RunDescriber(interdeps, shapes=shapes) diff --git a/src/qcodes/dataset/descriptions/dependencies.py b/src/qcodes/dataset/descriptions/dependencies.py index 49a98b26b7e1..5d23129520fa 100644 --- a/src/qcodes/dataset/descriptions/dependencies.py +++ b/src/qcodes/dataset/descriptions/dependencies.py @@ -21,8 +21,6 @@ if TYPE_CHECKING: from collections.abc import Sequence - from networkx.classes.reportviews import DegreeView - from .versioning.rundescribertypes import InterDependencies_Dict _LOGGER = logging.getLogger(__name__) ParamSpecTree = dict[ParamSpecBase, tuple[ParamSpecBase, ...]] @@ -137,9 +135,6 @@ def _validate_acyclic(self, interdeps: ParamSpecTree) -> None: def _validate_no_chained_dependencies(self, interdeps: ParamSpecTree) -> None: for node, in_degree in self._dependency_subgraph.in_degree: out_degree = self._dependency_subgraph.out_degree(node) - assert isinstance(out_degree, int), ( - "The out_degree method with arguments should have returned an int" - ) if in_degree > 0 and out_degree > 0: depends_on_nodes = list(self._dependency_subgraph.successors(node)) depended_on_nodes = list(self._dependency_subgraph.predecessors(node)) @@ -155,6 +150,8 @@ def _dependency_subgraph(self) -> nx.DiGraph[str]: for edge in self.graph.edges if self.graph.edges[edge]["interdep_type"] == "depends_on" ] + # the type annotations does not currently encode that edge_subgraph of a DiGraph + # is a DiGraph return cast("nx.DiGraph[str]", self.graph.edge_subgraph(depends_on_edges)) @property @@ -164,6 +161,8 @@ def _inference_subgraph(self) -> nx.DiGraph[str]: for edge in self.graph.edges if self.graph.edges[edge]["interdep_type"] == "inferred_from" ] + # the type annotations does not currently encode that edge_subgraph of a DiGraph + # is a DiGraph return cast("nx.DiGraph[str]", self.graph.edge_subgraph(inferred_from_edges)) def extend( @@ -195,7 +194,7 @@ def _paramspec_tree_by_type(self, interdep_type: _InterDepType) -> ParamSpecTree return {key: tuple(val) for key, val in paramspec_tree_list.items()} def _node_to_paramspec(self, node_id: str) -> ParamSpecBase: - return cast("ParamSpecBase", self.graph.nodes[node_id]["value"]) + return self.graph.nodes[node_id]["value"] def _paramspec_predecessors_by_type( self, paramspec: ParamSpecBase, interdep_type: _InterDepType @@ -247,13 +246,10 @@ def inferences(self) -> 
ParamSpecTree: @property def standalones(self) -> frozenset[ParamSpecBase]: - # since we are not requesting the degree of a specific node, we will get a DegreeView - # the type stubs does not yet reflect this so we cast away the int type here - degree_iterator = cast("DegreeView[str]", self.graph.degree) return frozenset( [ self._node_to_paramspec(node_id) - for node_id, degree in degree_iterator + for node_id, degree in self.graph.degree if degree == 0 ] ) @@ -270,10 +266,7 @@ def paramspecs(self) -> tuple[ParamSpecBase, ...]: """ Return the ParamSpecBase objects of this instance """ - return tuple( - cast("ParamSpecBase", paramspec) - for _, paramspec in self.graph.nodes(data="value") - ) + return tuple(paramspec for _, paramspec in self.graph.nodes(data="value")) @property @deprecated( @@ -319,9 +312,7 @@ def top_level_parameters(self) -> tuple[ParamSpecBase, ...]: } standalone_top_level = { self._node_to_paramspec(node_id) - # since we are not requesting the degree of a specific node, we will get a DegreeView - # the type stubs does not yet reflect this so we cast away the int type here - for node_id, degree in cast("DegreeView[str]", self._graph.degree) + for node_id, degree in self._graph.degree if degree == 0 } @@ -349,9 +340,6 @@ def remove(self, paramspec: ParamSpecBase) -> InterDependencies_: to this instance, but has the given parameter removed. """ paramspec_in_degree = self.graph.in_degree(paramspec.name) - assert isinstance(paramspec_in_degree, int), ( - "The in_degree method with arguments should have returned an int" - ) if paramspec_in_degree > 0: raise ValueError( f"Cannot remove {paramspec.name}, other parameters depend on or are inferred from it" @@ -440,6 +428,18 @@ def validate_paramspectree( else: raise ValueError(f"Invalid {interdep_type_internal}") from TypeError(cause) + def _invalid_subsets( + self, paramspecs: Sequence[ParamSpecBase] + ) -> tuple[set[str], set[str]] | None: + subset_nodes = {paramspec.name for paramspec in paramspecs} + for subset_node in subset_nodes: + descendant_nodes_per_subset_node = nx.descendants(self.graph, subset_node) + if missing_nodes := descendant_nodes_per_subset_node.difference( + subset_nodes + ): + return (subset_nodes, missing_nodes) + return None + def validate_subset(self, paramspecs: Sequence[ParamSpecBase]) -> None: """ Validate that the given parameters form a valid subset of the @@ -454,15 +454,11 @@ def validate_subset(self, paramspecs: Sequence[ParamSpecBase]) -> None: InterdependencyError: If a dependency or inference is missing """ - subset_nodes = set([paramspec.name for paramspec in paramspecs]) - for subset_node in subset_nodes: - descendant_nodes_per_subset_node = nx.descendants(self.graph, subset_node) - if missing_nodes := descendant_nodes_per_subset_node.difference( - subset_nodes - ): - raise IncompleteSubsetError( - subset_params=subset_nodes, missing_params=missing_nodes - ) + invalid_subset = self._invalid_subsets(paramspecs) + if invalid_subset is not None: + raise IncompleteSubsetError( + subset_params=invalid_subset[0], missing_params=invalid_subset[1] + ) @classmethod def _from_graph(cls, graph: nx.DiGraph[str]) -> InterDependencies_: @@ -636,3 +632,161 @@ def paramspec_tree_to_param_name_tree( return { key.name: [item.name for item in items] for key, items in paramspec_tree.items() } + + +class FrozenInterDependencies_(InterDependencies_): # noqa: PLW1641 + # todo: not clear if this should implement __hash__. 
+ """ + A frozen version of InterDependencies_ that is immutable and caches + expensive lookups. This is used exclusively while running a measurement + to minimize the overhead of dependency lookups for each data operation. + + Args: + interdeps: An InterDependencies_ instance to freeze + + """ + + def __init__(self, interdeps: InterDependencies_): + self._graph = interdeps.graph.copy() + nx.freeze(self._graph) + self._top_level_parameters_cache: tuple[ParamSpecBase, ...] | None = None + self._dependencies_cache: ParamSpecTree | None = None + self._inferences_cache: ParamSpecTree | None = None + self._standalones_cache: frozenset[ParamSpecBase] | None = None + self._find_all_parameters_in_tree_cache: dict[ + ParamSpecBase, set[ParamSpecBase] + ] = {} + self._invalid_subsets_cache: dict[ + tuple[ParamSpecBase, ...], tuple[set[str], set[str]] | None + ] = {} + self._id_to_paramspec_cache: dict[str, ParamSpecBase] | None = None + self._paramspec_to_id_cache: dict[ParamSpecBase, str] | None = None + + def add_dependencies(self, dependencies: ParamSpecTree | None) -> None: + raise TypeError("FrozenInterDependencies_ is immutable") + + def add_inferences(self, inferences: ParamSpecTree | None) -> None: + raise TypeError("FrozenInterDependencies_ is immutable") + + def add_standalones(self, standalones: tuple[ParamSpecBase, ...]) -> None: + raise TypeError("FrozenInterDependencies_ is immutable") + + def add_paramspecs(self, paramspecs: Sequence[ParamSpecBase]) -> None: + raise TypeError("FrozenInterDependencies_ is immutable") + + def remove(self, paramspec: ParamSpecBase) -> InterDependencies_: + raise TypeError("FrozenInterDependencies_ is immutable") + + def extend( + self, + dependencies: ParamSpecTree | None = None, + inferences: ParamSpecTree | None = None, + standalones: tuple[ParamSpecBase, ...] 
= (), + ) -> InterDependencies_: + """ + Create a new :class:`InterDependencies_` object + that is an extension of this instance with the provided input + """ + # We need to unfreeze the graph for the new instance + new_graph = nx.DiGraph(self.graph) + new_interdependencies = InterDependencies_._from_graph(new_graph) + + new_interdependencies.add_dependencies(dependencies) + new_interdependencies.add_inferences(inferences) + new_interdependencies.add_standalones(standalones) + return new_interdependencies + + @property + def top_level_parameters(self) -> tuple[ParamSpecBase, ...]: + if self._top_level_parameters_cache is None: + self._top_level_parameters_cache = super().top_level_parameters + return self._top_level_parameters_cache + + @property + def dependencies(self) -> ParamSpecTree: + if self._dependencies_cache is None: + self._dependencies_cache = super().dependencies + return self._dependencies_cache.copy() + + @property + def inferences(self) -> ParamSpecTree: + if self._inferences_cache is None: + self._inferences_cache = super().inferences + return self._inferences_cache.copy() + + @property + def standalones(self) -> frozenset[ParamSpecBase]: + if self._standalones_cache is None: + self._standalones_cache = super().standalones + return self._standalones_cache + + def find_all_parameters_in_tree( + self, initial_param: ParamSpecBase + ) -> set[ParamSpecBase]: + if initial_param not in self._find_all_parameters_in_tree_cache: + self._find_all_parameters_in_tree_cache[initial_param] = ( + super().find_all_parameters_in_tree(initial_param) + ) + return self._find_all_parameters_in_tree_cache[initial_param].copy() + + @classmethod + def _from_dict(cls, ser: InterDependencies_Dict) -> FrozenInterDependencies_: + interdeps = InterDependencies_._from_dict(ser) + return cls(interdeps) + + @classmethod + def _from_graph(cls, graph: nx.DiGraph[str]) -> FrozenInterDependencies_: + interdeps = InterDependencies_._from_graph(graph) + return cls(interdeps) + + def validate_subset(self, paramspecs: Sequence[ParamSpecBase]) -> None: + paramspecs_tuple = tuple(paramspecs) + if paramspecs_tuple not in self._invalid_subsets_cache: + self._invalid_subsets_cache[paramspecs_tuple] = self._invalid_subsets( + paramspecs_tuple + ) + invalid_subset = self._invalid_subsets_cache[paramspecs_tuple] + if invalid_subset is not None: + raise IncompleteSubsetError( + subset_params=invalid_subset[0], missing_params=invalid_subset[1] + ) + + @property + def _id_to_paramspec(self) -> dict[str, ParamSpecBase]: + if self._id_to_paramspec_cache is None: + self._id_to_paramspec_cache = { + node_id: data["value"] for node_id, data in self.graph.nodes(data=True) + } + return self._id_to_paramspec_cache + + @property + def _paramspec_to_id(self) -> dict[ParamSpecBase, str]: + if self._paramspec_to_id_cache is None: + self._paramspec_to_id_cache = { + data["value"]: node_id for node_id, data in self.graph.nodes(data=True) + } + return self._paramspec_to_id_cache + + def __repr__(self) -> str: + rep = ( + f"FrozenInterDependencies_(dependencies={self.dependencies}, " + f"inferences={self.inferences}, " + f"standalones={self.standalones})" + ) + return rep + + def __eq__(self, other: object) -> bool: + if not isinstance(other, FrozenInterDependencies_): + return False + return nx.utils.graphs_equal(self.graph, other.graph) + + def to_interdependencies(self) -> InterDependencies_: + """ + Convert this FrozenInterDependencies_ back to a mutable InterDependencies_ instance. 
+ + Returns: + A new InterDependencies_ instance with the same data as this frozen instance. + + """ + new_graph = nx.DiGraph(self.graph) + return InterDependencies_._from_graph(new_graph) diff --git a/src/qcodes/dataset/measurement_extensions.py b/src/qcodes/dataset/measurement_extensions.py index 3634ebb062c7..719c6695fa0b 100644 --- a/src/qcodes/dataset/measurement_extensions.py +++ b/src/qcodes/dataset/measurement_extensions.py @@ -1,7 +1,7 @@ from __future__ import annotations import time -from collections.abc import Generator, Sequence +from collections.abc import Generator, Mapping, Sequence from contextlib import ExitStack, contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Any @@ -37,7 +37,7 @@ class DataSetDefinition: experiment: Experiment | None = None """An optional argument specifying which Experiment this dataset should be written to""" - metadata: dict[str, Any] | None = None + metadata: Mapping[str, Any] | None = None """An optional dictionary of metadata that will be added to the dataset generated by this definition""" @@ -236,4 +236,5 @@ def __next__(self) -> float: self._iter_index += 1 return set_val else: + self._iter_index = 0 raise StopIteration diff --git a/src/qcodes/dataset/measurements.py b/src/qcodes/dataset/measurements.py index 6eb4c054df3d..dbf015cb0551 100644 --- a/src/qcodes/dataset/measurements.py +++ b/src/qcodes/dataset/measurements.py @@ -36,6 +36,7 @@ ValuesType, ) from qcodes.dataset.descriptions.dependencies import ( + FrozenInterDependencies_, IncompleteSubsetError, InterDependencies_, ParamSpecTree, @@ -759,6 +760,28 @@ def __exit__( self._span.record_exception(exception_value) self.ds.add_metadata("measurement_exception", exception_string) + # for now we set the interdependencies back to the + # not frozen state, so that further modifications are possible + # this is not recommended but we want to minimize the changes for now + + if isinstance(self.ds.description.interdeps, FrozenInterDependencies_): + intedeps = self.ds.description.interdeps.to_interdependencies() + else: + intedeps = self.ds.description.interdeps + + if isinstance(self.ds, DataSet): + self.ds.set_interdependencies( + shapes=self.ds.description.shapes, + interdeps=intedeps, + override=True, + ) + elif isinstance(self.ds, DataSetInMem): + self.ds._set_interdependencies( + shapes=self.ds.description.shapes, + interdeps=intedeps, + override=True, + ) + # and finally mark the dataset as closed, thus # finishing the measurement # Note that the completion of a dataset entails waiting for the @@ -1508,7 +1531,7 @@ def run( self.experiment, station=self.station, write_period=self._write_period, - interdeps=self._interdeps, + interdeps=FrozenInterDependencies_(self._interdeps), name=self.name, subscribers=self.subscribers, parent_datasets=self._parent_datasets, diff --git a/src/qcodes/instrument/sims/lakeshore_model335.yaml b/src/qcodes/instrument/sims/lakeshore_model335.yaml index 1166b1560fce..07a59516173d 100644 --- a/src/qcodes/instrument/sims/lakeshore_model335.yaml +++ b/src/qcodes/instrument/sims/lakeshore_model335.yaml @@ -37,21 +37,37 @@ devices: setter: q: "INNAME A,\"{}\"" + sensor_tlimit_A: + default: "300.0" + getter: + q: "TLIMIT? A" + r: "{}" + setter: + q: "TLIMIT A,{}" + + sensor_type_A: + default: "1,0,1,0,1" + getter: + q: "INTYPE? A" + r: "{}" + setter: + q: "INTYPE A,{}" + sensor_setpoint_A: default: "100" getter: - q: "setp? A" + q: "SETP? 
A" r: "{}" setter: - q: "setp A,\"{}\"" + q: "SETP A,\"{}\"" sensor_range_A: default: "1" getter: - q: "range? A" + q: "RANGE? A" r: "{}" setter: - q: "range A,\"{}\"" + q: "RANGE A,\"{}\"" temperature_B: @@ -80,21 +96,157 @@ devices: setter: q: "INNAME B,\"{}\"" + sensor_tlimit_B: + default: "300.0" + getter: + q: "TLIMIT? B" + r: "{}" + setter: + q: "TLIMIT B,{}" + + sensor_type_B: + default: "1,0,1,0,1" + getter: + q: "INTYPE? B" + r: "{}" + setter: + q: "INTYPE B,{}" + sensor_setpoint_B: default: "100" getter: - q: "setp? A" + q: "SETP? B" r: "{}" setter: - q: "setp A,\"{}\"" + q: "SETP B,\"{}\"" sensor_range_B: default: "1" getter: - q: "range? A" + q: "RANGE? B" + r: "{}" + setter: + q: "RANGE B,\"{}\"" + + output_mode_1: + default: "1,1,0" + getter: + q: "OUTMODE? 1" + r: "{}" + setter: + q: "OUTMODE 1,{}" + + output_mode_2: + default: "1,2,0" + getter: + q: "OUTMODE? 2" + r: "{}" + setter: + q: "OUTMODE 2,{}" + + pid_output_1: + default: "10,20,30" + getter: + q: "PID? 1" + r: "{}" + setter: + q: "PID 1,{}" + + pid_output_2: + default: "10,20,30" + getter: + q: "PID? 2" + r: "{}" + setter: + q: "PID 2,{}" + + output_range_1: + default: "1" + getter: + q: "RANGE? 1" + r: "{}" + setter: + q: "RANGE 1,{}" + + output_range_2: + default: "1" + getter: + q: "RANGE? 2" + r: "{}" + setter: + q: "RANGE 2,{}" + + heater_output_1: + default: "0.0" + getter: + q: "HTR? 1" + r: "{}" + + heater_output_2: + default: "0.0" + getter: + q: "HTR? 2" + r: "{}" + + output_setpoint_1: + default: "100.0" + getter: + q: "SETP? 1" + r: "{}" + setter: + q: "SETP 1,{}" + + output_setpoint_2: + default: "100.0" + getter: + q: "SETP? 2" r: "{}" setter: - q: "range A,\"{}\"" + q: "SETP 2,{}" + + heater_setup_1: + default: "0,1,0,0.0,1" + getter: + q: "HTRSET? 1" + r: "{}" + setter: + q: "HTRSET 1,{}" + + heater_setup_2: + default: "0,1,0,0.0,1" + getter: + q: "HTRSET? 2" + r: "{}" + setter: + q: "HTRSET 2,{}" + + setpoint_ramp_1: + default: "0,0.0" + getter: + q: "RAMP? 1" + r: "{}" + setter: + q: "RAMP 1,{}" + + setpoint_ramp_2: + default: "0,0.0" + getter: + q: "RAMP? 2" + r: "{}" + setter: + q: "RAMP 2,{}" + + setpoint_ramp_status_1: + default: "0" + getter: + q: "RAMPST? 1" + r: "{}" + + setpoint_ramp_status_2: + default: "0" + getter: + q: "RAMPST? 2" + r: "{}" resources: diff --git a/src/qcodes/instrument/sims/lakeshore_model336.yaml b/src/qcodes/instrument/sims/lakeshore_model336.yaml index 1e0277c31b16..fbeca651a23c 100644 --- a/src/qcodes/instrument/sims/lakeshore_model336.yaml +++ b/src/qcodes/instrument/sims/lakeshore_model336.yaml @@ -37,21 +37,21 @@ devices: setter: q: "INNAME A,\"{}\"" - sensor_setpoint_A: - default: "100" + sensor_tlimit_A: + default: 0 getter: - q: "setp? A" + q: "TLIMIT? A" r: "{}" setter: - q: "setp A,\"{}\"" + q: "TLIMIT A,{}" - sensor_range_A: - default: "1" + sensor_intype_A: + default: "0,0,1,0,1" getter: - q: "range? A" + q: "INTYPE? A" r: "{}" setter: - q: "range A,\"{}\"" + q: "INTYPE A,{}" sensor_curve_number_A: default: 42 @@ -91,21 +91,21 @@ devices: setter: q: "INNAME B,\"{}\"" - sensor_setpoint_B: - default: "100" + sensor_tlimit_B: + default: 0 getter: - q: "setp? A" + q: "TLIMIT? B" r: "{}" setter: - q: "setp A,\"{}\"" + q: "TLIMIT B,{}" - sensor_range_B: - default: "1" + sensor_intype_B: + default: "0,0,1,0,1" getter: - q: "range? A" + q: "INTYPE? 
B" r: "{}" setter: - q: "range A,\"{}\"" + q: "INTYPE B,{}" sensor_curve_number_B: default: 41 @@ -144,21 +144,21 @@ devices: setter: q: "INNAME C,\"{}\"" - sensor_setpoint_C: - default: "100" + sensor_tlimit_C: + default: 0 getter: - q: "setp? A" + q: "TLIMIT? C" r: "{}" setter: - q: "setp A,\"{}\"" + q: "TLIMIT C,{}" - sensor_range_C: - default: "1" + sensor_intype_C: + default: "0,0,1,0,1" getter: - q: "range? A" + q: "INTYPE? C" r: "{}" setter: - q: "range A,\"{}\"" + q: "INTYPE C,{}" sensor_curve_number_C: default: 40 @@ -197,21 +197,21 @@ devices: setter: q: "INNAME D,\"{}\"" - sensor_setpoint_D: - default: "100" + sensor_tlimit_D: + default: 0 getter: - q: "setp? A" + q: "TLIMIT? D" r: "{}" setter: - q: "setp A,\"{}\"" + q: "TLIMIT D,{}" - sensor_range_D: - default: "1" + sensor_intype_D: + default: "0,0,1,0,1" getter: - q: "range? A" + q: "INTYPE? D" r: "{}" setter: - q: "range A,\"{}\"" + q: "INTYPE D,{}" sensor_curve_number_D: default: 39 @@ -224,6 +224,219 @@ devices: q: "CRVHDR? 39" r: "DT-039,01110039,2,339.0,1" + pid_output_1: + default: "10,20,30" + getter: + q: "PID? 1" + r: "{}" + setter: + q: "PID 1,{}" + + pid_output_2: + default: "10,20,30" + getter: + q: "PID? 2" + r: "{}" + setter: + q: "PID 2,{}" + + outmode_output_1: + default: "1,2,0" + getter: + q: "OUTMODE? 1" + r: "{}" + setter: + q: "OUTMODE 1, {}" + + outmode_output_2: + default: "1,1,0" + getter: + q: "OUTMODE? 2" + r: "{}" + setter: + q: "OUTMODE 2, {}" + + range_output_1: + default: 1 + getter: + q: "RANGE? 1" + r: "{}" + setter: + q: "RANGE 1,{}" + + range_output_2: + default: 1 + getter: + q: "RANGE? 2" + r: "{}" + setter: + q: "RANGE 2,{}" + + setpoint_output_1: + default: 0 + getter: + q: "SETP? 1" + r: "{}" + setter: + q: "SETP 1,{}" + + setpoint_output_2: + default: 0 + getter: + q: "SETP? 2" + r: "{}" + setter: + q: "SETP 2,{}" + + htr_output_1: + default: 0.005 + getter: + q: "HTR? 1" + r: "{}" + + htr_output_2: + default: 0.005 + getter: + q: "HTR? 2" + r: "{}" + + htrset_output_1: + default: "1, 5" + getter: + q: "HTRSET? 1" + r: "{}" + setter: + q: "HTRSET 1, {}" + + htrset_output_2: + default: "1, 5" + getter: + q: "HTRSET? 2" + r: "{}" + setter: + q: "HTRSET 2, {}" + + ramp_output_1: + default: "0,0" + getter: + q: "RAMP? 1" + r: "{}" + setter: + q: "RAMP 1,{}" + + ramp_output_2: + default: "0,0" + getter: + q: "RAMP? 2" + r: "{}" + setter: + q: "RAMP 2,{}" + + rampst_output_1: + default: 0 + getter: + q: "RAMPST? 1" + r: "{}" + + rampst_output_2: + default: 0 + getter: + q: "RAMPST? 2" + r: "{}" + + # ==================== + # Output 3 (Voltage Source, no PID) + # ==================== + outmode_output_3: + default: "1,1,0" + getter: + q: "OUTMODE? 3" + r: "{}" + setter: + q: "OUTMODE 3, {}" + + range_output_3: + default: 1 + getter: + q: "RANGE? 3" + r: "{}" + setter: + q: "RANGE 3,{}" + + setpoint_output_3: + default: 0 + getter: + q: "SETP? 3" + r: "{}" + setter: + q: "SETP 3,{}" + + htr_output_3: + default: 0 + getter: + q: "HTR? 3" + r: "{}" + + ramp_output_3: + default: "0,0" + getter: + q: "RAMP? 3" + r: "{}" + setter: + q: "RAMP 3,{}" + + rampst_output_3: + default: 0 + getter: + q: "RAMPST? 3" + r: "{}" + + # ==================== + # Output 4 (Voltage Source, no PID) + # ==================== + outmode_output_4: + default: "1,2,0" + getter: + q: "OUTMODE? 4" + r: "{}" + setter: + q: "OUTMODE 4, {}" + + range_output_4: + default: 1 + getter: + q: "RANGE? 4" + r: "{}" + setter: + q: "RANGE 4,{}" + + setpoint_output_4: + default: 0 + getter: + q: "SETP? 
4" + r: "{}" + setter: + q: "SETP 4,{}" + + htr_output_4: + default: 0 + getter: + q: "HTR? 4" + r: "{}" + + ramp_output_4: + default: "0,0" + getter: + q: "RAMP? 4" + r: "{}" + setter: + q: "RAMP 4,{}" + + rampst_output_4: + default: 0 + getter: + q: "RAMPST? 4" + r: "{}" resources: GPIB::2::INSTR: diff --git a/src/qcodes/instrument/sims/lakeshore_model372.yaml b/src/qcodes/instrument/sims/lakeshore_model372.yaml index 050c952da2e9..c3ac32028010 100644 --- a/src/qcodes/instrument/sims/lakeshore_model372.yaml +++ b/src/qcodes/instrument/sims/lakeshore_model372.yaml @@ -1,14 +1,972 @@ spec: "1.0" + devices: device 1: eom: GPIB INSTR: q: "\r\n" r: "\r\n" - error: ERROR + error: + command error: CMD_ERROR + query error: Q_ERROR + dialogues: - q: "*IDN?" - r: "QCoDeS, m0d3l, 336, 0.0.01" + r: "QCoDeS, m0d3l, 372, 0.0.01" + + properties: + # ==================== + # Sensor Channel 1 (ch01) + # ==================== + temperature_1: + default: 4.0 + getter: + q: "KRDG? 1" + r: "{}" + + sensor_raw_1: + default: 100.0 + getter: + q: "SRDG? 1" + r: "{}" + + sensor_status_1: + default: 0 + getter: + q: "RDGST? 1" + r: "{}" + + sensor_name_1: + default: "Channel 1" + getter: + q: "INNAME? 1" + r: "{}" + setter: + q: "INNAME 1,\"{}\"" + + sensor_tlimit_1: + default: 300.0 + getter: + q: "TLIMIT? 1" + r: "{}" + setter: + q: "TLIMIT 1,{}" + + inset_1: + default: "1,100,3,0,1" + getter: + q: "INSET? 1" + r: "{}" + setter: + q: "INSET 1,{}" + + intype_1: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 1" + r: "{}" + setter: + q: "INTYPE 1,{}" + + # ==================== + # Sensor Channel 2 (ch02) + # ==================== + temperature_2: + default: 4.0 + getter: + q: "KRDG? 2" + r: "{}" + + sensor_raw_2: + default: 100.0 + getter: + q: "SRDG? 2" + r: "{}" + + sensor_status_2: + default: 0 + getter: + q: "RDGST? 2" + r: "{}" + + sensor_name_2: + default: "Channel 2" + getter: + q: "INNAME? 2" + r: "{}" + setter: + q: "INNAME 2,\"{}\"" + + sensor_tlimit_2: + default: 300.0 + getter: + q: "TLIMIT? 2" + r: "{}" + setter: + q: "TLIMIT 2,{}" + + inset_2: + default: "1,100,3,0,1" + getter: + q: "INSET? 2" + r: "{}" + setter: + q: "INSET 2,{}" + + intype_2: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 2" + r: "{}" + setter: + q: "INTYPE 2,{}" + + # ==================== + # Sensor Channel 3 (ch03) + # ==================== + temperature_3: + default: 4.0 + getter: + q: "KRDG? 3" + r: "{}" + + sensor_raw_3: + default: 100.0 + getter: + q: "SRDG? 3" + r: "{}" + + sensor_status_3: + default: 0 + getter: + q: "RDGST? 3" + r: "{}" + + sensor_name_3: + default: "Channel 3" + getter: + q: "INNAME? 3" + r: "{}" + setter: + q: "INNAME 3,\"{}\"" + + sensor_tlimit_3: + default: 300.0 + getter: + q: "TLIMIT? 3" + r: "{}" + setter: + q: "TLIMIT 3,{}" + + inset_3: + default: "1,100,3,0,1" + getter: + q: "INSET? 3" + r: "{}" + setter: + q: "INSET 3,{}" + + intype_3: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 3" + r: "{}" + setter: + q: "INTYPE 3,{}" + + # ==================== + # Sensor Channel 4 (ch04) + # ==================== + temperature_4: + default: 4.0 + getter: + q: "KRDG? 4" + r: "{}" + + sensor_raw_4: + default: 100.0 + getter: + q: "SRDG? 4" + r: "{}" + + sensor_status_4: + default: 0 + getter: + q: "RDGST? 4" + r: "{}" + + sensor_name_4: + default: "Channel 4" + getter: + q: "INNAME? 4" + r: "{}" + setter: + q: "INNAME 4,\"{}\"" + + sensor_tlimit_4: + default: 300.0 + getter: + q: "TLIMIT? 4" + r: "{}" + setter: + q: "TLIMIT 4,{}" + + inset_4: + default: "1,100,3,0,1" + getter: + q: "INSET? 
4" + r: "{}" + setter: + q: "INSET 4,{}" + + intype_4: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 4" + r: "{}" + setter: + q: "INTYPE 4,{}" + + # ==================== + # Sensor Channel 5 (ch05) + # ==================== + temperature_5: + default: 4.0 + getter: + q: "KRDG? 5" + r: "{}" + + sensor_raw_5: + default: 100.0 + getter: + q: "SRDG? 5" + r: "{}" + + sensor_status_5: + default: 0 + getter: + q: "RDGST? 5" + r: "{}" + + sensor_name_5: + default: "Channel 5" + getter: + q: "INNAME? 5" + r: "{}" + setter: + q: "INNAME 5,\"{}\"" + + sensor_tlimit_5: + default: 300.0 + getter: + q: "TLIMIT? 5" + r: "{}" + setter: + q: "TLIMIT 5,{}" + + inset_5: + default: "1,100,3,0,1" + getter: + q: "INSET? 5" + r: "{}" + setter: + q: "INSET 5,{}" + + intype_5: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 5" + r: "{}" + setter: + q: "INTYPE 5,{}" + + # ==================== + # Sensor Channel 6 (ch06) + # ==================== + temperature_6: + default: 4.0 + getter: + q: "KRDG? 6" + r: "{}" + + sensor_raw_6: + default: 100.0 + getter: + q: "SRDG? 6" + r: "{}" + + sensor_status_6: + default: 0 + getter: + q: "RDGST? 6" + r: "{}" + + sensor_name_6: + default: "Channel 6" + getter: + q: "INNAME? 6" + r: "{}" + setter: + q: "INNAME 6,\"{}\"" + + sensor_tlimit_6: + default: 300.0 + getter: + q: "TLIMIT? 6" + r: "{}" + setter: + q: "TLIMIT 6,{}" + + inset_6: + default: "1,100,3,0,1" + getter: + q: "INSET? 6" + r: "{}" + setter: + q: "INSET 6,{}" + + intype_6: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 6" + r: "{}" + setter: + q: "INTYPE 6,{}" + + # ==================== + # Sensor Channel 7 (ch07) + # ==================== + temperature_7: + default: 4.0 + getter: + q: "KRDG? 7" + r: "{}" + + sensor_raw_7: + default: 100.0 + getter: + q: "SRDG? 7" + r: "{}" + + sensor_status_7: + default: 0 + getter: + q: "RDGST? 7" + r: "{}" + + sensor_name_7: + default: "Channel 7" + getter: + q: "INNAME? 7" + r: "{}" + setter: + q: "INNAME 7,\"{}\"" + + sensor_tlimit_7: + default: 300.0 + getter: + q: "TLIMIT? 7" + r: "{}" + setter: + q: "TLIMIT 7,{}" + + inset_7: + default: "1,100,3,0,1" + getter: + q: "INSET? 7" + r: "{}" + setter: + q: "INSET 7,{}" + + intype_7: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 7" + r: "{}" + setter: + q: "INTYPE 7,{}" + + # ==================== + # Sensor Channel 8 (ch08) + # ==================== + temperature_8: + default: 4.0 + getter: + q: "KRDG? 8" + r: "{}" + + sensor_raw_8: + default: 100.0 + getter: + q: "SRDG? 8" + r: "{}" + + sensor_status_8: + default: 0 + getter: + q: "RDGST? 8" + r: "{}" + + sensor_name_8: + default: "Channel 8" + getter: + q: "INNAME? 8" + r: "{}" + setter: + q: "INNAME 8,\"{}\"" + + sensor_tlimit_8: + default: 300.0 + getter: + q: "TLIMIT? 8" + r: "{}" + setter: + q: "TLIMIT 8,{}" + + inset_8: + default: "1,100,3,0,1" + getter: + q: "INSET? 8" + r: "{}" + setter: + q: "INSET 8,{}" + + intype_8: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 8" + r: "{}" + setter: + q: "INTYPE 8,{}" + + # ==================== + # Sensor Channel 9 (ch09) + # ==================== + temperature_9: + default: 4.0 + getter: + q: "KRDG? 9" + r: "{}" + + sensor_raw_9: + default: 100.0 + getter: + q: "SRDG? 9" + r: "{}" + + sensor_status_9: + default: 0 + getter: + q: "RDGST? 9" + r: "{}" + + sensor_name_9: + default: "Channel 9" + getter: + q: "INNAME? 9" + r: "{}" + setter: + q: "INNAME 9,\"{}\"" + + sensor_tlimit_9: + default: 300.0 + getter: + q: "TLIMIT? 9" + r: "{}" + setter: + q: "TLIMIT 9,{}" + + inset_9: + default: "1,100,3,0,1" + getter: + q: "INSET? 
9" + r: "{}" + setter: + q: "INSET 9,{}" + + intype_9: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 9" + r: "{}" + setter: + q: "INTYPE 9,{}" + + # ==================== + # Sensor Channel 10 (ch10) + # ==================== + temperature_10: + default: 4.0 + getter: + q: "KRDG? 10" + r: "{}" + + sensor_raw_10: + default: 100.0 + getter: + q: "SRDG? 10" + r: "{}" + + sensor_status_10: + default: 0 + getter: + q: "RDGST? 10" + r: "{}" + + sensor_name_10: + default: "Channel 10" + getter: + q: "INNAME? 10" + r: "{}" + setter: + q: "INNAME 10,\"{}\"" + + sensor_tlimit_10: + default: 300.0 + getter: + q: "TLIMIT? 10" + r: "{}" + setter: + q: "TLIMIT 10,{}" + + inset_10: + default: "1,100,3,0,1" + getter: + q: "INSET? 10" + r: "{}" + setter: + q: "INSET 10,{}" + + intype_10: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 10" + r: "{}" + setter: + q: "INTYPE 10,{}" + + # ==================== + # Sensor Channel 11 (ch11) + # ==================== + temperature_11: + default: 4.0 + getter: + q: "KRDG? 11" + r: "{}" + + sensor_raw_11: + default: 100.0 + getter: + q: "SRDG? 11" + r: "{}" + + sensor_status_11: + default: 0 + getter: + q: "RDGST? 11" + r: "{}" + + sensor_name_11: + default: "Channel 11" + getter: + q: "INNAME? 11" + r: "{}" + setter: + q: "INNAME 11,\"{}\"" + + sensor_tlimit_11: + default: 300.0 + getter: + q: "TLIMIT? 11" + r: "{}" + setter: + q: "TLIMIT 11,{}" + + inset_11: + default: "1,100,3,0,1" + getter: + q: "INSET? 11" + r: "{}" + setter: + q: "INSET 11,{}" + + intype_11: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 11" + r: "{}" + setter: + q: "INTYPE 11,{}" + + # ==================== + # Sensor Channel 12 (ch12) + # ==================== + temperature_12: + default: 4.0 + getter: + q: "KRDG? 12" + r: "{}" + + sensor_raw_12: + default: 100.0 + getter: + q: "SRDG? 12" + r: "{}" + + sensor_status_12: + default: 0 + getter: + q: "RDGST? 12" + r: "{}" + + sensor_name_12: + default: "Channel 12" + getter: + q: "INNAME? 12" + r: "{}" + setter: + q: "INNAME 12,\"{}\"" + + sensor_tlimit_12: + default: 300.0 + getter: + q: "TLIMIT? 12" + r: "{}" + setter: + q: "TLIMIT 12,{}" + + inset_12: + default: "1,100,3,0,1" + getter: + q: "INSET? 12" + r: "{}" + setter: + q: "INSET 12,{}" + + intype_12: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 12" + r: "{}" + setter: + q: "INTYPE 12,{}" + + # ==================== + # Sensor Channel 13 (ch13) + # ==================== + temperature_13: + default: 4.0 + getter: + q: "KRDG? 13" + r: "{}" + + sensor_raw_13: + default: 100.0 + getter: + q: "SRDG? 13" + r: "{}" + + sensor_status_13: + default: 0 + getter: + q: "RDGST? 13" + r: "{}" + + sensor_name_13: + default: "Channel 13" + getter: + q: "INNAME? 13" + r: "{}" + setter: + q: "INNAME 13,\"{}\"" + + sensor_tlimit_13: + default: 300.0 + getter: + q: "TLIMIT? 13" + r: "{}" + setter: + q: "TLIMIT 13,{}" + + inset_13: + default: "1,100,3,0,1" + getter: + q: "INSET? 13" + r: "{}" + setter: + q: "INSET 13,{}" + + intype_13: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 13" + r: "{}" + setter: + q: "INTYPE 13,{}" + + # ==================== + # Sensor Channel 14 (ch14) + # ==================== + temperature_14: + default: 4.0 + getter: + q: "KRDG? 14" + r: "{}" + + sensor_raw_14: + default: 100.0 + getter: + q: "SRDG? 14" + r: "{}" + + sensor_status_14: + default: 0 + getter: + q: "RDGST? 14" + r: "{}" + + sensor_name_14: + default: "Channel 14" + getter: + q: "INNAME? 14" + r: "{}" + setter: + q: "INNAME 14,\"{}\"" + + sensor_tlimit_14: + default: 300.0 + getter: + q: "TLIMIT? 
14" + r: "{}" + setter: + q: "TLIMIT 14,{}" + + inset_14: + default: "1,100,3,0,1" + getter: + q: "INSET? 14" + r: "{}" + setter: + q: "INSET 14,{}" + + intype_14: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 14" + r: "{}" + setter: + q: "INTYPE 14,{}" + + # ==================== + # Sensor Channel 15 (ch15) + # ==================== + temperature_15: + default: 4.0 + getter: + q: "KRDG? 15" + r: "{}" + + sensor_raw_15: + default: 100.0 + getter: + q: "SRDG? 15" + r: "{}" + + sensor_status_15: + default: 0 + getter: + q: "RDGST? 15" + r: "{}" + + sensor_name_15: + default: "Channel 15" + getter: + q: "INNAME? 15" + r: "{}" + setter: + q: "INNAME 15,\"{}\"" + + sensor_tlimit_15: + default: 300.0 + getter: + q: "TLIMIT? 15" + r: "{}" + setter: + q: "TLIMIT 15,{}" + + inset_15: + default: "1,100,3,0,1" + getter: + q: "INSET? 15" + r: "{}" + setter: + q: "INSET 15,{}" + + intype_15: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 15" + r: "{}" + setter: + q: "INTYPE 15,{}" + + # ==================== + # Sensor Channel 16 (ch16) + # ==================== + temperature_16: + default: 4.0 + getter: + q: "KRDG? 16" + r: "{}" + + sensor_raw_16: + default: 100.0 + getter: + q: "SRDG? 16" + r: "{}" + + sensor_status_16: + default: 0 + getter: + q: "RDGST? 16" + r: "{}" + + sensor_name_16: + default: "Channel 16" + getter: + q: "INNAME? 16" + r: "{}" + setter: + q: "INNAME 16,\"{}\"" + + sensor_tlimit_16: + default: 300.0 + getter: + q: "TLIMIT? 16" + r: "{}" + setter: + q: "TLIMIT 16,{}" + + inset_16: + default: "1,100,3,0,1" + getter: + q: "INSET? 16" + r: "{}" + setter: + q: "INSET 16,{}" + + intype_16: + default: "0,1,0,5,0,1" + getter: + q: "INTYPE? 16" + r: "{}" + setter: + q: "INTYPE 16,{}" + + # ==================== + # Heater Output 0 (sample_heater) + # ==================== + outmode_output_0: + default: "5,2,0,0,0,1" + getter: + q: "OUTMODE? 0" + r: "{}" + setter: + q: "OUTMODE 0,{}" + + pid_output_0: + default: "10,20,30" + getter: + q: "PID? 0" + r: "{}" + setter: + q: "PID 0,{}" + + range_output_0: + default: 0 + getter: + q: "RANGE? 0" + r: "{}" + setter: + q: "RANGE 0,{}" + + setpoint_output_0: + default: 4.0 + getter: + q: "SETP? 0" + r: "{}" + setter: + q: "SETP 0,{}" + + # ==================== + # Heater Output 1 (warmup_heater) + # ==================== + outmode_output_1: + default: "5,2,0,0,0,1" + getter: + q: "OUTMODE? 1" + r: "{}" + setter: + q: "OUTMODE 1,{}" + + pid_output_1: + default: "1,2,3" + getter: + q: "PID? 1" + r: "{}" + setter: + q: "PID 1,{}" + + range_output_1: + default: 0 + getter: + q: "RANGE? 1" + r: "{}" + setter: + q: "RANGE 1,{}" + + setpoint_output_1: + default: 4.0 + getter: + q: "SETP? 1" + r: "{}" + setter: + q: "SETP 1,{}" + + # ==================== + # Heater Output 2 (analog_heater) + # ==================== + outmode_output_2: + default: "5,2,0,0,0,1" + getter: + q: "OUTMODE? 2" + r: "{}" + setter: + q: "OUTMODE 2,{}" + + pid_output_2: + default: "10,20,30" + getter: + q: "PID? 2" + r: "{}" + setter: + q: "PID 2,{}" + + range_output_2: + default: 0 + getter: + q: "RANGE? 2" + r: "{}" + setter: + q: "RANGE 2,{}" + + setpoint_output_2: + default: 4.0 + getter: + q: "SETP? 
2" + r: "{}" + setter: + q: "SETP 2,{}" resources: GPIB::3::INSTR: diff --git a/src/qcodes/instrument_drivers/Lakeshore/Lakeshore_model_336.py b/src/qcodes/instrument_drivers/Lakeshore/Lakeshore_model_336.py index 977e04adec0f..509c087608e9 100644 --- a/src/qcodes/instrument_drivers/Lakeshore/Lakeshore_model_336.py +++ b/src/qcodes/instrument_drivers/Lakeshore/Lakeshore_model_336.py @@ -85,6 +85,10 @@ class LakeshoreModel336VoltageSource(LakeshoreBaseOutput): RANGES: ClassVar[dict[str, int]] = {"off": 0, "low": 1, "medium": 2, "high": 3} + _input_channel_parameter_kwargs: ClassVar[dict[str, dict[str, int]]] = { + "val_mapping": _channel_name_to_outmode_command_map + } + def __init__( self, parent: "LakeshoreModel336", diff --git a/src/qcodes/instrument_drivers/tektronix/DPO7200xx.py b/src/qcodes/instrument_drivers/tektronix/DPO7200xx.py index dc9d64e9cd59..57929e2c0c58 100644 --- a/src/qcodes/instrument_drivers/tektronix/DPO7200xx.py +++ b/src/qcodes/instrument_drivers/tektronix/DPO7200xx.py @@ -106,10 +106,14 @@ def __init__( self.add_submodule(measurement_name, measurement_module) measurement_list.append(measurement_module) - self.add_submodule("measurement", measurement_list) - self.add_submodule( + self.measurement: ChannelList[TektronixDPOMeasurement] = self.add_submodule( + "measurement", measurement_list + ) + """Instrument module measurement""" + self.statistics: TektronixDPOMeasurementStatistics = self.add_submodule( "statistics", TektronixDPOMeasurementStatistics(self, "statistics") ) + """Instrument module statistics""" channel_list = ChannelList(self, "channel", TektronixDPOChannel) for channel_number in range(1, self.number_of_channels + 1): @@ -123,7 +127,10 @@ def __init__( self.add_submodule(channel_name, channel_module) channel_list.append(channel_module) - self.add_submodule("channel", channel_list) + self.channel: ChannelList[TektronixDPOChannel] = self.add_submodule( + "channel", channel_list + ) + """Instrument module channel""" self.connect_message() @@ -445,9 +452,10 @@ def __init__( super().__init__(parent, name, **kwargs) self._identifier = f"CH{channel_number}" - self.add_submodule( + self.waveform: TektronixDPOWaveform = self.add_submodule( "waveform", TektronixDPOWaveform(self, "waveform", self._identifier) ) + """Instrument module waveform""" self.scale: Parameter = self.add_parameter( "scale", diff --git a/tests/dataset/measurement/test_measurement_context_manager.py b/tests/dataset/measurement/test_measurement_context_manager.py index 550a59e91c38..4377b81148af 100644 --- a/tests/dataset/measurement/test_measurement_context_manager.py +++ b/tests/dataset/measurement/test_measurement_context_manager.py @@ -21,6 +21,10 @@ import qcodes as qc import qcodes.validators as vals from qcodes.dataset.data_set import DataSet, load_by_id +from qcodes.dataset.descriptions.dependencies import ( + FrozenInterDependencies_, + InterDependencies_, +) from qcodes.dataset.experiment_container import new_experiment from qcodes.dataset.export_config import DataExportType from qcodes.dataset.measurements import Measurement @@ -730,6 +734,16 @@ def test_datasaver_scalars( with pytest.raises(ValueError): datasaver.add_result((DMM.v1, 0)) + ds = datasaver.dataset + assert isinstance(ds, DataSet) + assert isinstance(ds.description.interdeps, InterDependencies_) + assert not isinstance(ds.description.interdeps, FrozenInterDependencies_) + + loaded_ds = load_by_id(ds.run_id) + + assert isinstance(loaded_ds.description.interdeps, InterDependencies_) + assert not 
isinstance(loaded_ds.description.interdeps, FrozenInterDependencies_) + # More assertions of setpoints, labels and units in the DB! diff --git a/tests/dataset/test_dependencies.py b/tests/dataset/test_dependencies.py index e2607655ad5a..c4deebc32239 100644 --- a/tests/dataset/test_dependencies.py +++ b/tests/dataset/test_dependencies.py @@ -6,6 +6,7 @@ from networkx import NetworkXError from qcodes.dataset.descriptions.dependencies import ( + FrozenInterDependencies_, IncompleteSubsetError, InterDependencies_, ) @@ -477,3 +478,60 @@ def test_dependency_on_middle_parameter( # in both directions, ps4 is actually a member of the tree for ps1 assert idps.top_level_parameters == (ps1,) assert idps.find_all_parameters_in_tree(ps1) == {ps1, ps2, ps3, ps4} + + +def test_frozen_interdependencies(some_paramspecbases) -> None: + ps1, ps2, ps3, ps4 = some_paramspecbases + idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, inferences={ps2: (ps4,)}) + + frozen = FrozenInterDependencies_(idps) + + assert frozen.dependencies == idps.dependencies + assert frozen.inferences == idps.inferences + assert frozen.standalones == idps.standalones + assert frozen.top_level_parameters == idps.top_level_parameters + + # Test immutability + with pytest.raises(TypeError, match="FrozenInterDependencies_ is immutable"): + frozen.add_dependencies({ps4: (ps1,)}) + + with pytest.raises(TypeError, match="FrozenInterDependencies_ is immutable"): + frozen.add_inferences({ps4: (ps1,)}) + + with pytest.raises(TypeError, match="FrozenInterDependencies_ is immutable"): + frozen.add_standalones((ps4,)) + + with pytest.raises(TypeError, match="FrozenInterDependencies_ is immutable"): + frozen.remove(ps1) + + with pytest.raises(TypeError, match="FrozenInterDependencies_ is immutable"): + frozen.add_paramspecs((ps1,)) + + # Test extend returns InterDependencies_ (mutable) + ps5 = ParamSpecBase("psb5", "numeric", "number", "") + extended = frozen.extend(standalones=(ps5,)) + assert isinstance(extended, InterDependencies_) + assert not isinstance(extended, FrozenInterDependencies_) + assert ps5 in extended.standalones + + # Test caching of properties + # Access properties to trigger caching + _ = frozen.dependencies + _ = frozen.inferences + _ = frozen.standalones + _ = frozen.top_level_parameters + + assert frozen._dependencies_cache is not None + assert frozen._inferences_cache is not None + assert frozen._standalones_cache is not None + assert frozen._top_level_parameters_cache is not None + + +def test_frozen_from_dict(some_paramspecbases) -> None: + ps1, ps2, ps3, _ = some_paramspecbases + idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}) + ser = idps._to_dict() + + frozen = FrozenInterDependencies_._from_dict(ser) + assert isinstance(frozen, FrozenInterDependencies_) + assert frozen == FrozenInterDependencies_(idps) diff --git a/tests/dataset/test_measurement_extensions.py b/tests/dataset/test_measurement_extensions.py index 34fd0e7abe30..87a342b7323f 100644 --- a/tests/dataset/test_measurement_extensions.py +++ b/tests/dataset/test_measurement_extensions.py @@ -1,4 +1,5 @@ import gc +import itertools from functools import partial from itertools import product from pathlib import Path @@ -238,6 +239,30 @@ def test_linsweeper(default_params, default_database_and_experiment): ) +def test_nested_linsweeper(default_params): + set1, set2, _, _, _, _ = default_params + linsweeper1 = LinSweeper(set1, 0, 1, 11, 0.001) + linsweeper2 = LinSweeper(set2, -1, 0, 6, 0.001) + data_pairs = [] + for _ in linsweeper1: + for _ in 
linsweeper2: + data_pairs.append([set1(), set2()]) + + assert len(data_pairs) == 11 * 6 + assert np.all( + np.isclose( + np.array(data_pairs), + np.array( + list( + itertools.product( + linsweeper1.get_setpoints(), linsweeper2.get_setpoints() + ) + ) + ), + ) + ) + + def test_context_with_pws(pws_params, default_database_and_experiment): _ = default_database_and_experiment pws1, set1 = pws_params diff --git a/tests/drivers/test_lakeshore_335.py b/tests/drivers/test_lakeshore_335.py index 0120707befa9..d676bd15efb1 100644 --- a/tests/drivers/test_lakeshore_335.py +++ b/tests/drivers/test_lakeshore_335.py @@ -7,10 +7,8 @@ from .test_lakeshore_372 import ( DictClass, MockVisaInstrument, - command, instrument_fixture, query, - split_args, ) log = logging.getLogger(__name__) @@ -82,91 +80,6 @@ def get_t_when_heating(self): # start at 7K. return max(4, 7 - delta) - @query("PID?") - def pidq(self, arg): - heater = self.heaters[arg] - return f"{heater.P},{heater.I},{heater.D}" - - @command("PID") - @split_args() - def pid(self, output, P, I, D): # noqa E741 - for a, v in zip(["P", "I", "D"], [P, I, D]): - setattr(self.heaters[output], a, v) - - @query("OUTMODE?") - def outmodeq(self, arg): - heater = self.heaters[arg] - return f"{heater.mode},{heater.input_channel},{heater.powerup_enable}" - - @command("OUTMODE") - @split_args() - def outputmode(self, output, mode, input_channel, powerup_enable): - h = self.heaters[output] - h.output = output - h.mode = mode - h.input_channel = input_channel - h.powerup_enable = powerup_enable - - @query("INTYPE?") - def intypeq(self, channel): - ch = self.channel_mock[channel] - return ( - f"{ch.sensor_type}," - f"{ch.auto_range_enabled},{ch.range}," - f"{ch.compensation_enabled},{ch.units}" - ) - - @command("INTYPE") - @split_args() - def intype( - self, - channel, - sensor_type, - auto_range_enabled, - range_, - compensation_enabled, - units, - ): - ch = self.channel_mock[channel] - ch.sensor_type = sensor_type - ch.auto_range_enabled = auto_range_enabled - ch.range = range_ - ch.compensation_enabled = compensation_enabled - ch.units = units - - @query("RANGE?") - def rangeq(self, heater): - h = self.heaters[heater] - return f"{h.output_range}" - - @command("RANGE") - @split_args() - def range_cmd(self, heater, output_range): - h = self.heaters[heater] - h.output_range = output_range - - @query("SETP?") - def setpointq(self, heater): - h = self.heaters[heater] - return f"{h.setpoint}" - - @command("SETP") - @split_args() - def setpoint(self, heater, setpoint): - h = self.heaters[heater] - h.setpoint = setpoint - - @query("TLIMIT?") - def tlimitq(self, channel): - chan = self.channel_mock[channel] - return f"{chan.tlimit}" - - @command("TLIMIT") - @split_args() - def tlimitcmd(self, channel, tlimit): - chan = self.channel_mock[channel] - chan.tlimit = tlimit - @query("KRDG?") def temperature(self, output): chan = self.channel_mock[output] diff --git a/tests/drivers/test_lakeshore_336.py b/tests/drivers/test_lakeshore_336.py index 0ba20e47b295..e7923c1010b7 100644 --- a/tests/drivers/test_lakeshore_336.py +++ b/tests/drivers/test_lakeshore_336.py @@ -1,16 +1,16 @@ import logging import time +import pytest + from qcodes.instrument import InstrumentBase from qcodes.instrument_drivers.Lakeshore import LakeshoreModel336 from .test_lakeshore_372 import ( DictClass, MockVisaInstrument, - command, instrument_fixture, query, - split_args, ) log = logging.getLogger(__name__) @@ -102,91 +102,6 @@ def get_t_when_heating(self): # start at 7K. 
         return max(4, 7 - delta)
 
-    @query("PID?")
-    def pidq(self, arg):
-        heater = self.heaters[arg]
-        return f"{heater.P},{heater.I},{heater.D}"
-
-    @command("PID")
-    @split_args()
-    def pid(self, output, P, I, D):  # noqa E741
-        for a, v in zip(["P", "I", "D"], [P, I, D]):
-            setattr(self.heaters[output], a, v)
-
-    @query("OUTMODE?")
-    def outmodeq(self, arg):
-        heater = self.heaters[arg]
-        return f"{heater.mode},{heater.input_channel},{heater.powerup_enable}"
-
-    @command("OUTMODE")
-    @split_args()
-    def outputmode(self, output, mode, input_channel, powerup_enable):
-        h = self.heaters[output]
-        h.output = output
-        h.mode = mode
-        h.input_channel = input_channel
-        h.powerup_enable = powerup_enable
-
-    @query("INTYPE?")
-    def intypeq(self, channel):
-        ch = self.channel_mock[channel]
-        return (
-            f"{ch.sensor_type},"
-            f"{ch.auto_range_enabled},{ch.range},"
-            f"{ch.compensation_enabled},{ch.units}"
-        )
-
-    @command("INTYPE")
-    @split_args()
-    def intype(
-        self,
-        channel,
-        sensor_type,
-        auto_range_enabled,
-        range_,
-        compensation_enabled,
-        units,
-    ):
-        ch = self.channel_mock[channel]
-        ch.sensor_type = sensor_type
-        ch.auto_range_enabled = auto_range_enabled
-        ch.range = range_
-        ch.compensation_enabled = compensation_enabled
-        ch.units = units
-
-    @query("RANGE?")
-    def rangeq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.output_range}"
-
-    @command("RANGE")
-    @split_args()
-    def range_cmd(self, heater, output_range):
-        h = self.heaters[heater]
-        h.output_range = output_range
-
-    @query("SETP?")
-    def setpointq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.setpoint}"
-
-    @command("SETP")
-    @split_args()
-    def setpoint(self, heater, setpoint):
-        h = self.heaters[heater]
-        h.setpoint = setpoint
-
-    @query("TLIMIT?")
-    def tlimitq(self, channel):
-        chan = self.channel_mock[channel]
-        return f"{chan.tlimit}"
-
-    @command("TLIMIT")
-    @split_args()
-    def tlimitcmd(self, channel, tlimit):
-        chan = self.channel_mock[channel]
-        chan.tlimit = tlimit
-
     @query("KRDG?")
     def temperature(self, output):
         chan = self.channel_mock[output]
@@ -218,19 +133,19 @@ def test_pid_set(lakeshore_336) -> None:
     assert (h.P(), h.I(), h.D()) == (P, I, D)
 
 
-def test_output_mode(lakeshore_336) -> None:
+@pytest.mark.parametrize("output_num", [1, 2, 3, 4])
+@pytest.mark.parametrize("mode", ["off", "closed_loop", "zone", "open_loop"])
+@pytest.mark.parametrize("input_channel", ["A", "B", "C", "D"])
+def test_output_mode(lakeshore_336, output_num, mode, input_channel) -> None:
     ls = lakeshore_336
     mode = "off"
-    input_channel = "A"
-    powerup_enable = True
-    outputs = [getattr(ls, f"output_{n}") for n in range(1, 5)]
-    for h in outputs:  # a.k.a. heaters
-        h.mode(mode)
-        h.input_channel(input_channel)
-        h.powerup_enable(powerup_enable)
-        assert h.mode() == mode
-        assert h.input_channel() == input_channel
-        assert h.powerup_enable() == powerup_enable
+    h = getattr(ls, f"output_{output_num}")
+    h.mode(mode)
+    h.input_channel(input_channel)
+    h.powerup_enable(True)
+    assert h.mode() == mode
+    assert h.input_channel() == input_channel
+    assert h.powerup_enable()
 
 
 def test_range(lakeshore_336) -> None:
diff --git a/tests/drivers/test_lakeshore_336_legacy.py b/tests/drivers/test_lakeshore_336_legacy.py
index 3501ce2b3f5b..adef5b5e6bd6 100644
--- a/tests/drivers/test_lakeshore_336_legacy.py
+++ b/tests/drivers/test_lakeshore_336_legacy.py
@@ -9,10 +9,8 @@
 from .test_lakeshore_372 import (
     DictClass,
     MockVisaInstrument,
-    command,
     instrument_fixture,
     query,
-    split_args,
 )
 
 log = logging.getLogger(__name__)
@@ -104,91 +102,6 @@ def get_t_when_heating(self):
         # start at 7K.
         return max(4, 7 - delta)
 
-    @query("PID?")
-    def pidq(self, arg):
-        heater = self.heaters[arg]
-        return f"{heater.P},{heater.I},{heater.D}"
-
-    @command("PID")
-    @split_args()
-    def pid(self, output, P, I, D):  # noqa E741
-        for a, v in zip(["P", "I", "D"], [P, I, D]):
-            setattr(self.heaters[output], a, v)
-
-    @query("OUTMODE?")
-    def outmodeq(self, arg):
-        heater = self.heaters[arg]
-        return f"{heater.mode},{heater.input_channel},{heater.powerup_enable}"
-
-    @command("OUTMODE")
-    @split_args()
-    def outputmode(self, output, mode, input_channel, powerup_enable):
-        h = self.heaters[output]
-        h.output = output
-        h.mode = mode
-        h.input_channel = input_channel
-        h.powerup_enable = powerup_enable
-
-    @query("INTYPE?")
-    def intypeq(self, channel):
-        ch = self.channel_mock[channel]
-        return (
-            f"{ch.sensor_type},"
-            f"{ch.auto_range_enabled},{ch.range},"
-            f"{ch.compensation_enabled},{ch.units}"
-        )
-
-    @command("INTYPE")
-    @split_args()
-    def intype(
-        self,
-        channel,
-        sensor_type,
-        auto_range_enabled,
-        range_,
-        compensation_enabled,
-        units,
-    ):
-        ch = self.channel_mock[channel]
-        ch.sensor_type = sensor_type
-        ch.auto_range_enabled = auto_range_enabled
-        ch.range = range_
-        ch.compensation_enabled = compensation_enabled
-        ch.units = units
-
-    @query("RANGE?")
-    def rangeq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.output_range}"
-
-    @command("RANGE")
-    @split_args()
-    def range_cmd(self, heater, output_range):
-        h = self.heaters[heater]
-        h.output_range = output_range
-
-    @query("SETP?")
-    def setpointq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.setpoint}"
-
-    @command("SETP")
-    @split_args()
-    def setpoint(self, heater, setpoint):
-        h = self.heaters[heater]
-        h.setpoint = setpoint
-
-    @query("TLIMIT?")
-    def tlimitq(self, channel):
-        chan = self.channel_mock[channel]
-        return f"{chan.tlimit}"
-
-    @command("TLIMIT")
-    @split_args()
-    def tlimitcmd(self, channel, tlimit):
-        chan = self.channel_mock[channel]
-        chan.tlimit = tlimit
-
     @query("KRDG?")
     def temperature(self, output):
         chan = self.channel_mock[output]
diff --git a/tests/drivers/test_lakeshore_372.py b/tests/drivers/test_lakeshore_372.py
index 4f59e3f3d1ae..4ebe96f40f49 100644
--- a/tests/drivers/test_lakeshore_372.py
+++ b/tests/drivers/test_lakeshore_372.py
@@ -4,7 +4,6 @@
 import time
 import warnings
 from contextlib import suppress
-from functools import wraps
 from typing import TYPE_CHECKING, Any, Literal, TypeVar
 
 import pytest
@@ -108,18 +107,6 @@ def wrapper(func: Callable[P, T]) -> Callable[P, T]:
     return wrapper
 
 
-def split_args(split_char: str = ","):
-    def wrapper(func):
-        @wraps(func)
-        def decorated_func(self, string_arg):
-            args = string_arg.split(split_char)
-            return func(self, *args)
-
-        return decorated_func
-
-    return wrapper
-
-
 class DictClass:
     def __init__(self, **kwargs):
         # https://stackoverflow.com/questions/16237659/python-how-to-implement-getattr
@@ -220,123 +207,6 @@ def get_t_when_heating(self):
         # start at 7K.
         return max(4, 7 - delta)
 
-    @query("PID?")
-    def pidq(self, arg):
-        heater = self.heaters[arg]
-        return f"{heater.P},{heater.I},{heater.D}"
-
-    @command("PID")
-    @split_args()
-    def pid(self, output, P, I, D):  # noqa E741
-        for a, v in zip(["P", "I", "D"], [P, I, D]):
-            setattr(self.heaters[output], a, v)
-
-    @query("OUTMODE?")
-    def outmodeq(self, arg):
-        heater = self.heaters[arg]
-        return (
-            f"{heater.mode},{heater.input_channel},"
-            f"{heater.powerup_enable},{heater.polarity},"
-            f"{heater.use_filter},{heater.delay}"
-        )
-
-    @command("OUTMODE")
-    @split_args()
-    def outputmode(
-        self, output, mode, input_channel, powerup_enable, polarity, use_filter, delay
-    ):
-        h = self.heaters[output]
-        h.output = output
-        h.mode = mode
-        h.input_channel = input_channel
-        h.powerup_enable = powerup_enable
-        h.polarity = polarity
-        h.use_filter = use_filter
-        h.delay = delay
-
-    @query("INSET?")
-    def insetq(self, channel):
-        ch = self.channel_mock[channel]
-        return (
-            f"{ch.enabled},{ch.dwell},"
-            f"{ch.pause},{ch.curve_number},"
-            f"{ch.temperature_coefficient}"
-        )
-
-    @command("INSET")
-    @split_args()
-    def inset(
-        self, channel, enabled, dwell, pause, curve_number, temperature_coefficient
-    ):
-        ch = self.channel_mock[channel]
-        ch.enabled = enabled
-        ch.dwell = dwell
-        ch.pause = pause
-        ch.curve_number = curve_number
-        ch.temperature_coefficient = temperature_coefficient
-
-    @query("INTYPE?")
-    def intypeq(self, channel):
-        ch = self.channel_mock[channel]
-        return (
-            f"{ch.excitation_mode},{ch.excitation_range_number},"
-            f"{ch.auto_range},{ch.range},"
-            f"{ch.current_source_shunted},{ch.units}"
-        )
-
-    @command("INTYPE")
-    @split_args()
-    def intype(
-        self,
-        channel,
-        excitation_mode,
-        excitation_range_number,
-        auto_range,
-        range,
-        current_source_shunted,
-        units,
-    ):
-        ch = self.channel_mock[channel]
-        ch.excitation_mode = excitation_mode
-        ch.excitation_range_number = excitation_range_number
-        ch.auto_range = auto_range
-        ch.range = range
-        ch.current_source_shunted = current_source_shunted
-        ch.units = units
-
-    @query("RANGE?")
-    def rangeq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.output_range}"
-
-    @command("RANGE")
-    @split_args()
-    def range_cmd(self, heater, output_range):
-        h = self.heaters[heater]
-        h.output_range = output_range
-
-    @query("SETP?")
-    def setpointq(self, heater):
-        h = self.heaters[heater]
-        return f"{h.setpoint}"
-
-    @command("SETP")
-    @split_args()
-    def setpoint(self, heater, setpoint):
-        h = self.heaters[heater]
-        h.setpoint = setpoint
-
-    @query("TLIMIT?")
-    def tlimitq(self, channel):
-        chan = self.channel_mock[channel]
-        return f"{chan.tlimit}"
-
-    @command("TLIMIT")
-    @split_args()
-    def tlimitcmd(self, channel, tlimit):
-        chan = self.channel_mock[channel]
-        chan.tlimit = tlimit
-
     @query("KRDG?")
     def temperature(self, output):
         chan = self.channel_mock[output]
diff --git a/tests/drivers/test_tektronix_dpo7200xx.py b/tests/drivers/test_tektronix_dpo7200xx.py
index 016ff6beaf64..8d6eef682d5d 100644
--- a/tests/drivers/test_tektronix_dpo7200xx.py
+++ b/tests/drivers/test_tektronix_dpo7200xx.py
@@ -1,13 +1,17 @@
 import sys
 import timeit
+from typing import TYPE_CHECKING
 
 import pytest
 
 from qcodes.instrument_drivers.tektronix.DPO7200xx import TektronixDPO7000xx
 
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
 
 @pytest.fixture(scope="function")
-def tektronix_dpo():
+def tektronix_dpo() -> "Generator[TektronixDPO7000xx, None, None]":
     """
     A six channel-per-relay instrument
     """
@@ -24,7 +28,7 @@ def tektronix_dpo():
 @pytest.mark.xfail(
     condition=sys.platform == "win32", reason="Time resolution is too low on windows"
 )
-def test_adjust_timer(tektronix_dpo) -> None:
+def test_adjust_timer(tektronix_dpo: TektronixDPO7000xx) -> None:
     """
     After adjusting the type of the measurement or
     the source of the measurement, we need wait at least 0.1 seconds
@@ -54,7 +58,7 @@
     # measurements slightly sooner then 'minimum_adjustment_time'
 
 
-def test_measurements_return_float(tektronix_dpo) -> None:
+def test_measurements_return_float(tektronix_dpo: TektronixDPO7000xx) -> None:
     amplitude = tektronix_dpo.measurement[0].amplitude()
     assert isinstance(amplitude, float)
 
@@ -62,6 +66,6 @@
     assert isinstance(mean_amplitude, float)
 
 
-def test_measurement_sets_state(tektronix_dpo) -> None:
+def test_measurement_sets_state(tektronix_dpo: TektronixDPO7000xx) -> None:
     tektronix_dpo.measurement[1].frequency()
     assert tektronix_dpo.measurement[1].state() == 1